/* This file contains routines to construct GNU OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
                  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "c-common.h"
#include "c-pragma.h"
#include "gimple-expr.h"
#include "langhooks.h"

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  tree t = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  */
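
/* An informal sketch (added for illustration; not from the original
   sources) of how the parser maps source-level atomics onto this routine:

     #pragma omp atomic update
       x += expr;   => code OMP_ATOMIC, opcode PLUS_EXPR, LHS x, RHS expr
     #pragma omp atomic read
       v = x;       => code OMP_ATOMIC_READ, LHS x, V v
     #pragma omp atomic capture
       v = x++;     => code OMP_ATOMIC_CAPTURE_OLD, opcode PLUS_EXPR,
                       LHS x, RHS 1, V v

   The caller adds the returned OMP_ATOMIC* node to the statement list
   with add_stmt.  */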

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
                     enum tree_code opcode, tree lhs, tree rhs,
                     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex types are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And let's not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
          || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
         it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
                                loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build2_loc (loc, opcode, TREE_TYPE (lhs), rhs, lhs);
      opcode = NOP_EXPR;
    }
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && TREE_CODE (rhs1) == VAR_DECL
      && TREE_CODE (lhs) == VAR_DECL
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
        error_at (loc, "%<#pragma omp atomic update%> uses two different "
                       "variables for memory");
      else
        error_at (loc, "%<#pragma omp atomic capture%> uses two different "
                       "variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
         location, just diagnose different variables.  */
      if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
        {
          if (lhs1 != lhs)
            {
              error_at (loc, "%<#pragma omp atomic capture%> uses two "
                             "different variables for memory");
              return error_mark_node;
            }
        }
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
                             loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
        {
          tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
          if (rhs1addr == error_mark_node)
            return error_mark_node;
          x = omit_one_operand_loc (loc, type, x, rhs1addr);
        }
      if (lhs1 && lhs1 != lhs)
        {
          tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
          if (lhs1addr == error_mark_node)
            return error_mark_node;
          if (code == OMP_ATOMIC_CAPTURE_OLD)
            x = omit_one_operand_loc (loc, type, x, lhs1addr);
          else
            {
              x = save_expr (x);
              x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
            }
        }
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
        return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  return x;
}

/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.  */
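
/* For example (an illustrative note, not from the original sources):
   given DECL == i and an increment written as "i = 2 + i", EXP here is
   the PLUS_EXPR "2 + i"; the recursion below replaces the occurrence of
   DECL with zero and folds, so the function returns the per-iteration
   step 2.  Anything it cannot decompose yields error_mark_node.  */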

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
        /* cp_build_modify_expr forces preevaluation of the RHS to make
           sure that it is evaluated before the lvalue-rvalue conversion
           is applied to the LHS.  Reconstruct the original expression.  */
        tree op0 = TREE_OPERAND (exp, 0);
        if (TREE_CODE (op0) == TARGET_EXPR
            && !VOID_TYPE_P (TREE_TYPE (op0)))
          {
            tree op1 = TREE_OPERAND (exp, 1);
            tree temp = TARGET_EXPR_SLOT (op0);
            if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
                && TREE_OPERAND (op1, 1) == temp)
              {
                op1 = copy_node (op1);
                TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
                return check_omp_for_incr_expr (loc, op1, decl);
              }
          }
        break;
      }
    default:
      break;
    }

  return error_mark_node;
}

/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */
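
/* Illustrative sketch (not part of the original comment): for a pointer
   iterator P, "P++" is rewritten into a MODIFY_EXPR assigning P a
   POINTER_PLUS_EXPR of P and the (sizetype) step via
   fold_build_pointer_plus; the decrement forms negate the step first.
   That is the shape gimplify_omp_for expects.  */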

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
                                 sizetype, TREE_OPERAND (incr, 1));

      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
          || TREE_CODE (incr) == PREDECREMENT_EXPR)
        t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}

/* Validate and emit code for the OpenMP directive #pragma omp for.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  */
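
/* As an illustration (added, not from the original sources), for

     #pragma omp for
     for (i = 0; i < n; i++)
       body;

   the parser passes one-element vectors with DECLV[0] == i,
   INITV[0] == (i = 0), CONDV[0] == (i < n) and INCRV[0] == (i++);
   with collapse(N), each vector has N elements, one per nested loop.  */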

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
                  tree initv, tree condv, tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if (code == CILK_SIMD
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
        elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
          && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
        {
          error_at (elocus, "invalid type for iteration variable %qE", decl);
          fail = true;
        }

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
         have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
        {
          elocus = DECL_SOURCE_LOCATION (decl);

          init = DECL_INITIAL (decl);
          if (init == NULL)
            {
              error_at (elocus, "%qE is not initialized", decl);
              init = integer_zero_node;
              fail = true;
            }

          init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
                                    /* FIXME diagnostics: This should
                                       be the location of the INIT.  */
                                    elocus,
                                    init,
                                    NULL_TREE);
        }
      if (init != error_mark_node)
        {
          gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
          gcc_assert (TREE_OPERAND (init, 0) == decl);
        }

      if (cond == NULL_TREE)
        {
          error_at (elocus, "missing controlling predicate");
          fail = true;
        }
      else
        {
          bool cond_ok = false;

          if (EXPR_HAS_LOCATION (cond))
            elocus = EXPR_LOCATION (cond);

          if (TREE_CODE (cond) == LT_EXPR
              || TREE_CODE (cond) == LE_EXPR
              || TREE_CODE (cond) == GT_EXPR
              || TREE_CODE (cond) == GE_EXPR
              || TREE_CODE (cond) == NE_EXPR
              || TREE_CODE (cond) == EQ_EXPR)
            {
              tree op0 = TREE_OPERAND (cond, 0);
              tree op1 = TREE_OPERAND (cond, 1);

              /* 2.5.1.  The comparison in the condition is computed in
                 the type of DECL, otherwise the behavior is undefined.

                 For example:
                 long n; int i;
                 i < n;

                 according to ISO will be evaluated as:
                 (long)i < n;

                 We want to force:
                 i < (int)n;  */
              if (TREE_CODE (op0) == NOP_EXPR
                  && decl == TREE_OPERAND (op0, 0))
                {
                  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
                  TREE_OPERAND (cond, 1)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 1));
                }
              else if (TREE_CODE (op1) == NOP_EXPR
                       && decl == TREE_OPERAND (op1, 0))
                {
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
                  TREE_OPERAND (cond, 0)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 0));
                }

              if (decl == TREE_OPERAND (cond, 0))
                cond_ok = true;
              else if (decl == TREE_OPERAND (cond, 1))
                {
                  TREE_SET_CODE (cond,
                                 swap_tree_comparison (TREE_CODE (cond)));
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
                  TREE_OPERAND (cond, 0) = decl;
                  cond_ok = true;
                }

              if (TREE_CODE (cond) == NE_EXPR
                  || TREE_CODE (cond) == EQ_EXPR)
                {
                  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
                    cond_ok = false;
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MIN_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? GT_EXPR : LE_EXPR);
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MAX_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? LT_EXPR : GE_EXPR);
                  else if (code != CILK_SIMD)
                    cond_ok = false;
                }
            }

          if (!cond_ok)
            {
              error_at (elocus, "invalid controlling predicate");
              fail = true;
            }
        }

      if (incr == NULL_TREE)
        {
          error_at (elocus, "missing increment expression");
          fail = true;
        }
      else
        {
          bool incr_ok = false;

          if (EXPR_HAS_LOCATION (incr))
            elocus = EXPR_LOCATION (incr);

          /* Check all the valid increment expressions: v++, v--, ++v, --v,
             v = v + incr, v = incr + v and v = v - incr.  */
          switch (TREE_CODE (incr))
            {
            case POSTINCREMENT_EXPR:
            case PREINCREMENT_EXPR:
            case POSTDECREMENT_EXPR:
            case PREDECREMENT_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;

              incr_ok = true;
              incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
              break;

            case MODIFY_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;
              if (TREE_OPERAND (incr, 1) == decl)
                break;
              if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
                  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
                      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
                incr_ok = true;
              else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
                        || (TREE_CODE (TREE_OPERAND (incr, 1))
                            == POINTER_PLUS_EXPR))
                       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
                incr_ok = true;
              else
                {
                  tree t = check_omp_for_incr_expr (elocus,
                                                    TREE_OPERAND (incr, 1),
                                                    decl);
                  if (t != error_mark_node)
                    {
                      incr_ok = true;
                      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
                      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                    }
                }
              break;

            default:
              break;
            }
          if (!incr_ok)
            {
              error_at (elocus, "invalid increment expression");
              fail = true;
            }
        }

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}

/* Right now we have 14 different combined constructs, this
   function attempts to split or duplicate clauses for combined
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows to determine which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can be only combined with parallel).
   Combined constructs are:
   #pragma omp parallel for
   #pragma omp parallel sections
   #pragma omp parallel for simd
   #pragma omp for simd
   #pragma omp distribute simd
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd  */
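
/* For instance (an informal example, not from the original comment),

     #pragma omp parallel for num_threads(4) schedule(static) private(x)

   is split so that num_threads(4) and private(x) land in
   cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL], schedule(static) lands in
   cclauses[C_OMP_CLAUSE_SPLIT_FOR], and an implicit nowait is added to
   the for part, as described in the code below.  */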

void
c_omp_split_clauses (location_t loc, enum tree_code code,
                     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      case OMP_SECTIONS:
        cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      default:
        break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
        {
        /* First the clauses that are unique to some constructs.  */
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_MAP:
          s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
          s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_DIST_SCHEDULE:
          s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_PROC_BIND:
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
          s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        case OMP_CLAUSE_SAFELEN:
        case OMP_CLAUSE_LINEAR:
        case OMP_CLAUSE_ALIGNED:
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        /* Duplicate this to all of distribute, for and simd.  */
        case OMP_CLAUSE_COLLAPSE:
          if (code == OMP_SIMD)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_COLLAPSE);
              OMP_CLAUSE_COLLAPSE_EXPR (c)
                = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
              cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_COLLAPSE);
                  OMP_CLAUSE_COLLAPSE_EXPR (c)
                    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
                  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
                  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else
            s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        /* Private clause is supported on all constructs but target,
           it is enough to put it on the innermost one.  For
           #pragma omp {for,sections} put it on parallel though,
           as that's what we did for OpenMP 3.1.  */
        case OMP_CLAUSE_PRIVATE:
          switch (code)
            {
            case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
            case OMP_FOR: case OMP_SECTIONS:
            case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
            case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
            case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
            default: gcc_unreachable ();
            }
          break;
        /* Firstprivate clause is supported on all constructs but
           target and simd.  Put it on the outermost of those and
           duplicate on parallel.  */
        case OMP_CLAUSE_FIRSTPRIVATE:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
              != 0)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_FIRSTPRIVATE);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  if ((mask & (OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
                    s = C_OMP_CLAUSE_SPLIT_TEAMS;
                  else
                    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                /* This must be
                   #pragma omp parallel{, for{, simd}, sections}.  */
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                   != 0)
            {
              /* This must be #pragma omp {,target }teams distribute.  */
              gcc_assert (code == OMP_DISTRIBUTE);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
            {
              /* This must be #pragma omp distribute simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else
            {
              /* This must be #pragma omp for simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          break;
        /* Lastprivate is allowed on for, sections and simd.  In
           parallel {for{, simd},sections} we actually want to put it on
           parallel rather than for or sections.  */
        case OMP_CLAUSE_LASTPRIVATE:
          if (code == OMP_FOR || code == OMP_SECTIONS)
            {
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
                  != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              break;
            }
          gcc_assert (code == OMP_SIMD);
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_LASTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
                  != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              OMP_CLAUSE_CHAIN (c) = cclauses[s];
              cclauses[s] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        /* Shared and default clauses are allowed on parallel and teams.  */
        case OMP_CLAUSE_SHARED:
        case OMP_CLAUSE_DEFAULT:
          if (code == OMP_TEAMS)
            {
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
              break;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
              != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_CODE (clauses));
              if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
                OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              else
                OMP_CLAUSE_DEFAULT_KIND (c)
                  = OMP_CLAUSE_DEFAULT_KIND (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
              cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        /* Reduction is allowed on simd, for, parallel, sections and teams.
           Duplicate it on all of them, but omit on for or sections if
           parallel is present.  */
        case OMP_CLAUSE_REDUCTION:
          if (code == OMP_SIMD)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_REDUCTION);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              OMP_CLAUSE_REDUCTION_CODE (c)
                = OMP_CLAUSE_REDUCTION_CODE (clauses);
              OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
              cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                  != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_REDUCTION);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_REDUCTION_CODE (c)
                    = OMP_CLAUSE_REDUCTION_CODE (clauses);
                  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  s = C_OMP_CLAUSE_SPLIT_TEAMS;
                }
              else if ((mask & (OMP_CLAUSE_MASK_1
                                << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else if (code == OMP_SECTIONS)
            s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          else
            s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_IF:
          /* FIXME: This is currently being discussed.  */
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
              != 0)
            s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          else
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        default:
          gcc_unreachable ();
        }
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}

/* qsort callback to compare #pragma omp declare simd clauses.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
        return -1;
      return 1;
    }
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_low_cst (OMP_CLAUSE_DECL (a), 0);
      int d = tree_low_cst (OMP_CLAUSE_DECL (b), 0);
      if (c < d)
        return 1;
      if (c > d)
        return -1;
    }
  return 0;
}

/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */
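
/* Sketch of the intended effect (illustrative; the declaration and
   parameter names below are made up for the example):

     #pragma omp declare simd uniform(b) linear(a:1)
     int f (int a, int b);

   The uniform and linear clauses initially carry the PARM_DECLs of B
   and A; this routine rewrites them into zero-based argument indexes
   (a -> 0, b -> 1) and sorts the clause chain into a canonical order
   for later processing.  */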

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
        {
          tree decl = OMP_CLAUSE_DECL (c);
          tree arg;
          int idx;
          for (arg = parms, idx = 0; arg;
               arg = TREE_CHAIN (arg), idx++)
            if (arg == decl)
              break;
          if (arg == NULL_TREE)
            {
              error_at (OMP_CLAUSE_LOCATION (c),
                        "%qD is not a function argument", decl);
              continue;
            }
          OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
        }
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
        OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  clvec.release ();
  return clauses;
}

/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
        && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
        && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
        int idx = tree_low_cst (OMP_CLAUSE_DECL (c), 0), i;
        tree arg;
        for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
             arg = TREE_CHAIN (arg), i++)
          if (i == idx)
            break;
        gcc_assert (arg);
        OMP_CLAUSE_DECL (c) = arg;
      }
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}