[official-gcc.git] / gcc / c-family / c-omp.c
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2 called from parsing in the C and C++ front ends.
4 Copyright (C) 2005-2015 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "alias.h"
29 #include "symtab.h"
30 #include "tree.h"
31 #include "c-common.h"
32 #include "c-pragma.h"
33 #include "gimple-expr.h"
34 #include "langhooks.h"
35 #include "omp-low.h"
36 #include "gomp-constants.h"
39 /* Complete a #pragma oacc wait construct. LOC is the location of
40 the #pragma. */
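/* Illustrative sketch (editorial example, not from the original source):
   for a directive such as
     #pragma acc wait (1, 2) async (3)
   the call built below is roughly
     GOACC_wait (3, 2, 1, 2);
   i.e. the async expression (or GOMP_ASYNC_SYNC when no async clause is
   present), the number of wait operands, then the wait operands.  */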
42 tree
43 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
45 const int nparms = list_length (parms);
46 tree stmt, t;
47 vec<tree, va_gc> *args;
49 vec_alloc (args, nparms + 2);
50 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
52 if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC))
53 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
54 else
55 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
57 args->quick_push (t);
58 args->quick_push (build_int_cst (integer_type_node, nparms));
60 for (t = parms; t; t = TREE_CHAIN (t))
62 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
63 args->quick_push (build_int_cst (integer_type_node,
64 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
65 else
66 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
69 stmt = build_call_expr_loc_vec (loc, stmt, args);
70 add_stmt (stmt);
72 vec_free (args);
74 return stmt;
77 /* Complete a #pragma omp master construct. STMT is the structured-block
78 that follows the pragma. LOC is the location of the #pragma. */
80 tree
81 c_finish_omp_master (location_t loc, tree stmt)
83 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
84 SET_EXPR_LOCATION (t, loc);
85 return t;
88 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
89 that follows the pragma. LOC is the location of the #pragma. */
91 tree
92 c_finish_omp_taskgroup (location_t loc, tree stmt)
94 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
95 SET_EXPR_LOCATION (t, loc);
96 return t;
99 /* Complete a #pragma omp critical construct. STMT is the structured-block
100 that follows the pragma, NAME is the identifier in the pragma, or null
101 if it was omitted. LOC is the location of the #pragma. */
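/* For example (editorial note): `#pragma omp critical (update_lock)'
   reaches this point with NAME set to the identifier `update_lock',
   whereas a bare `#pragma omp critical' has NAME == NULL_TREE, so all
   unnamed critical regions share one lock.  */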
103 tree
104 c_finish_omp_critical (location_t loc, tree body, tree name)
106 tree stmt = make_node (OMP_CRITICAL);
107 TREE_TYPE (stmt) = void_type_node;
108 OMP_CRITICAL_BODY (stmt) = body;
109 OMP_CRITICAL_NAME (stmt) = name;
110 SET_EXPR_LOCATION (stmt, loc);
111 return add_stmt (stmt);
114 /* Complete a #pragma omp ordered construct. STMT is the structured-block
115 that follows the pragma. LOC is the location of the #pragma. */
117 tree
118 c_finish_omp_ordered (location_t loc, tree stmt)
120 tree t = build1 (OMP_ORDERED, void_type_node, stmt);
121 SET_EXPR_LOCATION (t, loc);
122 return add_stmt (t);
126 /* Complete a #pragma omp barrier construct. LOC is the location of
127 the #pragma. */
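/* Editorial note: this simply emits a call to the builtin behind
   BUILT_IN_GOMP_BARRIER, i.e. conceptually `GOMP_barrier ();' at LOC.  */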
129 void
130 c_finish_omp_barrier (location_t loc)
132 tree x;
134 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
135 x = build_call_expr_loc (loc, x, 0);
136 add_stmt (x);
140 /* Complete a #pragma omp taskwait construct. LOC is the location of the
141 pragma. */
143 void
144 c_finish_omp_taskwait (location_t loc)
146 tree x;
148 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
149 x = build_call_expr_loc (loc, x, 0);
150 add_stmt (x);
154 /* Complete a #pragma omp taskyield construct. LOC is the location of the
155 pragma. */
157 void
158 c_finish_omp_taskyield (location_t loc)
160 tree x;
162 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
163 x = build_call_expr_loc (loc, x, 0);
164 add_stmt (x);
168 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
169 the expression to be implemented atomically is LHS opcode= RHS.
170 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
171 opcode= RHS with the new or old content of LHS returned.
172 LOC is the location of the atomic statement. The value returned
173 is either error_mark_node (if the construct was erroneous) or an
174 OMP_ATOMIC* node which should be added to the current statement
175 tree with add_stmt. */
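/* Illustrative mapping (editorial example, not part of the original
   comment):
     #pragma omp atomic update
       x += expr;              CODE = OMP_ATOMIC, OPCODE = PLUS_EXPR
     #pragma omp atomic read
       v = x;                  CODE = OMP_ATOMIC_READ
     #pragma omp atomic capture
       v = x += expr;          CODE = OMP_ATOMIC_CAPTURE_NEW (new value in V)
       { v = x; x += expr; }   CODE = OMP_ATOMIC_CAPTURE_OLD (old value in V)  */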
177 tree
178 c_finish_omp_atomic (location_t loc, enum tree_code code,
179 enum tree_code opcode, tree lhs, tree rhs,
180 tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
182 tree x, type, addr, pre = NULL_TREE;
184 if (lhs == error_mark_node || rhs == error_mark_node
185 || v == error_mark_node || lhs1 == error_mark_node
186 || rhs1 == error_mark_node)
187 return error_mark_node;
189 /* ??? According to one reading of the OpenMP spec, complex types are
190 supported, but there are no atomic stores for any architecture.
191 But at least icc 9.0 doesn't support complex types here either.
192 And let's not even talk about vector types... */
193 type = TREE_TYPE (lhs);
194 if (!INTEGRAL_TYPE_P (type)
195 && !POINTER_TYPE_P (type)
196 && !SCALAR_FLOAT_TYPE_P (type))
198 error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
199 return error_mark_node;
202 if (opcode == RDIV_EXPR)
203 opcode = TRUNC_DIV_EXPR;
205 /* ??? Validate that rhs does not overlap lhs. */
207 /* Take and save the address of the lhs. From then on we'll reference it
208 via indirection. */
209 addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
210 if (addr == error_mark_node)
211 return error_mark_node;
212 addr = save_expr (addr);
213 if (TREE_CODE (addr) != SAVE_EXPR
214 && (TREE_CODE (addr) != ADDR_EXPR
215 || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
217 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
218 it even after unsharing function body. */
219 tree var = create_tmp_var_raw (TREE_TYPE (addr));
220 DECL_CONTEXT (var) = current_function_decl;
221 addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
223 lhs = build_indirect_ref (loc, addr, RO_NULL);
225 if (code == OMP_ATOMIC_READ)
227 x = build1 (OMP_ATOMIC_READ, type, addr);
228 SET_EXPR_LOCATION (x, loc);
229 OMP_ATOMIC_SEQ_CST (x) = seq_cst;
230 return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
231 loc, x, NULL_TREE);
234 /* There are lots of warnings, errors, and conversions that need to happen
235 in the course of interpreting a statement. Use the normal mechanisms
236 to do this, and then take it apart again. */
237 if (swapped)
239 rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
240 opcode = NOP_EXPR;
242 bool save = in_late_binary_op;
243 in_late_binary_op = true;
244 x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
245 in_late_binary_op = save;
246 if (x == error_mark_node)
247 return error_mark_node;
248 if (TREE_CODE (x) == COMPOUND_EXPR)
250 pre = TREE_OPERAND (x, 0);
251 gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
252 x = TREE_OPERAND (x, 1);
254 gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
255 rhs = TREE_OPERAND (x, 1);
257 /* Punt the actual generation of atomic operations to common code. */
258 if (code == OMP_ATOMIC)
259 type = void_type_node;
260 x = build2 (code, type, addr, rhs);
261 SET_EXPR_LOCATION (x, loc);
262 OMP_ATOMIC_SEQ_CST (x) = seq_cst;
264 /* Generally it is hard to prove rhs1 and lhs are the same memory
265 location; just diagnose different variables. */
266 if (rhs1
267 && TREE_CODE (rhs1) == VAR_DECL
268 && TREE_CODE (lhs) == VAR_DECL
269 && rhs1 != lhs)
271 if (code == OMP_ATOMIC)
272 error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
273 else
274 error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
275 return error_mark_node;
278 if (code != OMP_ATOMIC)
280 /* Generally it is hard to prove lhs1 and lhs are the same memory
281 location; just diagnose different variables. */
282 if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
284 if (lhs1 != lhs)
286 error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
287 return error_mark_node;
290 x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
291 loc, x, NULL_TREE);
292 if (rhs1 && rhs1 != lhs)
294 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
295 if (rhs1addr == error_mark_node)
296 return error_mark_node;
297 x = omit_one_operand_loc (loc, type, x, rhs1addr);
299 if (lhs1 && lhs1 != lhs)
301 tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
302 if (lhs1addr == error_mark_node)
303 return error_mark_node;
304 if (code == OMP_ATOMIC_CAPTURE_OLD)
305 x = omit_one_operand_loc (loc, type, x, lhs1addr);
306 else
308 x = save_expr (x);
309 x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
313 else if (rhs1 && rhs1 != lhs)
315 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
316 if (rhs1addr == error_mark_node)
317 return error_mark_node;
318 x = omit_one_operand_loc (loc, type, x, rhs1addr);
321 if (pre)
322 x = omit_one_operand_loc (loc, type, x, pre);
323 return x;
327 /* Complete a #pragma omp flush construct. We don't do anything with
328 the variable list that the syntax allows. LOC is the location of
329 the #pragma. */
331 void
332 c_finish_omp_flush (location_t loc)
334 tree x;
336 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
337 x = build_call_expr_loc (loc, x, 0);
338 add_stmt (x);
342 /* Check and canonicalize OMP_FOR increment expression.
343 Helper function for c_finish_omp_for. */
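/* Editorial example (assumed form, not from the original source): with
   DECL `i', an increment written as `i = (i + 2) + j' is handled by
   returning the step expression `2 + j', which c_finish_omp_for then
   rebuilds as `i = i + (2 + j)'.  */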
345 static tree
346 check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
348 tree t;
350 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
351 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
352 return error_mark_node;
354 if (exp == decl)
355 return build_int_cst (TREE_TYPE (exp), 0);
357 switch (TREE_CODE (exp))
359 CASE_CONVERT:
360 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
361 if (t != error_mark_node)
362 return fold_convert_loc (loc, TREE_TYPE (exp), t);
363 break;
364 case MINUS_EXPR:
365 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
366 if (t != error_mark_node)
367 return fold_build2_loc (loc, MINUS_EXPR,
368 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
369 break;
370 case PLUS_EXPR:
371 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
372 if (t != error_mark_node)
373 return fold_build2_loc (loc, PLUS_EXPR,
374 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
375 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
376 if (t != error_mark_node)
377 return fold_build2_loc (loc, PLUS_EXPR,
378 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
379 break;
380 case COMPOUND_EXPR:
382 /* cp_build_modify_expr forces preevaluation of the RHS to make
383 sure that it is evaluated before the lvalue-rvalue conversion
384 is applied to the LHS. Reconstruct the original expression. */
385 tree op0 = TREE_OPERAND (exp, 0);
386 if (TREE_CODE (op0) == TARGET_EXPR
387 && !VOID_TYPE_P (TREE_TYPE (op0)))
389 tree op1 = TREE_OPERAND (exp, 1);
390 tree temp = TARGET_EXPR_SLOT (op0);
391 if (BINARY_CLASS_P (op1)
392 && TREE_OPERAND (op1, 1) == temp)
394 op1 = copy_node (op1);
395 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
396 return check_omp_for_incr_expr (loc, op1, decl);
399 break;
401 default:
402 break;
405 return error_mark_node;
408 /* If the OMP_FOR increment expression in INCR is of pointer type,
409 canonicalize it into an expression handled by gimplify_omp_for()
410 and return it. DECL is the iteration variable. */
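/* Editorial example: for `char *p', the front end represents `p++' with
   the element size as the second operand, so the increment is rewritten
   here as roughly `p = p + 1', i.e. a MODIFY_EXPR whose right-hand side
   is a POINTER_PLUS_EXPR; for `p--' the offset is negated first.  */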
412 static tree
413 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
415 if (POINTER_TYPE_P (TREE_TYPE (decl))
416 && TREE_OPERAND (incr, 1))
418 tree t = fold_convert_loc (loc,
419 sizetype, TREE_OPERAND (incr, 1));
421 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
422 || TREE_CODE (incr) == PREDECREMENT_EXPR)
423 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
424 t = fold_build_pointer_plus (decl, t);
425 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
427 return incr;
430 /* Validate and generate OMP_FOR.
431 DECLV is a vector of iteration variables, for each collapsed loop.
432 INITV, CONDV and INCRV are vectors containing initialization
433 expressions, controlling predicates and increment expressions.
434 BODY is the body of the loop and PRE_BODY statements that go before
435 the loop. */
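/* For example (editorial sketch), for
     #pragma omp for
     for (i = 0; i < n; i++)
       body;
   DECLV is {i}, INITV is {i = 0}, CONDV is {i < n} and INCRV is {i++};
   after the checks below the canonicalized init and increment are stored
   back into INITV and INCRV and an OMP_FOR statement is returned.  */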
437 tree
438 c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
439 tree initv, tree condv, tree incrv, tree body, tree pre_body)
441 location_t elocus;
442 bool fail = false;
443 int i;
445 if ((code == CILK_SIMD || code == CILK_FOR)
446 && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
447 fail = true;
449 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
450 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
451 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
452 for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
454 tree decl = TREE_VEC_ELT (declv, i);
455 tree init = TREE_VEC_ELT (initv, i);
456 tree cond = TREE_VEC_ELT (condv, i);
457 tree incr = TREE_VEC_ELT (incrv, i);
459 elocus = locus;
460 if (EXPR_HAS_LOCATION (init))
461 elocus = EXPR_LOCATION (init);
463 /* Validate the iteration variable. */
464 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
465 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
467 error_at (elocus, "invalid type for iteration variable %qE", decl);
468 fail = true;
471 /* In the case of "for (int i = 0...)", init will be a decl. It should
472 have a DECL_INITIAL that we can turn into an assignment. */
473 if (init == decl)
475 elocus = DECL_SOURCE_LOCATION (decl);
477 init = DECL_INITIAL (decl);
478 if (init == NULL)
480 error_at (elocus, "%qE is not initialized", decl);
481 init = integer_zero_node;
482 fail = true;
485 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
486 /* FIXME diagnostics: This should
487 be the location of the INIT. */
488 elocus,
489 init,
490 NULL_TREE);
492 if (init != error_mark_node)
494 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
495 gcc_assert (TREE_OPERAND (init, 0) == decl);
498 if (cond == NULL_TREE)
500 error_at (elocus, "missing controlling predicate");
501 fail = true;
503 else
505 bool cond_ok = false;
507 if (EXPR_HAS_LOCATION (cond))
508 elocus = EXPR_LOCATION (cond);
510 if (TREE_CODE (cond) == LT_EXPR
511 || TREE_CODE (cond) == LE_EXPR
512 || TREE_CODE (cond) == GT_EXPR
513 || TREE_CODE (cond) == GE_EXPR
514 || TREE_CODE (cond) == NE_EXPR
515 || TREE_CODE (cond) == EQ_EXPR)
517 tree op0 = TREE_OPERAND (cond, 0);
518 tree op1 = TREE_OPERAND (cond, 1);
520 /* 2.5.1. The comparison in the condition is computed in
521 the type of DECL, otherwise the behavior is undefined.
523 For example:
524 long n; int i;
525 i < n;
527 according to ISO will be evaluated as:
528 (long)i < n;
530 We want to force:
531 i < (int)n; */
532 if (TREE_CODE (op0) == NOP_EXPR
533 && decl == TREE_OPERAND (op0, 0))
535 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
536 TREE_OPERAND (cond, 1)
537 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
538 TREE_OPERAND (cond, 1));
540 else if (TREE_CODE (op1) == NOP_EXPR
541 && decl == TREE_OPERAND (op1, 0))
543 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
544 TREE_OPERAND (cond, 0)
545 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
546 TREE_OPERAND (cond, 0));
549 if (decl == TREE_OPERAND (cond, 0))
550 cond_ok = true;
551 else if (decl == TREE_OPERAND (cond, 1))
553 TREE_SET_CODE (cond,
554 swap_tree_comparison (TREE_CODE (cond)));
555 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
556 TREE_OPERAND (cond, 0) = decl;
557 cond_ok = true;
560 if (TREE_CODE (cond) == NE_EXPR
561 || TREE_CODE (cond) == EQ_EXPR)
563 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
565 if (code != CILK_SIMD && code != CILK_FOR)
566 cond_ok = false;
568 else if (operand_equal_p (TREE_OPERAND (cond, 1),
569 TYPE_MIN_VALUE (TREE_TYPE (decl)),
571 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
572 ? GT_EXPR : LE_EXPR);
573 else if (operand_equal_p (TREE_OPERAND (cond, 1),
574 TYPE_MAX_VALUE (TREE_TYPE (decl)),
576 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
577 ? LT_EXPR : GE_EXPR);
578 else if (code != CILK_SIMD && code != CILK_FOR)
579 cond_ok = false;
583 if (!cond_ok)
585 error_at (elocus, "invalid controlling predicate");
586 fail = true;
590 if (incr == NULL_TREE)
592 error_at (elocus, "missing increment expression");
593 fail = true;
595 else
597 bool incr_ok = false;
599 if (EXPR_HAS_LOCATION (incr))
600 elocus = EXPR_LOCATION (incr);
602 /* Check all the valid increment expressions: v++, v--, ++v, --v,
603 v = v + incr, v = incr + v and v = v - incr. */
604 switch (TREE_CODE (incr))
606 case POSTINCREMENT_EXPR:
607 case PREINCREMENT_EXPR:
608 case POSTDECREMENT_EXPR:
609 case PREDECREMENT_EXPR:
610 if (TREE_OPERAND (incr, 0) != decl)
611 break;
613 incr_ok = true;
614 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
615 break;
617 case COMPOUND_EXPR:
618 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
619 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
620 break;
621 incr = TREE_OPERAND (incr, 1);
622 /* FALLTHRU */
623 case MODIFY_EXPR:
624 if (TREE_OPERAND (incr, 0) != decl)
625 break;
626 if (TREE_OPERAND (incr, 1) == decl)
627 break;
628 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
629 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
630 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
631 incr_ok = true;
632 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
633 || (TREE_CODE (TREE_OPERAND (incr, 1))
634 == POINTER_PLUS_EXPR))
635 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
636 incr_ok = true;
637 else
639 tree t = check_omp_for_incr_expr (elocus,
640 TREE_OPERAND (incr, 1),
641 decl);
642 if (t != error_mark_node)
644 incr_ok = true;
645 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
646 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
649 break;
651 default:
652 break;
654 if (!incr_ok)
656 error_at (elocus, "invalid increment expression");
657 fail = true;
661 TREE_VEC_ELT (initv, i) = init;
662 TREE_VEC_ELT (incrv, i) = incr;
665 if (fail)
666 return NULL;
667 else
669 tree t = make_node (code);
671 TREE_TYPE (t) = void_type_node;
672 OMP_FOR_INIT (t) = initv;
673 OMP_FOR_COND (t) = condv;
674 OMP_FOR_INCR (t) = incrv;
675 OMP_FOR_BODY (t) = body;
676 OMP_FOR_PRE_BODY (t) = pre_body;
678 SET_EXPR_LOCATION (t, locus);
679 return add_stmt (t);
683 /* Right now we have 14 different combined constructs; this
684 function attempts to split or duplicate clauses for combined
685 constructs. CODE is the innermost construct in the combined construct,
686 and MASK makes it possible to determine which constructs are combined together,
687 as every construct has at least one clause that no other construct
688 has (except for OMP_SECTIONS, which can only be combined with parallel).
689 Combined constructs are:
690 #pragma omp parallel for
691 #pragma omp parallel sections
692 #pragma omp parallel for simd
693 #pragma omp for simd
694 #pragma omp distribute simd
695 #pragma omp distribute parallel for
696 #pragma omp distribute parallel for simd
697 #pragma omp teams distribute
698 #pragma omp teams distribute parallel for
699 #pragma omp teams distribute parallel for simd
700 #pragma omp target teams
701 #pragma omp target teams distribute
702 #pragma omp target teams distribute parallel for
703 #pragma omp target teams distribute parallel for simd */
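/* Worked example (editorial, not part of the original comment): for
     #pragma omp parallel for num_threads (4) schedule (static) private (x)
   CODE is OMP_FOR and MASK includes PRAGMA_OMP_CLAUSE_NUM_THREADS, so
   num_threads and private end up on the parallel part, schedule on the
   for part, and an implicit nowait clause is added to the for part.  */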
705 void
706 c_omp_split_clauses (location_t loc, enum tree_code code,
707 omp_clause_mask mask, tree clauses, tree *cclauses)
709 tree next, c;
710 enum c_omp_clause_split s;
711 int i;
713 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
714 cclauses[i] = NULL;
715 /* Add implicit nowait clause on
716 #pragma omp parallel {for,for simd,sections}. */
717 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
718 switch (code)
720 case OMP_FOR:
721 case OMP_SIMD:
722 cclauses[C_OMP_CLAUSE_SPLIT_FOR]
723 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
724 break;
725 case OMP_SECTIONS:
726 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
727 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
728 break;
729 default:
730 break;
733 for (; clauses ; clauses = next)
735 next = OMP_CLAUSE_CHAIN (clauses);
737 switch (OMP_CLAUSE_CODE (clauses))
739 /* First the clauses that are unique to some constructs. */
740 case OMP_CLAUSE_DEVICE:
741 case OMP_CLAUSE_MAP:
742 s = C_OMP_CLAUSE_SPLIT_TARGET;
743 break;
744 case OMP_CLAUSE_NUM_TEAMS:
745 case OMP_CLAUSE_THREAD_LIMIT:
746 s = C_OMP_CLAUSE_SPLIT_TEAMS;
747 break;
748 case OMP_CLAUSE_DIST_SCHEDULE:
749 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
750 break;
751 case OMP_CLAUSE_COPYIN:
752 case OMP_CLAUSE_NUM_THREADS:
753 case OMP_CLAUSE_PROC_BIND:
754 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
755 break;
756 case OMP_CLAUSE_ORDERED:
757 case OMP_CLAUSE_SCHEDULE:
758 case OMP_CLAUSE_NOWAIT:
759 s = C_OMP_CLAUSE_SPLIT_FOR;
760 break;
761 case OMP_CLAUSE_SAFELEN:
762 case OMP_CLAUSE_LINEAR:
763 case OMP_CLAUSE_ALIGNED:
764 s = C_OMP_CLAUSE_SPLIT_SIMD;
765 break;
766 /* Duplicate this to all of distribute, for and simd. */
767 case OMP_CLAUSE_COLLAPSE:
768 if (code == OMP_SIMD)
770 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
771 OMP_CLAUSE_COLLAPSE);
772 OMP_CLAUSE_COLLAPSE_EXPR (c)
773 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
774 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
775 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
777 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
779 if ((mask & (OMP_CLAUSE_MASK_1
780 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
782 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
783 OMP_CLAUSE_COLLAPSE);
784 OMP_CLAUSE_COLLAPSE_EXPR (c)
785 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
786 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
787 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
788 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
790 else
791 s = C_OMP_CLAUSE_SPLIT_FOR;
793 else
794 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
795 break;
796 /* Private clause is supported on all constructs but target;
797 it is enough to put it on the innermost one. For
798 #pragma omp {for,sections} put it on parallel though,
799 as that's what we did for OpenMP 3.1. */
800 case OMP_CLAUSE_PRIVATE:
801 switch (code)
803 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
804 case OMP_FOR: case OMP_SECTIONS:
805 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
806 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
807 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
808 default: gcc_unreachable ();
810 break;
811 /* Firstprivate clause is supported on all constructs but
812 target and simd. Put it on the outermost of those and
813 duplicate on parallel. */
814 case OMP_CLAUSE_FIRSTPRIVATE:
815 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
816 != 0)
818 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
819 | (OMP_CLAUSE_MASK_1
820 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
822 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
823 OMP_CLAUSE_FIRSTPRIVATE);
824 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
825 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
826 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
827 if ((mask & (OMP_CLAUSE_MASK_1
828 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
829 s = C_OMP_CLAUSE_SPLIT_TEAMS;
830 else
831 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
833 else
834 /* This must be
835 #pragma omp parallel{, for{, simd}, sections}. */
836 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
838 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
839 != 0)
841 /* This must be one of
842 #pragma omp {,target }teams distribute
843 #pragma omp target teams
844 #pragma omp {,target }teams distribute simd. */
845 gcc_assert (code == OMP_DISTRIBUTE
846 || code == OMP_TEAMS
847 || code == OMP_SIMD);
848 s = C_OMP_CLAUSE_SPLIT_TEAMS;
850 else if ((mask & (OMP_CLAUSE_MASK_1
851 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
853 /* This must be #pragma omp distribute simd. */
854 gcc_assert (code == OMP_SIMD);
855 s = C_OMP_CLAUSE_SPLIT_TEAMS;
857 else
859 /* This must be #pragma omp for simd. */
860 gcc_assert (code == OMP_SIMD);
861 s = C_OMP_CLAUSE_SPLIT_FOR;
863 break;
864 /* Lastprivate is allowed on for, sections and simd. In
865 parallel {for{, simd},sections} we actually want to put it on
866 parallel rather than for or sections. */
867 case OMP_CLAUSE_LASTPRIVATE:
868 if (code == OMP_FOR || code == OMP_SECTIONS)
870 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
871 != 0)
872 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
873 else
874 s = C_OMP_CLAUSE_SPLIT_FOR;
875 break;
877 gcc_assert (code == OMP_SIMD);
878 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
880 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
881 OMP_CLAUSE_LASTPRIVATE);
882 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
883 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
884 != 0)
885 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
886 else
887 s = C_OMP_CLAUSE_SPLIT_FOR;
888 OMP_CLAUSE_CHAIN (c) = cclauses[s];
889 cclauses[s] = c;
891 s = C_OMP_CLAUSE_SPLIT_SIMD;
892 break;
893 /* Shared and default clauses are allowed on parallel and teams. */
894 case OMP_CLAUSE_SHARED:
895 case OMP_CLAUSE_DEFAULT:
896 if (code == OMP_TEAMS)
898 s = C_OMP_CLAUSE_SPLIT_TEAMS;
899 break;
901 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
902 != 0)
904 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
905 OMP_CLAUSE_CODE (clauses));
906 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
907 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
908 else
909 OMP_CLAUSE_DEFAULT_KIND (c)
910 = OMP_CLAUSE_DEFAULT_KIND (clauses);
911 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
912 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
915 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
916 break;
917 /* Reduction is allowed on simd, for, parallel, sections and teams.
918 Duplicate it on all of them, but omit on for or sections if
919 parallel is present. */
920 case OMP_CLAUSE_REDUCTION:
921 if (code == OMP_SIMD)
923 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
924 OMP_CLAUSE_REDUCTION);
925 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
926 OMP_CLAUSE_REDUCTION_CODE (c)
927 = OMP_CLAUSE_REDUCTION_CODE (clauses);
928 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
929 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
930 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
931 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
933 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
935 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
936 != 0)
938 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
939 OMP_CLAUSE_REDUCTION);
940 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
941 OMP_CLAUSE_REDUCTION_CODE (c)
942 = OMP_CLAUSE_REDUCTION_CODE (clauses);
943 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
944 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
945 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
946 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
947 s = C_OMP_CLAUSE_SPLIT_TEAMS;
949 else if ((mask & (OMP_CLAUSE_MASK_1
950 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
951 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
952 else
953 s = C_OMP_CLAUSE_SPLIT_FOR;
955 else if (code == OMP_SECTIONS)
956 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
957 else
958 s = C_OMP_CLAUSE_SPLIT_TEAMS;
959 break;
960 case OMP_CLAUSE_IF:
961 /* FIXME: This is currently being discussed. */
962 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
963 != 0)
964 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
965 else
966 s = C_OMP_CLAUSE_SPLIT_TARGET;
967 break;
968 default:
969 gcc_unreachable ();
971 OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
972 cclauses[s] = clauses;
977 /* qsort callback to compare #pragma omp declare simd clauses. */
979 static int
980 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
982 tree a = *(const tree *) p;
983 tree b = *(const tree *) q;
984 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
986 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
987 return -1;
988 return 1;
990 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
991 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
992 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
994 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
995 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
996 if (c < d)
997 return 1;
998 if (c > d)
999 return -1;
1001 return 0;
1004 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1005 CLAUSES into argument indexes relative to PARMS and sort them. */
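/* Editorial example (illustrative): given
     #pragma omp declare simd uniform (b) linear (a : 1)
     void f (int *a, int b);
   the PARM_DECL `a' in the linear clause is replaced by index 0 and `b'
   in the uniform clause by index 1, and the clause chain is then
   re-ordered according to c_omp_declare_simd_clause_cmp.  */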
1007 tree
1008 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1010 tree c;
1011 vec<tree> clvec = vNULL;
1013 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1015 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1016 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1017 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1019 tree decl = OMP_CLAUSE_DECL (c);
1020 tree arg;
1021 int idx;
1022 for (arg = parms, idx = 0; arg;
1023 arg = TREE_CHAIN (arg), idx++)
1024 if (arg == decl)
1025 break;
1026 if (arg == NULL_TREE)
1028 error_at (OMP_CLAUSE_LOCATION (c),
1029 "%qD is not an function argument", decl);
1030 continue;
1032 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1034 clvec.safe_push (c);
1036 if (!clvec.is_empty ())
1038 unsigned int len = clvec.length (), i;
1039 clvec.qsort (c_omp_declare_simd_clause_cmp);
1040 clauses = clvec[0];
1041 for (i = 0; i < len; i++)
1042 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1044 clvec.release ();
1045 return clauses;
1048 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1050 void
1051 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1053 tree c;
1055 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1056 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1057 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1058 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1060 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1061 tree arg;
1062 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1063 arg = TREE_CHAIN (arg), i++)
1064 if (i == idx)
1065 break;
1066 gcc_assert (arg);
1067 OMP_CLAUSE_DECL (c) = arg;
1071 /* Return the predetermined OpenMP sharing attribute of DECL, or OMP_CLAUSE_DEFAULT_UNSPECIFIED if there is none. */
1073 enum omp_clause_default_kind
1074 c_omp_predetermined_sharing (tree decl)
1076 /* Variables with const-qualified type having no mutable member
1077 are predetermined shared. */
1078 if (TREE_READONLY (decl))
1079 return OMP_CLAUSE_DEFAULT_SHARED;
1081 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
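/* Editorial example: a variable declared `const int limit = 64;' is
   TREE_READONLY, so it is reported as predetermined shared above, while
   any other variable yields OMP_CLAUSE_DEFAULT_UNSPECIFIED and is subject
   to the usual default clause handling.  */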