PR rtl-optimization/82913
[official-gcc.git] / gcc / c-family / c-omp.c
blob8e045093adb00a7dd80cb98eece93d3e71caacd1
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2 called from parsing in the C and C++ front ends.
4 Copyright (C) 2005-2017 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "options.h"
28 #include "c-common.h"
29 #include "gimple-expr.h"
30 #include "c-pragma.h"
31 #include "omp-general.h"
32 #include "gomp-constants.h"
35 /* Complete a #pragma oacc wait construct. LOC is the location of
36 the #pragma. */
38 tree
39 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
41 const int nparms = list_length (parms);
42 tree stmt, t;
43 vec<tree, va_gc> *args;
45 vec_alloc (args, nparms + 2);
46 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
48 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
49 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
50 else
51 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
53 args->quick_push (t);
54 args->quick_push (build_int_cst (integer_type_node, nparms));
56 for (t = parms; t; t = TREE_CHAIN (t))
58 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
59 args->quick_push (build_int_cst (integer_type_node,
60 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
61 else
62 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
65 stmt = build_call_expr_loc_vec (loc, stmt, args);
67 vec_free (args);
69 return stmt;
72 /* Complete a #pragma omp master construct. STMT is the structured-block
73 that follows the pragma. LOC is the l*/
75 tree
76 c_finish_omp_master (location_t loc, tree stmt)
78 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
79 SET_EXPR_LOCATION (t, loc);
80 return t;
83 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
84 that follows the pragma. LOC is the l*/
86 tree
87 c_finish_omp_taskgroup (location_t loc, tree stmt)
89 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
90 SET_EXPR_LOCATION (t, loc);
91 return t;
94 /* Complete a #pragma omp critical construct. STMT is the structured-block
95 that follows the pragma, NAME is the identifier in the pragma, or null
96 if it was omitted. LOC is the location of the #pragma. */
98 tree
99 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
101 tree stmt = make_node (OMP_CRITICAL);
102 TREE_TYPE (stmt) = void_type_node;
103 OMP_CRITICAL_BODY (stmt) = body;
104 OMP_CRITICAL_NAME (stmt) = name;
105 OMP_CRITICAL_CLAUSES (stmt) = clauses;
106 SET_EXPR_LOCATION (stmt, loc);
107 return add_stmt (stmt);
110 /* Complete a #pragma omp ordered construct. STMT is the structured-block
111 that follows the pragma. LOC is the location of the #pragma. */
113 tree
114 c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
116 tree t = make_node (OMP_ORDERED);
117 TREE_TYPE (t) = void_type_node;
118 OMP_ORDERED_BODY (t) = stmt;
119 if (!flag_openmp /* flag_openmp_simd */
120 && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
121 || OMP_CLAUSE_CHAIN (clauses)))
122 clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
123 OMP_ORDERED_CLAUSES (t) = clauses;
124 SET_EXPR_LOCATION (t, loc);
125 return add_stmt (t);
129 /* Complete a #pragma omp barrier construct. LOC is the location of
130 the #pragma. */
132 void
133 c_finish_omp_barrier (location_t loc)
135 tree x;
137 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
138 x = build_call_expr_loc (loc, x, 0);
139 add_stmt (x);
143 /* Complete a #pragma omp taskwait construct. LOC is the location of the
144 pragma. */
146 void
147 c_finish_omp_taskwait (location_t loc)
149 tree x;
151 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
152 x = build_call_expr_loc (loc, x, 0);
153 add_stmt (x);
157 /* Complete a #pragma omp taskyield construct. LOC is the location of the
158 pragma. */
160 void
161 c_finish_omp_taskyield (location_t loc)
163 tree x;
165 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
166 x = build_call_expr_loc (loc, x, 0);
167 add_stmt (x);
171 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
172 the expression to be implemented atomically is LHS opcode= RHS.
173 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
174 opcode= RHS with the new or old content of LHS returned.
175 LOC is the location of the atomic statement. The value returned
176 is either error_mark_node (if the construct was erroneous) or an
177 OMP_ATOMIC* node which should be added to the current statement
178 tree with add_stmt. If TEST is set, avoid calling save_expr
179 or create_tmp_var*. */
181 tree
182 c_finish_omp_atomic (location_t loc, enum tree_code code,
183 enum tree_code opcode, tree lhs, tree rhs,
184 tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst,
185 bool test)
187 tree x, type, addr, pre = NULL_TREE;
188 HOST_WIDE_INT bitpos = 0, bitsize = 0;
190 if (lhs == error_mark_node || rhs == error_mark_node
191 || v == error_mark_node || lhs1 == error_mark_node
192 || rhs1 == error_mark_node)
193 return error_mark_node;
195 /* ??? According to one reading of the OpenMP spec, complex type are
196 supported, but there are no atomic stores for any architecture.
197 But at least icc 9.0 doesn't support complex types here either.
198 And lets not even talk about vector types... */
199 type = TREE_TYPE (lhs);
200 if (!INTEGRAL_TYPE_P (type)
201 && !POINTER_TYPE_P (type)
202 && !SCALAR_FLOAT_TYPE_P (type))
204 error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
205 return error_mark_node;
207 if (TYPE_ATOMIC (type))
209 error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
210 return error_mark_node;
213 if (opcode == RDIV_EXPR)
214 opcode = TRUNC_DIV_EXPR;
216 /* ??? Validate that rhs does not overlap lhs. */
217 tree blhs = NULL;
218 if (TREE_CODE (lhs) == COMPONENT_REF
219 && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
220 && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
221 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
223 tree field = TREE_OPERAND (lhs, 1);
224 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
225 if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
226 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
227 bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
228 - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
229 else
230 bitpos = 0;
231 bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
232 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
233 gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
234 bitsize = tree_to_shwi (DECL_SIZE (field));
235 blhs = lhs;
236 type = TREE_TYPE (repr);
237 lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
238 repr, TREE_OPERAND (lhs, 2));
241 /* Take and save the address of the lhs. From then on we'll reference it
242 via indirection. */
243 addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
244 if (addr == error_mark_node)
245 return error_mark_node;
246 if (!test)
247 addr = save_expr (addr);
248 if (!test
249 && TREE_CODE (addr) != SAVE_EXPR
250 && (TREE_CODE (addr) != ADDR_EXPR
251 || !VAR_P (TREE_OPERAND (addr, 0))))
253 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
254 it even after unsharing function body. */
255 tree var = create_tmp_var_raw (TREE_TYPE (addr));
256 DECL_CONTEXT (var) = current_function_decl;
257 addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
259 tree orig_lhs = lhs;
260 lhs = build_indirect_ref (loc, addr, RO_NULL);
261 tree new_lhs = lhs;
263 if (code == OMP_ATOMIC_READ)
265 x = build1 (OMP_ATOMIC_READ, type, addr);
266 SET_EXPR_LOCATION (x, loc);
267 OMP_ATOMIC_SEQ_CST (x) = seq_cst;
268 if (blhs)
269 x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
270 bitsize_int (bitsize), bitsize_int (bitpos));
271 return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
272 loc, x, NULL_TREE);
275 /* There are lots of warnings, errors, and conversions that need to happen
276 in the course of interpreting a statement. Use the normal mechanisms
277 to do this, and then take it apart again. */
278 if (blhs)
280 lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
281 bitsize_int (bitsize), bitsize_int (bitpos));
282 if (swapped)
283 rhs = build_binary_op (loc, opcode, rhs, lhs, true);
284 else if (opcode != NOP_EXPR)
285 rhs = build_binary_op (loc, opcode, lhs, rhs, true);
286 opcode = NOP_EXPR;
288 else if (swapped)
290 rhs = build_binary_op (loc, opcode, rhs, lhs, true);
291 opcode = NOP_EXPR;
293 bool save = in_late_binary_op;
294 in_late_binary_op = true;
295 x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
296 loc, rhs, NULL_TREE);
297 in_late_binary_op = save;
298 if (x == error_mark_node)
299 return error_mark_node;
300 if (TREE_CODE (x) == COMPOUND_EXPR)
302 pre = TREE_OPERAND (x, 0);
303 gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
304 x = TREE_OPERAND (x, 1);
306 gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
307 rhs = TREE_OPERAND (x, 1);
309 if (blhs)
310 rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
311 rhs, bitsize_int (bitpos));
313 /* Punt the actual generation of atomic operations to common code. */
314 if (code == OMP_ATOMIC)
315 type = void_type_node;
316 x = build2 (code, type, addr, rhs);
317 SET_EXPR_LOCATION (x, loc);
318 OMP_ATOMIC_SEQ_CST (x) = seq_cst;
320 /* Generally it is hard to prove lhs1 and lhs are the same memory
321 location, just diagnose different variables. */
322 if (rhs1
323 && VAR_P (rhs1)
324 && VAR_P (orig_lhs)
325 && rhs1 != orig_lhs
326 && !test)
328 if (code == OMP_ATOMIC)
329 error_at (loc, "%<#pragma omp atomic update%> uses two different "
330 "variables for memory");
331 else
332 error_at (loc, "%<#pragma omp atomic capture%> uses two different "
333 "variables for memory");
334 return error_mark_node;
337 if (lhs1
338 && lhs1 != orig_lhs
339 && TREE_CODE (lhs1) == COMPONENT_REF
340 && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
341 && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
342 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
344 tree field = TREE_OPERAND (lhs1, 1);
345 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
346 lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
347 repr, TREE_OPERAND (lhs1, 2));
349 if (rhs1
350 && rhs1 != orig_lhs
351 && TREE_CODE (rhs1) == COMPONENT_REF
352 && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
353 && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
354 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
356 tree field = TREE_OPERAND (rhs1, 1);
357 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
358 rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
359 repr, TREE_OPERAND (rhs1, 2));
362 if (code != OMP_ATOMIC)
364 /* Generally it is hard to prove lhs1 and lhs are the same memory
365 location, just diagnose different variables. */
366 if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
368 if (lhs1 != orig_lhs && !test)
370 error_at (loc, "%<#pragma omp atomic capture%> uses two "
371 "different variables for memory");
372 return error_mark_node;
375 if (blhs)
376 x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
377 bitsize_int (bitsize), bitsize_int (bitpos));
378 x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
379 loc, x, NULL_TREE);
380 if (rhs1 && rhs1 != orig_lhs)
382 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
383 if (rhs1addr == error_mark_node)
384 return error_mark_node;
385 x = omit_one_operand_loc (loc, type, x, rhs1addr);
387 if (lhs1 && lhs1 != orig_lhs)
389 tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
390 if (lhs1addr == error_mark_node)
391 return error_mark_node;
392 if (code == OMP_ATOMIC_CAPTURE_OLD)
393 x = omit_one_operand_loc (loc, type, x, lhs1addr);
394 else
396 if (!test)
397 x = save_expr (x);
398 x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
402 else if (rhs1 && rhs1 != orig_lhs)
404 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
405 if (rhs1addr == error_mark_node)
406 return error_mark_node;
407 x = omit_one_operand_loc (loc, type, x, rhs1addr);
410 if (pre)
411 x = omit_one_operand_loc (loc, type, x, pre);
412 return x;
416 /* Complete a #pragma omp flush construct. We don't do anything with
417 the variable list that the syntax allows. LOC is the location of
418 the #pragma. */
420 void
421 c_finish_omp_flush (location_t loc)
423 tree x;
425 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
426 x = build_call_expr_loc (loc, x, 0);
427 add_stmt (x);
431 /* Check and canonicalize OMP_FOR increment expression.
432 Helper function for c_finish_omp_for. */
434 static tree
435 check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
437 tree t;
439 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
440 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
441 return error_mark_node;
443 if (exp == decl)
444 return build_int_cst (TREE_TYPE (exp), 0);
446 switch (TREE_CODE (exp))
448 CASE_CONVERT:
449 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
450 if (t != error_mark_node)
451 return fold_convert_loc (loc, TREE_TYPE (exp), t);
452 break;
453 case MINUS_EXPR:
454 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
455 if (t != error_mark_node)
456 return fold_build2_loc (loc, MINUS_EXPR,
457 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
458 break;
459 case PLUS_EXPR:
460 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
461 if (t != error_mark_node)
462 return fold_build2_loc (loc, PLUS_EXPR,
463 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
464 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
465 if (t != error_mark_node)
466 return fold_build2_loc (loc, PLUS_EXPR,
467 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
468 break;
469 case COMPOUND_EXPR:
471 /* cp_build_modify_expr forces preevaluation of the RHS to make
472 sure that it is evaluated before the lvalue-rvalue conversion
473 is applied to the LHS. Reconstruct the original expression. */
474 tree op0 = TREE_OPERAND (exp, 0);
475 if (TREE_CODE (op0) == TARGET_EXPR
476 && !VOID_TYPE_P (TREE_TYPE (op0)))
478 tree op1 = TREE_OPERAND (exp, 1);
479 tree temp = TARGET_EXPR_SLOT (op0);
480 if (BINARY_CLASS_P (op1)
481 && TREE_OPERAND (op1, 1) == temp)
483 op1 = copy_node (op1);
484 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
485 return check_omp_for_incr_expr (loc, op1, decl);
488 break;
490 default:
491 break;
494 return error_mark_node;
497 /* If the OMP_FOR increment expression in INCR is of pointer type,
498 canonicalize it into an expression handled by gimplify_omp_for()
499 and return it. DECL is the iteration variable. */
501 static tree
502 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
504 if (POINTER_TYPE_P (TREE_TYPE (decl))
505 && TREE_OPERAND (incr, 1))
507 tree t = fold_convert_loc (loc,
508 sizetype, TREE_OPERAND (incr, 1));
510 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
511 || TREE_CODE (incr) == PREDECREMENT_EXPR)
512 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
513 t = fold_build_pointer_plus (decl, t);
514 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
516 return incr;
519 /* Validate and generate OMP_FOR.
520 DECLV is a vector of iteration variables, for each collapsed loop.
522 ORIG_DECLV, if non-NULL, is a vector with the original iteration
523 variables (prior to any transformations, by say, C++ iterators).
525 INITV, CONDV and INCRV are vectors containing initialization
526 expressions, controlling predicates and increment expressions.
527 BODY is the body of the loop and PRE_BODY statements that go before
528 the loop. */
530 tree
531 c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
532 tree orig_declv, tree initv, tree condv, tree incrv,
533 tree body, tree pre_body)
535 location_t elocus;
536 bool fail = false;
537 int i;
539 if ((code == CILK_SIMD || code == CILK_FOR)
540 && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
541 fail = true;
543 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
544 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
545 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
546 for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
548 tree decl = TREE_VEC_ELT (declv, i);
549 tree init = TREE_VEC_ELT (initv, i);
550 tree cond = TREE_VEC_ELT (condv, i);
551 tree incr = TREE_VEC_ELT (incrv, i);
553 elocus = locus;
554 if (EXPR_HAS_LOCATION (init))
555 elocus = EXPR_LOCATION (init);
557 /* Validate the iteration variable. */
558 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
559 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
561 error_at (elocus, "invalid type for iteration variable %qE", decl);
562 fail = true;
564 else if (TYPE_ATOMIC (TREE_TYPE (decl)))
566 error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
567 fail = true;
568 /* _Atomic iterator confuses stuff too much, so we risk ICE
569 trying to diagnose it further. */
570 continue;
573 /* In the case of "for (int i = 0...)", init will be a decl. It should
574 have a DECL_INITIAL that we can turn into an assignment. */
575 if (init == decl)
577 elocus = DECL_SOURCE_LOCATION (decl);
579 init = DECL_INITIAL (decl);
580 if (init == NULL)
582 error_at (elocus, "%qE is not initialized", decl);
583 init = integer_zero_node;
584 fail = true;
586 DECL_INITIAL (decl) = NULL_TREE;
588 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
589 /* FIXME diagnostics: This should
590 be the location of the INIT. */
591 elocus,
592 init,
593 NULL_TREE);
595 if (init != error_mark_node)
597 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
598 gcc_assert (TREE_OPERAND (init, 0) == decl);
601 if (cond == NULL_TREE)
603 error_at (elocus, "missing controlling predicate");
604 fail = true;
606 else
608 bool cond_ok = false;
610 /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
611 evaluation of the vla VAR_DECL. We need to readd
612 them to the non-decl operand. See PR45784. */
613 while (TREE_CODE (cond) == COMPOUND_EXPR)
614 cond = TREE_OPERAND (cond, 1);
616 if (EXPR_HAS_LOCATION (cond))
617 elocus = EXPR_LOCATION (cond);
619 if (TREE_CODE (cond) == LT_EXPR
620 || TREE_CODE (cond) == LE_EXPR
621 || TREE_CODE (cond) == GT_EXPR
622 || TREE_CODE (cond) == GE_EXPR
623 || TREE_CODE (cond) == NE_EXPR
624 || TREE_CODE (cond) == EQ_EXPR)
626 tree op0 = TREE_OPERAND (cond, 0);
627 tree op1 = TREE_OPERAND (cond, 1);
629 /* 2.5.1. The comparison in the condition is computed in
630 the type of DECL, otherwise the behavior is undefined.
632 For example:
633 long n; int i;
634 i < n;
636 according to ISO will be evaluated as:
637 (long)i < n;
639 We want to force:
640 i < (int)n; */
641 if (TREE_CODE (op0) == NOP_EXPR
642 && decl == TREE_OPERAND (op0, 0))
644 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
645 TREE_OPERAND (cond, 1)
646 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
647 TREE_OPERAND (cond, 1));
649 else if (TREE_CODE (op1) == NOP_EXPR
650 && decl == TREE_OPERAND (op1, 0))
652 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
653 TREE_OPERAND (cond, 0)
654 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
655 TREE_OPERAND (cond, 0));
658 if (decl == TREE_OPERAND (cond, 0))
659 cond_ok = true;
660 else if (decl == TREE_OPERAND (cond, 1))
662 TREE_SET_CODE (cond,
663 swap_tree_comparison (TREE_CODE (cond)));
664 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
665 TREE_OPERAND (cond, 0) = decl;
666 cond_ok = true;
669 if (TREE_CODE (cond) == NE_EXPR
670 || TREE_CODE (cond) == EQ_EXPR)
672 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
674 if (code != CILK_SIMD && code != CILK_FOR)
675 cond_ok = false;
677 else if (operand_equal_p (TREE_OPERAND (cond, 1),
678 TYPE_MIN_VALUE (TREE_TYPE (decl)),
680 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
681 ? GT_EXPR : LE_EXPR);
682 else if (operand_equal_p (TREE_OPERAND (cond, 1),
683 TYPE_MAX_VALUE (TREE_TYPE (decl)),
685 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
686 ? LT_EXPR : GE_EXPR);
687 else if (code != CILK_SIMD && code != CILK_FOR)
688 cond_ok = false;
691 if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
693 tree ce = NULL_TREE, *pce = &ce;
694 tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
695 for (tree c = TREE_VEC_ELT (condv, i); c != cond;
696 c = TREE_OPERAND (c, 1))
698 *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
699 TREE_OPERAND (cond, 1));
700 pce = &TREE_OPERAND (*pce, 1);
702 TREE_OPERAND (cond, 1) = ce;
703 TREE_VEC_ELT (condv, i) = cond;
707 if (!cond_ok)
709 error_at (elocus, "invalid controlling predicate");
710 fail = true;
714 if (incr == NULL_TREE)
716 error_at (elocus, "missing increment expression");
717 fail = true;
719 else
721 bool incr_ok = false;
723 if (EXPR_HAS_LOCATION (incr))
724 elocus = EXPR_LOCATION (incr);
726 /* Check all the valid increment expressions: v++, v--, ++v, --v,
727 v = v + incr, v = incr + v and v = v - incr. */
728 switch (TREE_CODE (incr))
730 case POSTINCREMENT_EXPR:
731 case PREINCREMENT_EXPR:
732 case POSTDECREMENT_EXPR:
733 case PREDECREMENT_EXPR:
734 if (TREE_OPERAND (incr, 0) != decl)
735 break;
737 incr_ok = true;
738 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
739 break;
741 case COMPOUND_EXPR:
742 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
743 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
744 break;
745 incr = TREE_OPERAND (incr, 1);
746 /* FALLTHRU */
747 case MODIFY_EXPR:
748 if (TREE_OPERAND (incr, 0) != decl)
749 break;
750 if (TREE_OPERAND (incr, 1) == decl)
751 break;
752 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
753 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
754 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
755 incr_ok = true;
756 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
757 || (TREE_CODE (TREE_OPERAND (incr, 1))
758 == POINTER_PLUS_EXPR))
759 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
760 incr_ok = true;
761 else
763 tree t = check_omp_for_incr_expr (elocus,
764 TREE_OPERAND (incr, 1),
765 decl);
766 if (t != error_mark_node)
768 incr_ok = true;
769 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
770 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
773 break;
775 default:
776 break;
778 if (!incr_ok)
780 error_at (elocus, "invalid increment expression");
781 fail = true;
785 TREE_VEC_ELT (initv, i) = init;
786 TREE_VEC_ELT (incrv, i) = incr;
789 if (fail)
790 return NULL;
791 else
793 tree t = make_node (code);
795 TREE_TYPE (t) = void_type_node;
796 OMP_FOR_INIT (t) = initv;
797 OMP_FOR_COND (t) = condv;
798 OMP_FOR_INCR (t) = incrv;
799 OMP_FOR_BODY (t) = body;
800 OMP_FOR_PRE_BODY (t) = pre_body;
801 OMP_FOR_ORIG_DECLS (t) = orig_declv;
803 SET_EXPR_LOCATION (t, locus);
804 return t;
808 /* Type for passing data in between c_omp_check_loop_iv and
809 c_omp_check_loop_iv_r. */
811 struct c_omp_check_loop_iv_data
813 tree declv;
814 bool fail;
815 location_t stmt_loc;
816 location_t expr_loc;
817 int kind;
818 walk_tree_lh lh;
819 hash_set<tree> *ppset;
822 /* Helper function called via walk_tree, to diagnose uses
823 of associated loop IVs inside of lb, b and incr expressions
824 of OpenMP loops. */
826 static tree
827 c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
829 struct c_omp_check_loop_iv_data *d
830 = (struct c_omp_check_loop_iv_data *) data;
831 if (DECL_P (*tp))
833 int i;
834 for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
835 if (*tp == TREE_VEC_ELT (d->declv, i))
837 location_t loc = d->expr_loc;
838 if (loc == UNKNOWN_LOCATION)
839 loc = d->stmt_loc;
840 switch (d->kind)
842 case 0:
843 error_at (loc, "initializer expression refers to "
844 "iteration variable %qD", *tp);
845 break;
846 case 1:
847 error_at (loc, "condition expression refers to "
848 "iteration variable %qD", *tp);
849 break;
850 case 2:
851 error_at (loc, "increment expression refers to "
852 "iteration variable %qD", *tp);
853 break;
855 d->fail = true;
858 /* Don't walk dtors added by C++ wrap_cleanups_r. */
859 else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
860 && TRY_CATCH_IS_CLEANUP (*tp))
862 *walk_subtrees = 0;
863 return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
864 d->ppset, d->lh);
867 return NULL_TREE;
870 /* Diagnose invalid references to loop iterators in lb, b and incr
871 expressions. */
873 bool
874 c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
876 hash_set<tree> pset;
877 struct c_omp_check_loop_iv_data data;
878 int i;
880 data.declv = declv;
881 data.fail = false;
882 data.stmt_loc = EXPR_LOCATION (stmt);
883 data.lh = lh;
884 data.ppset = &pset;
885 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
887 tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
888 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
889 tree decl = TREE_OPERAND (init, 0);
890 tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
891 gcc_assert (COMPARISON_CLASS_P (cond));
892 gcc_assert (TREE_OPERAND (cond, 0) == decl);
893 tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
894 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
895 data.kind = 0;
896 walk_tree_1 (&TREE_OPERAND (init, 1),
897 c_omp_check_loop_iv_r, &data, &pset, lh);
898 /* Don't warn for C++ random access iterators here, the
899 expression then involves the subtraction and always refers
900 to the original value. The C++ FE needs to warn on those
901 earlier. */
902 if (decl == TREE_VEC_ELT (declv, i))
904 data.expr_loc = EXPR_LOCATION (cond);
905 data.kind = 1;
906 walk_tree_1 (&TREE_OPERAND (cond, 1),
907 c_omp_check_loop_iv_r, &data, &pset, lh);
909 if (TREE_CODE (incr) == MODIFY_EXPR)
911 gcc_assert (TREE_OPERAND (incr, 0) == decl);
912 incr = TREE_OPERAND (incr, 1);
913 data.kind = 2;
914 if (TREE_CODE (incr) == PLUS_EXPR
915 && TREE_OPERAND (incr, 1) == decl)
917 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
918 walk_tree_1 (&TREE_OPERAND (incr, 0),
919 c_omp_check_loop_iv_r, &data, &pset, lh);
921 else
923 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
924 walk_tree_1 (&TREE_OPERAND (incr, 1),
925 c_omp_check_loop_iv_r, &data, &pset, lh);
929 return !data.fail;
932 /* Similar, but allows to check the init or cond expressions individually. */
934 bool
935 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
936 tree init, tree cond, walk_tree_lh lh)
938 hash_set<tree> pset;
939 struct c_omp_check_loop_iv_data data;
941 data.declv = declv;
942 data.fail = false;
943 data.stmt_loc = stmt_loc;
944 data.lh = lh;
945 data.ppset = &pset;
946 if (init)
948 data.expr_loc = EXPR_LOCATION (init);
949 data.kind = 0;
950 walk_tree_1 (&init,
951 c_omp_check_loop_iv_r, &data, &pset, lh);
953 if (cond)
955 gcc_assert (COMPARISON_CLASS_P (cond));
956 data.expr_loc = EXPR_LOCATION (init);
957 data.kind = 1;
958 if (TREE_OPERAND (cond, 0) == decl)
959 walk_tree_1 (&TREE_OPERAND (cond, 1),
960 c_omp_check_loop_iv_r, &data, &pset, lh);
961 else
962 walk_tree_1 (&TREE_OPERAND (cond, 0),
963 c_omp_check_loop_iv_r, &data, &pset, lh);
965 return !data.fail;
968 /* This function splits clauses for OpenACC combined loop
969 constructs. OpenACC combined loop constructs are:
970 #pragma acc kernels loop
971 #pragma acc parallel loop */
973 tree
974 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
975 bool is_parallel)
977 tree next, loop_clauses, nc;
979 loop_clauses = *not_loop_clauses = NULL_TREE;
980 for (; clauses ; clauses = next)
982 next = OMP_CLAUSE_CHAIN (clauses);
984 switch (OMP_CLAUSE_CODE (clauses))
986 /* Loop clauses. */
987 case OMP_CLAUSE_COLLAPSE:
988 case OMP_CLAUSE_TILE:
989 case OMP_CLAUSE_GANG:
990 case OMP_CLAUSE_WORKER:
991 case OMP_CLAUSE_VECTOR:
992 case OMP_CLAUSE_AUTO:
993 case OMP_CLAUSE_SEQ:
994 case OMP_CLAUSE_INDEPENDENT:
995 case OMP_CLAUSE_PRIVATE:
996 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
997 loop_clauses = clauses;
998 break;
1000 /* Reductions must be duplicated on both constructs. */
1001 case OMP_CLAUSE_REDUCTION:
1002 if (is_parallel)
1004 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1005 OMP_CLAUSE_REDUCTION);
1006 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
1007 OMP_CLAUSE_REDUCTION_CODE (nc)
1008 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1009 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
1010 *not_loop_clauses = nc;
1013 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1014 loop_clauses = clauses;
1015 break;
1017 /* Parallel/kernels clauses. */
1018 default:
1019 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
1020 *not_loop_clauses = clauses;
1021 break;
1025 return loop_clauses;
1028 /* This function attempts to split or duplicate clauses for OpenMP
1029 combined/composite constructs. Right now there are 21 different
1030 constructs. CODE is the innermost construct in the combined construct,
1031 and MASK allows to determine which constructs are combined together,
1032 as every construct has at least one clause that no other construct
1033 has (except for OMP_SECTIONS, but that can be only combined with parallel).
1034 OpenMP combined/composite constructs are:
1035 #pragma omp distribute parallel for
1036 #pragma omp distribute parallel for simd
1037 #pragma omp distribute simd
1038 #pragma omp for simd
1039 #pragma omp parallel for
1040 #pragma omp parallel for simd
1041 #pragma omp parallel sections
1042 #pragma omp target parallel
1043 #pragma omp target parallel for
1044 #pragma omp target parallel for simd
1045 #pragma omp target teams
1046 #pragma omp target teams distribute
1047 #pragma omp target teams distribute parallel for
1048 #pragma omp target teams distribute parallel for simd
1049 #pragma omp target teams distribute simd
1050 #pragma omp target simd
1051 #pragma omp taskloop simd
1052 #pragma omp teams distribute
1053 #pragma omp teams distribute parallel for
1054 #pragma omp teams distribute parallel for simd
1055 #pragma omp teams distribute simd */
/* Split the single clause chain CLAUSES of a combined OpenMP construct
   into per-leaf-construct chains stored in CCLAUSES[C_OMP_CLAUSE_SPLIT_*].
   CODE is the tree code of the innermost leaf construct (e.g. OMP_SIMD
   for "target teams distribute parallel for simd"); MASK is the set of
   PRAGMA_OMP_CLAUSE_* bits the combined construct accepts, and is used
   below to deduce which outer constructs are present.  LOC is used for
   implicitly created clauses.  Each clause is moved (or duplicated) onto
   the chain(s) of the construct(s) it applies to.  */
1057 void
1058 c_omp_split_clauses (location_t loc, enum tree_code code,
1059 		     omp_clause_mask mask, tree clauses, tree *cclauses)
1061   tree next, c;
1062   enum c_omp_clause_split s;
1063   int i;
1065   for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
1066     cclauses[i] = NULL;
1067   /* Add implicit nowait clause on
1068      #pragma omp parallel {for,for simd,sections}.  */
1069   if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1070     switch (code)
1072       case OMP_FOR:
1073       case OMP_SIMD:
1074 	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
1075 	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1076 	break;
1077       case OMP_SECTIONS:
1078 	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
1079 	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1080 	break;
1081       default:
1082 	break;
     /* Walk the chain, assigning each clause a destination S (and
	duplicating it onto additional constructs where required).  */
1085   for (; clauses ; clauses = next)
1087       next = OMP_CLAUSE_CHAIN (clauses);
1089       switch (OMP_CLAUSE_CODE (clauses))
1091 	  /* First the clauses that are unique to some constructs.  */
1092 	case OMP_CLAUSE_DEVICE:
1093 	case OMP_CLAUSE_MAP:
1094 	case OMP_CLAUSE_IS_DEVICE_PTR:
1095 	case OMP_CLAUSE_DEFAULTMAP:
1096 	case OMP_CLAUSE_DEPEND:
1097 	  s = C_OMP_CLAUSE_SPLIT_TARGET;
1098 	  break;
1099 	case OMP_CLAUSE_NUM_TEAMS:
1100 	case OMP_CLAUSE_THREAD_LIMIT:
1101 	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
1102 	  break;
1103 	case OMP_CLAUSE_DIST_SCHEDULE:
1104 	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1105 	  break;
1106 	case OMP_CLAUSE_COPYIN:
1107 	case OMP_CLAUSE_NUM_THREADS:
1108 	case OMP_CLAUSE_PROC_BIND:
1109 	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1110 	  break;
1111 	case OMP_CLAUSE_ORDERED:
1112 	  s = C_OMP_CLAUSE_SPLIT_FOR;
1113 	  break;
1114 	case OMP_CLAUSE_SCHEDULE:
1115 	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  /* Clear the simd schedule modifier when the innermost construct
	     is not simd.  */
1116 	  if (code != OMP_SIMD)
1117 	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
1118 	  break;
1119 	case OMP_CLAUSE_SAFELEN:
1120 	case OMP_CLAUSE_SIMDLEN:
1121 	case OMP_CLAUSE_ALIGNED:
1122 	  s = C_OMP_CLAUSE_SPLIT_SIMD;
1123 	  break;
1124 	case OMP_CLAUSE_GRAINSIZE:
1125 	case OMP_CLAUSE_NUM_TASKS:
1126 	case OMP_CLAUSE_FINAL:
1127 	case OMP_CLAUSE_UNTIED:
1128 	case OMP_CLAUSE_MERGEABLE:
1129 	case OMP_CLAUSE_NOGROUP:
1130 	case OMP_CLAUSE_PRIORITY:
1131 	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1132 	  break;
1133 	  /* Duplicate this to all of taskloop, distribute, for and simd.  */
1134 	case OMP_CLAUSE_COLLAPSE:
1135 	  if (code == OMP_SIMD)
1137 	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
1138 			   | (OMP_CLAUSE_MASK_1
1139 			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
1140 			   | (OMP_CLAUSE_MASK_1
1141 			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
1143 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1144 					OMP_CLAUSE_COLLAPSE);
1145 		  OMP_CLAUSE_COLLAPSE_EXPR (c)
1146 		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1147 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1148 		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1150 	      else
1152 		  /* This must be #pragma omp target simd */
1153 		  s = C_OMP_CLAUSE_SPLIT_SIMD;
1154 		  break;
1157 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1159 	      if ((mask & (OMP_CLAUSE_MASK_1
1160 			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1162 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1163 					OMP_CLAUSE_COLLAPSE);
1164 		  OMP_CLAUSE_COLLAPSE_EXPR (c)
1165 		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1166 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
1167 		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
1168 		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1170 	      else
1171 		s = C_OMP_CLAUSE_SPLIT_FOR;
1173 	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1174 		   != 0)
1175 	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1176 	  else
1177 	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1178 	  break;
1179 	  /* Private clause is supported on all constructs,
1180 	     it is enough to put it on the innermost one.  For
1181 	     #pragma omp {for,sections} put it on parallel though,
1182 	     as that's what we did for OpenMP 3.1.  */
1183 	case OMP_CLAUSE_PRIVATE:
1184 	  switch (code)
1186 	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
1187 	    case OMP_FOR: case OMP_SECTIONS:
1188 	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1189 	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
1190 	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
1191 	    default: gcc_unreachable ();
1193 	  break;
1194 	  /* Firstprivate clause is supported on all constructs but
1195 	     simd.  Put it on the outermost of those and duplicate on teams
1196 	     and parallel.  */
1197 	case OMP_CLAUSE_FIRSTPRIVATE:
1198 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1199 	      != 0)
1201 	      if (code == OMP_SIMD
1202 		  && (mask & ((OMP_CLAUSE_MASK_1
1203 			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
1204 			      | (OMP_CLAUSE_MASK_1
1205 				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
1207 		  /* This must be #pragma omp target simd.  */
1208 		  s = C_OMP_CLAUSE_SPLIT_TARGET;
1209 		  break;
1211 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1212 				    OMP_CLAUSE_FIRSTPRIVATE);
1213 	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1214 	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
1215 	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
1217 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1218 	      != 0)
1220 	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
1221 			   | (OMP_CLAUSE_MASK_1
1222 			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
1224 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1225 					OMP_CLAUSE_FIRSTPRIVATE);
1226 		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1227 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1228 		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1229 		  if ((mask & (OMP_CLAUSE_MASK_1
1230 			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
1231 		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
1232 		  else
1233 		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1235 	      else
1236 		/* This must be
1237 		   #pragma omp parallel{, for{, simd}, sections}
1239 		   #pragma omp target parallel.  */
1240 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1242 	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1243 		   != 0)
1245 	      /* This must be one of
1246 		 #pragma omp {,target }teams distribute
1247 		 #pragma omp target teams
1248 		 #pragma omp {,target }teams distribute simd.  */
1249 	      gcc_assert (code == OMP_DISTRIBUTE
1250 			  || code == OMP_TEAMS
1251 			  || code == OMP_SIMD);
1252 	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
1254 	  else if ((mask & (OMP_CLAUSE_MASK_1
1255 			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1257 	      /* This must be #pragma omp distribute simd.  */
1258 	      gcc_assert (code == OMP_SIMD);
1259 	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1261 	  else if ((mask & (OMP_CLAUSE_MASK_1
1262 			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1264 	      /* This must be #pragma omp taskloop simd.  */
1265 	      gcc_assert (code == OMP_SIMD);
1266 	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1268 	  else
1270 	      /* This must be #pragma omp for simd.  */
1271 	      gcc_assert (code == OMP_SIMD);
1272 	      s = C_OMP_CLAUSE_SPLIT_FOR;
1274 	  break;
1275 	  /* Lastprivate is allowed on distribute, for, sections and simd.  In
1276 	     parallel {for{, simd},sections} we actually want to put it on
1277 	     parallel rather than for or sections.  */
1278 	case OMP_CLAUSE_LASTPRIVATE:
1279 	  if (code == OMP_DISTRIBUTE)
1281 	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1282 	      break;
1284 	  if ((mask & (OMP_CLAUSE_MASK_1
1285 		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1287 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1288 				    OMP_CLAUSE_LASTPRIVATE);
1289 	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1290 	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
1291 	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
1293 	  if (code == OMP_FOR || code == OMP_SECTIONS)
1295 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1296 		  != 0)
1297 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1298 	      else
1299 		s = C_OMP_CLAUSE_SPLIT_FOR;
1300 	      break;
1302 	  gcc_assert (code == OMP_SIMD);
1303 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1305 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1306 				    OMP_CLAUSE_LASTPRIVATE);
1307 	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1308 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1309 		  != 0)
1310 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1311 	      else
1312 		s = C_OMP_CLAUSE_SPLIT_FOR;
1313 	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
1314 	      cclauses[s] = c;
1316 	  s = C_OMP_CLAUSE_SPLIT_SIMD;
1317 	  break;
1318 	  /* Shared and default clauses are allowed on parallel, teams and
1319 	     taskloop.  */
1320 	case OMP_CLAUSE_SHARED:
1321 	case OMP_CLAUSE_DEFAULT:
1322 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1323 	      != 0)
1325 	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1326 	      break;
1328 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1329 	      != 0)
1331 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1332 		  == 0)
1334 		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
1335 		  break;
1337 	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1338 				    OMP_CLAUSE_CODE (clauses));
1339 	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1340 		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1341 	      else
1342 		OMP_CLAUSE_DEFAULT_KIND (c)
1343 		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
1344 	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
1345 	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
1347 	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1348 	  break;
1349 	  /* Reduction is allowed on simd, for, parallel, sections and teams.
1350 	     Duplicate it on all of them, but omit on for or sections if
1351 	     parallel is present.  */
1352 	case OMP_CLAUSE_REDUCTION:
1353 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1355 	      if (code == OMP_SIMD)
1357 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1358 					OMP_CLAUSE_REDUCTION);
1359 		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1360 		  OMP_CLAUSE_REDUCTION_CODE (c)
1361 		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
1362 		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1363 		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1364 		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1365 		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1366 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1367 		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1369 	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1370 		  != 0)
1372 		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1373 					OMP_CLAUSE_REDUCTION);
1374 		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1375 		  OMP_CLAUSE_REDUCTION_CODE (c)
1376 		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
1377 		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1378 		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1379 		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1380 		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1381 		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1382 		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1383 		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
1385 	      else if ((mask & (OMP_CLAUSE_MASK_1
1386 				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1387 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1388 	      else
1389 		s = C_OMP_CLAUSE_SPLIT_FOR;
1391 	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
1392 	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1393 	  else if (code == OMP_SIMD)
1394 	    s = C_OMP_CLAUSE_SPLIT_SIMD;
1395 	  else
1396 	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
1397 	  break;
1398 	case OMP_CLAUSE_IF:
1399 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1400 	      != 0)
1401 	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1402 	  else if ((mask & (OMP_CLAUSE_MASK_1
1403 			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1405 	      if ((mask & (OMP_CLAUSE_MASK_1
1406 			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
1408 		  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
1409 		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1410 		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
1411 		    s = C_OMP_CLAUSE_SPLIT_TARGET;
1412 		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
1414 		      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1415 					    OMP_CLAUSE_IF);
1416 		      OMP_CLAUSE_IF_MODIFIER (c)
1417 			= OMP_CLAUSE_IF_MODIFIER (clauses);
1418 		      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
1419 		      OMP_CLAUSE_CHAIN (c)
1420 			= cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
1421 		      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
1422 		      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1424 		  else
1426 		      error_at (OMP_CLAUSE_LOCATION (clauses),
1427 				"expected %<parallel%> or %<target%> %<if%> "
1428 				"clause modifier");
1429 		      continue;
1432 	      else
1433 		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1435 	  else
1436 	    s = C_OMP_CLAUSE_SPLIT_TARGET;
1437 	  break;
1438 	case OMP_CLAUSE_LINEAR:
1439 	  /* Linear clause is allowed on simd and for.  Put it on the
1440 	     innermost construct.  */
1441 	  if (code == OMP_SIMD)
1442 	    s = C_OMP_CLAUSE_SPLIT_SIMD;
1443 	  else
1444 	    s = C_OMP_CLAUSE_SPLIT_FOR;
1445 	  break;
1446 	case OMP_CLAUSE_NOWAIT:
1447 	  /* Nowait clause is allowed on target, for and sections, but
1448 	     is not allowed on parallel for or parallel sections.  Therefore,
1449 	     put it on target construct if present, because that can only
1450 	     be combined with parallel for{, simd} and not with for{, simd},
1451 	     otherwise to the worksharing construct.  */
1452 	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1453 	      != 0)
1454 	    s = C_OMP_CLAUSE_SPLIT_TARGET;
1455 	  else
1456 	    s = C_OMP_CLAUSE_SPLIT_FOR;
1457 	  break;
1458 	default:
1459 	  gcc_unreachable ();
     /* Prepend the (possibly already duplicated) clause onto the chain
	selected above.  */
1461       OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
1462       cclauses[s] = clauses;
  /* With checking enabled, verify no clause ended up on a construct
     that MASK/CODE says is absent from the combined construct.  */
1465   if (!flag_checking)
1466     return;
1468   if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
1469     gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
1470   if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
1471     gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
1472   if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
1473     gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
1474   if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
1475     gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
1476   if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
1477 	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
1478       && code != OMP_SECTIONS)
1479     gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
1480   if (code != OMP_SIMD)
1481     gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
1485 /* qsort callback to compare #pragma omp declare simd clauses. */
1487 static int
1488 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
1490 tree a = *(const tree *) p;
1491 tree b = *(const tree *) q;
1492 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
1494 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
1495 return -1;
1496 return 1;
1498 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
1499 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
1500 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
1502 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
1503 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
1504 if (c < d)
1505 return 1;
1506 if (c > d)
1507 return -1;
1509 return 0;
1512 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1513 CLAUSES on FNDECL into argument indexes and sort them. */
1515 tree
1516 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1518 tree c;
1519 vec<tree> clvec = vNULL;
1521 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1523 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1524 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1525 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1527 tree decl = OMP_CLAUSE_DECL (c);
1528 tree arg;
1529 int idx;
1530 for (arg = parms, idx = 0; arg;
1531 arg = TREE_CHAIN (arg), idx++)
1532 if (arg == decl)
1533 break;
1534 if (arg == NULL_TREE)
1536 error_at (OMP_CLAUSE_LOCATION (c),
1537 "%qD is not an function argument", decl);
1538 continue;
1540 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1541 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1542 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1544 decl = OMP_CLAUSE_LINEAR_STEP (c);
1545 for (arg = parms, idx = 0; arg;
1546 arg = TREE_CHAIN (arg), idx++)
1547 if (arg == decl)
1548 break;
1549 if (arg == NULL_TREE)
1551 error_at (OMP_CLAUSE_LOCATION (c),
1552 "%qD is not an function argument", decl);
1553 continue;
1555 OMP_CLAUSE_LINEAR_STEP (c)
1556 = build_int_cst (integer_type_node, idx);
1559 clvec.safe_push (c);
1561 if (!clvec.is_empty ())
1563 unsigned int len = clvec.length (), i;
1564 clvec.qsort (c_omp_declare_simd_clause_cmp);
1565 clauses = clvec[0];
1566 for (i = 0; i < len; i++)
1567 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1569 else
1570 clauses = NULL_TREE;
1571 clvec.release ();
1572 return clauses;
1575 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1577 void
1578 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1580 tree c;
1582 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1583 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1584 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1585 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1587 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1588 tree arg;
1589 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1590 arg = TREE_CHAIN (arg), i++)
1591 if (i == idx)
1592 break;
1593 gcc_assert (arg);
1594 OMP_CLAUSE_DECL (c) = arg;
1595 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1596 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
1598 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
1599 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1600 arg = TREE_CHAIN (arg), i++)
1601 if (i == idx)
1602 break;
1603 gcc_assert (arg);
1604 OMP_CLAUSE_LINEAR_STEP (c) = arg;
1609 /* True if OpenMP sharing attribute of DECL is predetermined. */
1611 enum omp_clause_default_kind
1612 c_omp_predetermined_sharing (tree decl)
1614 /* Variables with const-qualified type having no mutable member
1615 are predetermined shared. */
1616 if (TREE_READONLY (decl))
1617 return OMP_CLAUSE_DEFAULT_SHARED;
1619 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;