/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
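
/* As a rough illustration of the end result (the names and the exact
   GIMPLE are assumed for this sketch, not copied from a dump), a directive
   such as

	int x;
	#pragma omp parallel shared(x)
	  x += foo ();

   is outlined into a child function and replaced by calls into libgomp
   along the lines of

	struct .omp_data_s { int *x; } .omp_data_o;
	.omp_data_o.x = &x;
	__builtin_GOMP_parallel_start (main._omp_fn.0, &.omp_data_o, 0);
	main._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   where the .omp_data_s record carries the shared and firstprivate data
   described by the clauses.  */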
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
				PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
				MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
				NEGATE_EXPR, TREE_TYPE (loop->step),
				loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
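
/* A worked example of the collapse handling above (the concrete numbers
   are assumed for illustration): given

	#pragma omp for collapse(2)
	for (i = 0; i < 8; i++)
	  for (j = 0; j < 4; j++)
	    body;

   each loop's trip count is folded as (n2 - n1 + step - 1) / step for a
   < comparison, the counts are multiplied, and *collapse_count becomes
   the constant 32, so the single logical iteration variable runs from 0
   to 31 with step 1 and cond_code LT_EXPR.  */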
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
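
/* A hedged sketch of how these extra arguments are consumed later (the
   exact libgomp entry point depends on the schedule kind): for a combined
   parallel loop with schedule(dynamic, 4), expansion ends up emitting a
   call roughly like

	GOMP_parallel_loop_dynamic_start (fn, &.omp_data_o,
					  0, n1, n2, step, 4);

   so the start bound, end bound, step and chunk size pushed into WS_ARGS
   above are appended after the usual child-function and data arguments
   (the 0 asks the runtime to choose the thread count).  */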
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
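
/* Two illustrative cases (assumed examples, not taken from the sources):
   a non-addressable scalar such as

	int n;
	#pragma omp parallel shared(n)

   gets copy-in/copy-out semantics (this function returns false), while an
   aggregate or address-taken variable such as

	int a[10];
	#pragma omp parallel shared(a)

   is passed by pointer (this function returns true), so every thread works
   on the parent's storage directly.  */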
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   function's context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
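
/* A sketch of what this scanning produces (assumed example, not dumped
   from the compiler): with

	int a, b[64];
	#pragma omp parallel shared(b) firstprivate(a)

   the parallel context ends up with a communication record roughly like

	struct .omp_data_s { int (*b)[64]; int a; };

   B, being an aggregate, is installed by reference (mask 3 with by_ref
   set) and A by value; install_var_local additionally creates the private
   copies that the child function body will use.  */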
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */

static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
/* Helper function scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;

    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}
/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.  The symbol iD.1562 cannot be referenced
	       inside the body of the outer parallel region.  But since we are
	       emitting this copy operation while expanding the inner parallel
	       directive, we need to access the CTX structure of the outer
	       parallel directive to get the correct mapping:

		  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
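/* As an illustrative sketch (the values follow from the cases above,
   shown here for a hypothetical "int x"):

	reduction(+:x)    ->  x initialized to 0
	reduction(*:x)    ->  x initialized to 1
	reduction(&:x)    ->  x initialized to -1 (all bits set)
	reduction(min:x)  ->  x initialized to INT_MAX
	reduction(max:x)  ->  x initialized to INT_MIN

   For floating point min/max the initializer is +/-Inf when the mode
   honors infinities, otherwise the largest/smallest representable
   value.  */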
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */
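/* As a rough sketch (variable and field names here are illustrative,
   not the exact temporaries the pass creates), "firstprivate(i)" and
   "copyin(t)" on a parallel might add to ILIST:

	i = .omp_data_i->i;			<- firstprivate copy-in

	if (__builtin_omp_get_thread_num () != 0)
	  t = .omp_data_i->t;			<- copyin from the master copy

   and a GOMP_barrier () call is appended when any copyin variable is
   passed by reference or a variable is both firstprivate and
   lastprivate, as done at the end of this function.  */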
2229 lower_rec_input_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*dlist
,
2232 gimple_stmt_iterator diter
;
2233 tree c
, dtor
, copyin_seq
, x
, ptr
;
2234 bool copyin_by_ref
= false;
2235 bool lastprivate_firstprivate
= false;
2238 *dlist
= gimple_seq_alloc ();
2239 diter
= gsi_start (*dlist
);
2242 /* Do all the fixed sized types in the first pass, and the variable sized
2243 types in the second pass. This makes sure that the scalar arguments to
2244 the variable sized types are processed before we use them in the
2245 variable sized operations. */
2246 for (pass
= 0; pass
< 2; ++pass
)
2248 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2250 enum omp_clause_code c_kind
= OMP_CLAUSE_CODE (c
);
2253 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2257 case OMP_CLAUSE_PRIVATE
:
2258 if (OMP_CLAUSE_PRIVATE_DEBUG (c
))
2261 case OMP_CLAUSE_SHARED
:
2262 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c
), ctx
) == NULL
)
2264 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c
)));
2267 case OMP_CLAUSE_FIRSTPRIVATE
:
2268 case OMP_CLAUSE_COPYIN
:
2269 case OMP_CLAUSE_REDUCTION
:
2271 case OMP_CLAUSE_LASTPRIVATE
:
2272 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2274 lastprivate_firstprivate
= true;
2283 new_var
= var
= OMP_CLAUSE_DECL (c
);
2284 if (c_kind
!= OMP_CLAUSE_COPYIN
)
2285 new_var
= lookup_decl (var
, ctx
);
2287 if (c_kind
== OMP_CLAUSE_SHARED
|| c_kind
== OMP_CLAUSE_COPYIN
)
2292 else if (is_variable_sized (var
))
2294 /* For variable sized types, we need to allocate the
2295 actual storage here. Call alloca and store the
2296 result in the pointer decl that we created elsewhere. */
2300 if (c_kind
!= OMP_CLAUSE_FIRSTPRIVATE
|| !is_task_ctx (ctx
))
2305 ptr
= DECL_VALUE_EXPR (new_var
);
2306 gcc_assert (TREE_CODE (ptr
) == INDIRECT_REF
);
2307 ptr
= TREE_OPERAND (ptr
, 0);
2308 gcc_assert (DECL_P (ptr
));
2309 x
= TYPE_SIZE_UNIT (TREE_TYPE (new_var
));
2311 /* void *tmp = __builtin_alloca */
2312 atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
2313 stmt
= gimple_build_call (atmp
, 1, x
);
2314 tmp
= create_tmp_var_raw (ptr_type_node
, NULL
);
2315 gimple_add_tmp_var (tmp
);
2316 gimple_call_set_lhs (stmt
, tmp
);
2318 gimple_seq_add_stmt (ilist
, stmt
);
2320 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ptr
), tmp
);
2321 gimplify_assign (ptr
, x
, ilist
);
2324 else if (is_reference (var
))
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
2337 x
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var
)));
2338 if (c_kind
== OMP_CLAUSE_FIRSTPRIVATE
&& is_task_ctx (ctx
))
2340 x
= build_receiver_ref (var
, false, ctx
);
2341 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2343 else if (TREE_CONSTANT (x
))
2345 const char *name
= NULL
;
2346 if (DECL_NAME (var
))
2347 name
= IDENTIFIER_POINTER (DECL_NAME (new_var
));
2349 x
= create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var
)),
2351 gimple_add_tmp_var (x
);
2352 TREE_ADDRESSABLE (x
) = 1;
2353 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2357 tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
2358 x
= build_call_expr_loc (clause_loc
, atmp
, 1, x
);
2361 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
2362 gimplify_assign (new_var
, x
, ilist
);
2364 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2366 else if (c_kind
== OMP_CLAUSE_REDUCTION
2367 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2375 switch (OMP_CLAUSE_CODE (c
))
2377 case OMP_CLAUSE_SHARED
:
2378 /* Shared global vars are just accessed directly. */
2379 if (is_global_var (new_var
))
2381 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2382 needs to be delayed until after fixup_child_record_type so
2383 that we get the correct type during the dereference. */
2384 by_ref
= use_pointer_for_field (var
, ctx
);
2385 x
= build_receiver_ref (var
, by_ref
, ctx
);
2386 SET_DECL_VALUE_EXPR (new_var
, x
);
2387 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
2389 /* ??? If VAR is not passed by reference, and the variable
2390 hasn't been initialized yet, then we'll get a warning for
2391 the store into the omp_data_s structure. Ideally, we'd be
2392 able to notice this and not store anything at all, but
2393 we're generating code too early. Suppress the warning. */
2395 TREE_NO_WARNING (var
) = 1;
2398 case OMP_CLAUSE_LASTPRIVATE
:
2399 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2403 case OMP_CLAUSE_PRIVATE
:
2404 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_PRIVATE
)
2405 x
= build_outer_var_ref (var
, ctx
);
2406 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
2408 if (is_task_ctx (ctx
))
2409 x
= build_receiver_ref (var
, false, ctx
);
2411 x
= build_outer_var_ref (var
, ctx
);
2415 x
= lang_hooks
.decls
.omp_clause_default_ctor (c
, new_var
, x
);
2417 gimplify_and_add (x
, ilist
);
2421 x
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
2424 gimple_seq tseq
= NULL
;
2427 gimplify_stmt (&dtor
, &tseq
);
2428 gsi_insert_seq_before (&diter
, tseq
, GSI_SAME_STMT
);
2432 case OMP_CLAUSE_FIRSTPRIVATE
:
2433 if (is_task_ctx (ctx
))
2435 if (is_reference (var
) || is_variable_sized (var
))
2437 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var
,
2439 || use_pointer_for_field (var
, NULL
))
2441 x
= build_receiver_ref (var
, false, ctx
);
2442 SET_DECL_VALUE_EXPR (new_var
, x
);
2443 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
2447 x
= build_outer_var_ref (var
, ctx
);
2448 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, new_var
, x
);
2449 gimplify_and_add (x
, ilist
);
2453 case OMP_CLAUSE_COPYIN
:
2454 by_ref
= use_pointer_for_field (var
, NULL
);
2455 x
= build_receiver_ref (var
, by_ref
, ctx
);
2456 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, x
);
2457 append_to_statement_list (x
, &copyin_seq
);
2458 copyin_by_ref
|= by_ref
;
2461 case OMP_CLAUSE_REDUCTION
:
2462 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2464 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
2465 x
= build_outer_var_ref (var
, ctx
);
2467 if (is_reference (var
))
2468 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2469 SET_DECL_VALUE_EXPR (placeholder
, x
);
2470 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
2471 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
), ctx
);
2472 gimple_seq_add_seq (ilist
,
2473 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
));
2474 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
2475 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
2479 x
= omp_reduction_init (c
, TREE_TYPE (new_var
));
2480 gcc_assert (TREE_CODE (TREE_TYPE (new_var
)) != ARRAY_TYPE
);
2481 gimplify_assign (new_var
, x
, ilist
);
2491 /* The copyin sequence is not to be executed by the main thread, since
2492 that would result in self-copies. Perhaps not visible to scalars,
2493 but it certainly is to C++ operator=. */
2496 x
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
),
2498 x
= build2 (NE_EXPR
, boolean_type_node
, x
,
2499 build_int_cst (TREE_TYPE (x
), 0));
2500 x
= build3 (COND_EXPR
, void_type_node
, x
, copyin_seq
, NULL
);
2501 gimplify_and_add (x
, ilist
);
2504 /* If any copyin variable is passed by reference, we must ensure the
2505 master thread doesn't modify it before it is copied over in all
2506 threads. Similarly for variables in both firstprivate and
2507 lastprivate clauses we need to ensure the lastprivate copying
2508 happens after firstprivate copying in all threads. */
2509 if (copyin_by_ref
|| lastprivate_firstprivate
)
2510 gimplify_and_add (build_omp_barrier (), ilist
);
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */
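/* As an illustrative sketch, for "lastprivate(x)" the copy-back emitted
   here is guarded by PREDICATE, which tests whether this thread executed
   the sequentially last iteration (names below are illustrative):

	if (V == <last iteration value>)
	  {
	    <lastprivate gimple seq, if any>
	    x = x.private;		<- write the private copy back out
	  }

   For a workshare combined with its parallel, the clauses may live on
   the enclosing parallel statement, which is why the loop below also
   walks up to the parallel context.  */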
2519 lower_lastprivate_clauses (tree clauses
, tree predicate
, gimple_seq
*stmt_list
,
2522 tree x
, c
, label
= NULL
;
2523 bool par_clauses
= false;
2525 /* Early exit if there are no lastprivate clauses. */
2526 clauses
= find_omp_clause (clauses
, OMP_CLAUSE_LASTPRIVATE
);
2527 if (clauses
== NULL
)
2529 /* If this was a workshare clause, see if it had been combined
2530 with its parallel. In that case, look for the clauses on the
2531 parallel statement itself. */
2532 if (is_parallel_ctx (ctx
))
2536 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
2539 clauses
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
2540 OMP_CLAUSE_LASTPRIVATE
);
2541 if (clauses
== NULL
)
2549 tree label_true
, arm1
, arm2
;
2551 label
= create_artificial_label (UNKNOWN_LOCATION
);
2552 label_true
= create_artificial_label (UNKNOWN_LOCATION
);
2553 arm1
= TREE_OPERAND (predicate
, 0);
2554 arm2
= TREE_OPERAND (predicate
, 1);
2555 gimplify_expr (&arm1
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
2556 gimplify_expr (&arm2
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
2557 stmt
= gimple_build_cond (TREE_CODE (predicate
), arm1
, arm2
,
2559 gimple_seq_add_stmt (stmt_list
, stmt
);
2560 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label_true
));
2563 for (c
= clauses
; c
;)
2566 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2568 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
)
2570 var
= OMP_CLAUSE_DECL (c
);
2571 new_var
= lookup_decl (var
, ctx
);
2573 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
2575 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
2576 gimple_seq_add_seq (stmt_list
,
2577 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
));
2579 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
) = NULL
;
2581 x
= build_outer_var_ref (var
, ctx
);
2582 if (is_reference (var
))
2583 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2584 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, x
, new_var
);
2585 gimplify_and_add (x
, stmt_list
);
2587 c
= OMP_CLAUSE_CHAIN (c
);
2588 if (c
== NULL
&& !par_clauses
)
2590 /* If this was a workshare clause, see if it had been combined
2591 with its parallel. In that case, continue looking for the
2592 clauses also on the parallel statement itself. */
2593 if (is_parallel_ctx (ctx
))
2597 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
2600 c
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
2601 OMP_CLAUSE_LASTPRIVATE
);
2607 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label
));
/* Generate code to implement the REDUCTION clauses.  */
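/* As an illustrative sketch, for "reduction(+:s)" on an "int s" the
   merge emitted here is either a single atomic update, used when there
   is exactly one scalar reduction:

	#pragma omp atomic
	s = s + s.private;

   or, with several reductions (or an array/placeholder reduction), a
   locked sequence:

	GOMP_atomic_start ();
	s = s + s.private;
	...other reduction merges...
	GOMP_atomic_end ();

   "s.private" stands for the thread-local copy; the real code uses the
   context mappings built during scanning.  */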
2614 lower_reduction_clauses (tree clauses
, gimple_seq
*stmt_seqp
, omp_context
*ctx
)
2616 gimple_seq sub_seq
= NULL
;
2621 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2622 update in that case, otherwise use a lock. */
2623 for (c
= clauses
; c
&& count
< 2; c
= OMP_CLAUSE_CHAIN (c
))
2624 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
2626 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2628 /* Never use OMP_ATOMIC for array reductions. */
2638 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2640 tree var
, ref
, new_var
;
2641 enum tree_code code
;
2642 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2644 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
2647 var
= OMP_CLAUSE_DECL (c
);
2648 new_var
= lookup_decl (var
, ctx
);
2649 if (is_reference (var
))
2650 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2651 ref
= build_outer_var_ref (var
, ctx
);
2652 code
= OMP_CLAUSE_REDUCTION_CODE (c
);
2654 /* reduction(-:var) sums up the partial results, so it acts
2655 identically to reduction(+:var). */
2656 if (code
== MINUS_EXPR
)
2661 tree addr
= build_fold_addr_expr_loc (clause_loc
, ref
);
2663 addr
= save_expr (addr
);
2664 ref
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (addr
)), addr
);
2665 x
= fold_build2_loc (clause_loc
, code
, TREE_TYPE (ref
), ref
, new_var
);
2666 x
= build2 (OMP_ATOMIC
, void_type_node
, addr
, x
);
2667 gimplify_and_add (x
, stmt_seqp
);
2671 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2673 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
2675 if (is_reference (var
))
2676 ref
= build_fold_addr_expr_loc (clause_loc
, ref
);
2677 SET_DECL_VALUE_EXPR (placeholder
, ref
);
2678 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
2679 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
2680 gimple_seq_add_seq (&sub_seq
, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
));
2681 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
2682 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
) = NULL
;
2686 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
2687 ref
= build_outer_var_ref (var
, ctx
);
2688 gimplify_assign (ref
, x
, &sub_seq
);
2692 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
),
2694 gimple_seq_add_stmt (stmt_seqp
, stmt
);
2696 gimple_seq_add_seq (stmt_seqp
, sub_seq
);
2698 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
),
2700 gimple_seq_add_stmt (stmt_seqp
, stmt
);
/* Generate code to implement the COPYPRIVATE clauses.  */
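/* As an illustrative sketch, for "#pragma omp single copyprivate(x)"
   the thread that executed the single region stores the address of its
   copy into the communication record (SLIST), and every other thread
   copies the value out of it (RLIST), roughly:

	.omp_copy_o.x = &x;			<- sender side
	...
	x = *(.omp_copy_i->x);			<- receiver side

   The record and field names above are illustrative; the actual
   sender/receiver references come from build_sender_ref and
   build_receiver_ref.  */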
2707 lower_copyprivate_clauses (tree clauses
, gimple_seq
*slist
, gimple_seq
*rlist
,
2712 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2714 tree var
, new_var
, ref
, x
;
2716 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2718 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYPRIVATE
)
2721 var
= OMP_CLAUSE_DECL (c
);
2722 by_ref
= use_pointer_for_field (var
, NULL
);
2724 ref
= build_sender_ref (var
, ctx
);
2725 x
= new_var
= lookup_decl_in_outer_ctx (var
, ctx
);
2728 x
= build_fold_addr_expr_loc (clause_loc
, new_var
);
2729 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ref
), x
);
2731 gimplify_assign (ref
, x
, slist
);
2733 ref
= build_receiver_ref (var
, false, ctx
);
2736 ref
= fold_convert_loc (clause_loc
,
2737 build_pointer_type (TREE_TYPE (new_var
)),
2739 ref
= build_fold_indirect_ref_loc (clause_loc
, ref
);
2741 if (is_reference (var
))
2743 ref
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), ref
);
2744 ref
= build_simple_mem_ref_loc (clause_loc
, ref
);
2745 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2747 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, ref
);
2748 gimplify_and_add (x
, rlist
);
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */
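/* As an illustrative sketch, on the parent side a firstprivate "int i"
   sent by value and a variable "a" sent by reference would add to ILIST
   something like:

	.omp_data_o.i = i;			<- sent by value
	.omp_data_o.a = &a;			<- sent by reference

   and, for clauses whose result must flow back to the parent (e.g. a
   by-value lastprivate or reduction), a matching read from the sender
   record is appended to OLIST after the region.  */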
2757 lower_send_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*olist
,
2762 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2764 tree val
, ref
, x
, var
;
2765 bool by_ref
, do_in
= false, do_out
= false;
2766 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2768 switch (OMP_CLAUSE_CODE (c
))
2770 case OMP_CLAUSE_PRIVATE
:
2771 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
2774 case OMP_CLAUSE_FIRSTPRIVATE
:
2775 case OMP_CLAUSE_COPYIN
:
2776 case OMP_CLAUSE_LASTPRIVATE
:
2777 case OMP_CLAUSE_REDUCTION
:
2783 val
= OMP_CLAUSE_DECL (c
);
2784 var
= lookup_decl_in_outer_ctx (val
, ctx
);
2786 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYIN
2787 && is_global_var (var
))
2789 if (is_variable_sized (val
))
2791 by_ref
= use_pointer_for_field (val
, NULL
);
2793 switch (OMP_CLAUSE_CODE (c
))
2795 case OMP_CLAUSE_PRIVATE
:
2796 case OMP_CLAUSE_FIRSTPRIVATE
:
2797 case OMP_CLAUSE_COPYIN
:
2801 case OMP_CLAUSE_LASTPRIVATE
:
2802 if (by_ref
|| is_reference (val
))
2804 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2811 if (lang_hooks
.decls
.omp_private_outer_ref (val
))
2816 case OMP_CLAUSE_REDUCTION
:
2818 do_out
= !(by_ref
|| is_reference (val
));
2827 ref
= build_sender_ref (val
, ctx
);
2828 x
= by_ref
? build_fold_addr_expr_loc (clause_loc
, var
) : var
;
2829 gimplify_assign (ref
, x
, ilist
);
2830 if (is_task_ctx (ctx
))
2831 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref
, 1)) = NULL
;
2836 ref
= build_sender_ref (val
, ctx
);
2837 gimplify_assign (var
, ref
, olist
);
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */
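/* As an illustrative sketch, for an implicitly shared "int k" this
   emits on the parent side

	.omp_data_o.k = k;		(or  .omp_data_o.k = &k;  if the
					 field is a pointer)

   before the region and, in the by-value case, when K is writable and
   not a by-reference RESULT_DECL/PARM_DECL,

	k = .omp_data_o.k;

   after it, so updates made through the child's receiver copy become
   visible in the parent again.  */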
2847 lower_send_shared_vars (gimple_seq
*ilist
, gimple_seq
*olist
, omp_context
*ctx
)
2849 tree var
, ovar
, nvar
, f
, x
, record_type
;
2851 if (ctx
->record_type
== NULL
)
2854 record_type
= ctx
->srecord_type
? ctx
->srecord_type
: ctx
->record_type
;
2855 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
2857 ovar
= DECL_ABSTRACT_ORIGIN (f
);
2858 nvar
= maybe_lookup_decl (ovar
, ctx
);
2859 if (!nvar
|| !DECL_HAS_VALUE_EXPR_P (nvar
))
2862 /* If CTX is a nested parallel directive. Find the immediately
2863 enclosing parallel or workshare construct that contains a
2864 mapping for OVAR. */
2865 var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
2867 if (use_pointer_for_field (ovar
, ctx
))
2869 x
= build_sender_ref (ovar
, ctx
);
2870 var
= build_fold_addr_expr (var
);
2871 gimplify_assign (x
, var
, ilist
);
2875 x
= build_sender_ref (ovar
, ctx
);
2876 gimplify_assign (x
, var
, ilist
);
2878 if (!TREE_READONLY (var
)
	  /* We don't need to receive a new reference to a result
	     or parm decl.  In fact we may not store to it as we will
	     invalidate any pending RSO and generate wrong gimple
	     code.  */
2883 && !((TREE_CODE (var
) == RESULT_DECL
2884 || TREE_CODE (var
) == PARM_DECL
)
2885 && DECL_BY_REFERENCE (var
)))
2887 x
= build_sender_ref (ovar
, ctx
);
2888 gimplify_assign (var
, x
, olist
);
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct; it contains the list of additional arguments needed by
   the workshare construct.  */
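/* As an illustrative sketch, a plain "#pragma omp parallel" whose body
   was outlined into foo._omp_fn.0 expands roughly into:

	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, nthreads);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   where NTHREADS is 0 (chosen at run time) unless a num_threads or if
   clause says otherwise.  For a combined parallel+workshare, the
   GOMP_parallel_start call is replaced by one of the
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry
   points, and WS_ARGS supplies the extra arguments.  */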
2917 expand_parallel_call (struct omp_region
*region
, basic_block bb
,
2918 gimple entry_stmt
, VEC(tree
,gc
) *ws_args
)
2920 tree t
, t1
, t2
, val
, cond
, c
, clauses
;
2921 gimple_stmt_iterator gsi
;
2923 enum built_in_function start_ix
;
2925 location_t clause_loc
;
2928 clauses
= gimple_omp_parallel_clauses (entry_stmt
);
  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
2932 start_ix
= BUILT_IN_GOMP_PARALLEL_START
;
2933 if (is_combined_parallel (region
))
2935 switch (region
->inner
->type
)
2937 case GIMPLE_OMP_FOR
:
2938 gcc_assert (region
->inner
->sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
2939 start_ix2
= ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2940 + (region
->inner
->sched_kind
2941 == OMP_CLAUSE_SCHEDULE_RUNTIME
2942 ? 3 : region
->inner
->sched_kind
));
2943 start_ix
= (enum built_in_function
)start_ix2
;
2945 case GIMPLE_OMP_SECTIONS
:
2946 start_ix
= BUILT_IN_GOMP_PARALLEL_SECTIONS_START
;
2953 /* By default, the value of NUM_THREADS is zero (selected at run time)
2954 and there is no conditional. */
2956 val
= build_int_cst (unsigned_type_node
, 0);
2958 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
2960 cond
= OMP_CLAUSE_IF_EXPR (c
);
2962 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_THREADS
);
2965 val
= OMP_CLAUSE_NUM_THREADS_EXPR (c
);
2966 clause_loc
= OMP_CLAUSE_LOCATION (c
);
2969 clause_loc
= gimple_location (entry_stmt
);
2971 /* Ensure 'val' is of the correct type. */
2972 val
= fold_convert_loc (clause_loc
, unsigned_type_node
, val
);
2974 /* If we found the clause 'if (cond)', build either
2975 (cond != 0) or (cond ? val : 1u). */
2978 gimple_stmt_iterator gsi
;
2980 cond
= gimple_boolify (cond
);
2982 if (integer_zerop (val
))
2983 val
= fold_build2_loc (clause_loc
,
2984 EQ_EXPR
, unsigned_type_node
, cond
,
2985 build_int_cst (TREE_TYPE (cond
), 0));
2988 basic_block cond_bb
, then_bb
, else_bb
;
2989 edge e
, e_then
, e_else
;
2990 tree tmp_then
, tmp_else
, tmp_join
, tmp_var
;
2992 tmp_var
= create_tmp_var (TREE_TYPE (val
), NULL
);
2993 if (gimple_in_ssa_p (cfun
))
2995 tmp_then
= make_ssa_name (tmp_var
, NULL
);
2996 tmp_else
= make_ssa_name (tmp_var
, NULL
);
2997 tmp_join
= make_ssa_name (tmp_var
, NULL
);
3006 e
= split_block (bb
, NULL
);
3011 then_bb
= create_empty_bb (cond_bb
);
3012 else_bb
= create_empty_bb (then_bb
);
3013 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
3014 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
3016 stmt
= gimple_build_cond_empty (cond
);
3017 gsi
= gsi_start_bb (cond_bb
);
3018 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3020 gsi
= gsi_start_bb (then_bb
);
3021 stmt
= gimple_build_assign (tmp_then
, val
);
3022 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3024 gsi
= gsi_start_bb (else_bb
);
3025 stmt
= gimple_build_assign
3026 (tmp_else
, build_int_cst (unsigned_type_node
, 1));
3027 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3029 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
3030 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
3031 e_then
= make_edge (then_bb
, bb
, EDGE_FALLTHRU
);
3032 e_else
= make_edge (else_bb
, bb
, EDGE_FALLTHRU
);
3034 if (gimple_in_ssa_p (cfun
))
3036 gimple phi
= create_phi_node (tmp_join
, bb
);
3037 SSA_NAME_DEF_STMT (tmp_join
) = phi
;
3038 add_phi_arg (phi
, tmp_then
, e_then
, UNKNOWN_LOCATION
);
3039 add_phi_arg (phi
, tmp_else
, e_else
, UNKNOWN_LOCATION
);
3045 gsi
= gsi_start_bb (bb
);
3046 val
= force_gimple_operand_gsi (&gsi
, val
, true, NULL_TREE
,
3047 false, GSI_CONTINUE_LINKING
);
3050 gsi
= gsi_last_bb (bb
);
3051 t
= gimple_omp_parallel_data_arg (entry_stmt
);
3053 t1
= null_pointer_node
;
3055 t1
= build_fold_addr_expr (t
);
3056 t2
= build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt
));
3058 args
= VEC_alloc (tree
, gc
, 3 + VEC_length (tree
, ws_args
));
3059 VEC_quick_push (tree
, args
, t2
);
3060 VEC_quick_push (tree
, args
, t1
);
3061 VEC_quick_push (tree
, args
, val
);
3062 VEC_splice (tree
, args
, ws_args
);
3064 t
= build_call_expr_loc_vec (UNKNOWN_LOCATION
,
3065 builtin_decl_explicit (start_ix
), args
);
3067 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3068 false, GSI_CONTINUE_LINKING
);
3070 t
= gimple_omp_parallel_data_arg (entry_stmt
);
3072 t
= null_pointer_node
;
3074 t
= build_fold_addr_expr (t
);
3075 t
= build_call_expr_loc (gimple_location (entry_stmt
),
3076 gimple_omp_parallel_child_fn (entry_stmt
), 1, t
);
3077 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3078 false, GSI_CONTINUE_LINKING
);
3080 t
= build_call_expr_loc (gimple_location (entry_stmt
),
3081 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END
),
3083 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3084 false, GSI_CONTINUE_LINKING
);
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */
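/* As an illustrative sketch, "#pragma omp task" whose body was outlined
   into foo._omp_fn.1 expands roughly into:

	__builtin_GOMP_task (foo._omp_fn.1, &.omp_data_o, cpyfn,
			     arg_size, arg_align, if_cond, flags);

   where CPYFN is the task firstprivate copy function (or NULL),
   IF_COND comes from the if clause (true when absent), and FLAGS
   encodes untied (1), final (2) and mergeable (4), as computed
   below.  */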
3092 expand_task_call (basic_block bb
, gimple entry_stmt
)
3094 tree t
, t1
, t2
, t3
, flags
, cond
, c
, c2
, clauses
;
3095 gimple_stmt_iterator gsi
;
3096 location_t loc
= gimple_location (entry_stmt
);
3098 clauses
= gimple_omp_task_clauses (entry_stmt
);
3100 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
3102 cond
= gimple_boolify (OMP_CLAUSE_IF_EXPR (c
));
3104 cond
= boolean_true_node
;
3106 c
= find_omp_clause (clauses
, OMP_CLAUSE_UNTIED
);
3107 c2
= find_omp_clause (clauses
, OMP_CLAUSE_MERGEABLE
);
3108 flags
= build_int_cst (unsigned_type_node
,
3109 (c
? 1 : 0) + (c2
? 4 : 0));
3111 c
= find_omp_clause (clauses
, OMP_CLAUSE_FINAL
);
3114 c
= gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c
));
3115 c
= fold_build3_loc (loc
, COND_EXPR
, unsigned_type_node
, c
,
3116 build_int_cst (unsigned_type_node
, 2),
3117 build_int_cst (unsigned_type_node
, 0));
3118 flags
= fold_build2_loc (loc
, PLUS_EXPR
, unsigned_type_node
, flags
, c
);
3121 gsi
= gsi_last_bb (bb
);
3122 t
= gimple_omp_task_data_arg (entry_stmt
);
3124 t2
= null_pointer_node
;
3126 t2
= build_fold_addr_expr_loc (loc
, t
);
3127 t1
= build_fold_addr_expr_loc (loc
, gimple_omp_task_child_fn (entry_stmt
));
3128 t
= gimple_omp_task_copy_fn (entry_stmt
);
3130 t3
= null_pointer_node
;
3132 t3
= build_fold_addr_expr_loc (loc
, t
);
3134 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK
),
3136 gimple_omp_task_arg_size (entry_stmt
),
3137 gimple_omp_task_arg_align (entry_stmt
), cond
, flags
);
3139 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3140 false, GSI_CONTINUE_LINKING
);
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
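/* For example (an illustrative case, not generated text):

	#pragma omp parallel
	{
	  #pragma omp for
	  for (...) ...;	<- implicit barrier at the end of the for
	}			<- implicit barrier at the end of the parallel

   the workshare's barrier immediately precedes the parallel's own
   barrier, so it can be dropped by marking its GIMPLE_OMP_RETURN as
   nowait, subject to the addressable-variable check below.  */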
3194 remove_exit_barrier (struct omp_region
*region
)
3196 gimple_stmt_iterator gsi
;
3197 basic_block exit_bb
;
3201 int any_addressable_vars
= -1;
3203 exit_bb
= region
->exit
;
  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
3210 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3211 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3212 statements that can appear in between are extremely limited -- no
3213 memory operations at all. Here, we allow nothing at all, so the
3214 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3215 gsi
= gsi_last_bb (exit_bb
);
3216 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
3218 if (!gsi_end_p (gsi
) && gimple_code (gsi_stmt (gsi
)) != GIMPLE_LABEL
)
3221 FOR_EACH_EDGE (e
, ei
, exit_bb
->preds
)
3223 gsi
= gsi_last_bb (e
->src
);
3224 if (gsi_end_p (gsi
))
3226 stmt
= gsi_stmt (gsi
);
3227 if (gimple_code (stmt
) == GIMPLE_OMP_RETURN
3228 && !gimple_omp_return_nowait_p (stmt
))
3230 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3231 in many cases. If there could be tasks queued, the barrier
3232 might be needed to let the tasks run before some local
3233 variable of the parallel that the task uses as shared
3234 runs out of scope. The task can be spawned either
3235 from within current function (this would be easy to check)
3236 or from some function it calls and gets passed an address
3237 of such a variable. */
3238 if (any_addressable_vars
< 0)
3240 gimple parallel_stmt
= last_stmt (region
->entry
);
3241 tree child_fun
= gimple_omp_parallel_child_fn (parallel_stmt
);
3242 tree local_decls
, block
, decl
;
3245 any_addressable_vars
= 0;
3246 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun
), ix
, decl
)
3247 if (TREE_ADDRESSABLE (decl
))
3249 any_addressable_vars
= 1;
3252 for (block
= gimple_block (stmt
);
3253 !any_addressable_vars
3255 && TREE_CODE (block
) == BLOCK
;
3256 block
= BLOCK_SUPERCONTEXT (block
))
3258 for (local_decls
= BLOCK_VARS (block
);
3260 local_decls
= DECL_CHAIN (local_decls
))
3261 if (TREE_ADDRESSABLE (local_decls
))
3263 any_addressable_vars
= 1;
3266 if (block
== gimple_block (parallel_stmt
))
3270 if (!any_addressable_vars
)
3271 gimple_omp_return_set_nowait (stmt
);
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for a task body, except
   that in an untied task omp_get_thread_num () can change at any task
   scheduling point.  */
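/* For example, inside an outlined parallel body a call

	tid = omp_get_thread_num ();

   is rewritten to use the const builtin

	tid = __builtin_omp_get_thread_num ();

   so the value can be CSEd within the body.  (Illustrative; the
   transformation below only replaces the fndecl of matching calls.)  */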
3303 optimize_omp_library_calls (gimple entry_stmt
)
3306 gimple_stmt_iterator gsi
;
3307 tree thr_num_tree
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3308 tree thr_num_id
= DECL_ASSEMBLER_NAME (thr_num_tree
);
3309 tree num_thr_tree
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3310 tree num_thr_id
= DECL_ASSEMBLER_NAME (num_thr_tree
);
3311 bool untied_task
= (gimple_code (entry_stmt
) == GIMPLE_OMP_TASK
3312 && find_omp_clause (gimple_omp_task_clauses (entry_stmt
),
3313 OMP_CLAUSE_UNTIED
) != NULL
);
3316 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3318 gimple call
= gsi_stmt (gsi
);
3321 if (is_gimple_call (call
)
3322 && (decl
= gimple_call_fndecl (call
))
3323 && DECL_EXTERNAL (decl
)
3324 && TREE_PUBLIC (decl
)
3325 && DECL_INITIAL (decl
) == NULL
)
3329 if (DECL_NAME (decl
) == thr_num_id
)
3331 /* In #pragma omp task untied omp_get_thread_num () can change
3332 during the execution of the task region. */
3335 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3337 else if (DECL_NAME (decl
) == num_thr_id
)
3338 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3342 if (DECL_ASSEMBLER_NAME (decl
) != DECL_ASSEMBLER_NAME (built_in
)
3343 || gimple_call_num_args (call
) != 0)
3346 if (flag_exceptions
&& !TREE_NOTHROW (decl
))
3349 if (TREE_CODE (TREE_TYPE (decl
)) != FUNCTION_TYPE
3350 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl
)),
3351 TREE_TYPE (TREE_TYPE (built_in
))))
3354 gimple_call_set_fndecl (call
, built_in
);
/* Expand the OpenMP parallel or task directive starting at REGION.  */
3362 expand_omp_taskreg (struct omp_region
*region
)
3364 basic_block entry_bb
, exit_bb
, new_bb
;
3365 struct function
*child_cfun
;
3366 tree child_fn
, block
, t
;
3368 gimple_stmt_iterator gsi
;
3369 gimple entry_stmt
, stmt
;
3371 VEC(tree
,gc
) *ws_args
;
3373 entry_stmt
= last_stmt (region
->entry
);
3374 child_fn
= gimple_omp_taskreg_child_fn (entry_stmt
);
3375 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
  /* If this function has already been instrumented, make sure
     the child function isn't instrumented again.  */
3378 child_cfun
->after_tree_profile
= cfun
->after_tree_profile
;
3380 entry_bb
= region
->entry
;
3381 exit_bb
= region
->exit
;
3383 if (is_combined_parallel (region
))
3384 ws_args
= region
->ws_args
;
3388 if (child_cfun
->cfg
)
3390 /* Due to inlining, it may happen that we have already outlined
3391 the region, in which case all we need to do is make the
3392 sub-graph unreachable and emit the parallel call. */
3393 edge entry_succ_e
, exit_succ_e
;
3394 gimple_stmt_iterator gsi
;
3396 entry_succ_e
= single_succ_edge (entry_bb
);
3398 gsi
= gsi_last_bb (entry_bb
);
3399 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_PARALLEL
3400 || gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_TASK
);
3401 gsi_remove (&gsi
, true);
3406 exit_succ_e
= single_succ_edge (exit_bb
);
3407 make_edge (new_bb
, exit_succ_e
->dest
, EDGE_FALLTHRU
);
3409 remove_edge_and_dominated_blocks (entry_succ_e
);
3413 unsigned srcidx
, dstidx
, num
;
      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In that case, we need to keep the assignment.  */
3428 if (gimple_omp_taskreg_data_arg (entry_stmt
))
3430 basic_block entry_succ_bb
= single_succ (entry_bb
);
3431 gimple_stmt_iterator gsi
;
3433 gimple parcopy_stmt
= NULL
;
3435 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
3439 gcc_assert (!gsi_end_p (gsi
));
3440 stmt
= gsi_stmt (gsi
);
3441 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
3444 if (gimple_num_ops (stmt
) == 2)
3446 tree arg
= gimple_assign_rhs1 (stmt
);
	      /* We ignore the subcode because we're
		 effectively doing a STRIP_NOPS.  */
3451 if (TREE_CODE (arg
) == ADDR_EXPR
3452 && TREE_OPERAND (arg
, 0)
3453 == gimple_omp_taskreg_data_arg (entry_stmt
))
3455 parcopy_stmt
= stmt
;
3461 gcc_assert (parcopy_stmt
!= NULL
);
3462 arg
= DECL_ARGUMENTS (child_fn
);
3464 if (!gimple_in_ssa_p (cfun
))
3466 if (gimple_assign_lhs (parcopy_stmt
) == arg
)
3467 gsi_remove (&gsi
, true);
3470 /* ?? Is setting the subcode really necessary ?? */
3471 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (arg
));
3472 gimple_assign_set_rhs1 (parcopy_stmt
, arg
);
3477 /* If we are in ssa form, we must load the value from the default
3478 definition of the argument. That should not be defined now,
3479 since the argument is not used uninitialized. */
3480 gcc_assert (gimple_default_def (cfun
, arg
) == NULL
);
3481 narg
= make_ssa_name (arg
, gimple_build_nop ());
3482 set_default_def (arg
, narg
);
3483 /* ?? Is setting the subcode really necessary ?? */
3484 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (narg
));
3485 gimple_assign_set_rhs1 (parcopy_stmt
, narg
);
3486 update_stmt (parcopy_stmt
);
3490 /* Declare local variables needed in CHILD_CFUN. */
3491 block
= DECL_INITIAL (child_fn
);
3492 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
3493 /* The gimplifier could record temporaries in parallel/task block
3494 rather than in containing function's local_decls chain,
3495 which would mean cgraph missed finalizing them. Do it now. */
3496 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
3497 if (TREE_CODE (t
) == VAR_DECL
3499 && !DECL_EXTERNAL (t
))
3500 varpool_finalize_decl (t
);
3501 DECL_SAVED_TREE (child_fn
) = NULL
;
3502 gimple_set_body (child_fn
, bb_seq (single_succ (entry_bb
)));
3503 TREE_USED (block
) = 1;
3505 /* Reset DECL_CONTEXT on function arguments. */
3506 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
3507 DECL_CONTEXT (t
) = child_fn
;
3509 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3510 so that it can be moved to the child function. */
3511 gsi
= gsi_last_bb (entry_bb
);
3512 stmt
= gsi_stmt (gsi
);
3513 gcc_assert (stmt
&& (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
3514 || gimple_code (stmt
) == GIMPLE_OMP_TASK
));
3515 gsi_remove (&gsi
, true);
3516 e
= split_block (entry_bb
, stmt
);
3518 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
3520 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3523 gsi
= gsi_last_bb (exit_bb
);
3524 gcc_assert (!gsi_end_p (gsi
)
3525 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
3526 stmt
= gimple_build_return (NULL
);
3527 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
3528 gsi_remove (&gsi
, true);
3531 /* Move the parallel region into CHILD_CFUN. */
3533 if (gimple_in_ssa_p (cfun
))
3535 push_cfun (child_cfun
);
3536 init_tree_ssa (child_cfun
);
3537 init_ssa_operands ();
3538 cfun
->gimple_df
->in_ssa_p
= true;
3543 block
= gimple_block (entry_stmt
);
3545 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
3547 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
3549 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3550 num
= VEC_length (tree
, child_cfun
->local_decls
);
3551 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
3553 t
= VEC_index (tree
, child_cfun
->local_decls
, srcidx
);
3554 if (DECL_CONTEXT (t
) == cfun
->decl
)
3556 if (srcidx
!= dstidx
)
3557 VEC_replace (tree
, child_cfun
->local_decls
, dstidx
, t
);
3561 VEC_truncate (tree
, child_cfun
->local_decls
, dstidx
);
3563 /* Inform the callgraph about the new function. */
3564 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
3565 = cfun
->curr_properties
;
3566 cgraph_add_new_function (child_fn
, true);
3568 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3569 fixed in a following pass. */
3570 push_cfun (child_cfun
);
3571 save_current
= current_function_decl
;
3572 current_function_decl
= child_fn
;
3574 optimize_omp_library_calls (entry_stmt
);
3575 rebuild_cgraph_edges ();
3577 /* Some EH regions might become dead, see PR34608. If
3578 pass_cleanup_cfg isn't the first pass to happen with the
3579 new child, these dead EH edges might cause problems.
3580 Clean them up now. */
3581 if (flag_exceptions
)
3584 bool changed
= false;
3587 changed
|= gimple_purge_dead_eh_edges (bb
);
3589 cleanup_tree_cfg ();
3591 if (gimple_in_ssa_p (cfun
))
3592 update_ssa (TODO_update_ssa
);
3593 current_function_decl
= save_current
;
3597 /* Emit a library call to launch the children threads. */
3598 if (gimple_code (entry_stmt
) == GIMPLE_OMP_PARALLEL
)
3599 expand_parallel_call (region
, new_bb
, entry_stmt
, ws_args
);
3601 expand_task_call (new_bb
, entry_stmt
);
3602 update_ssa (TODO_update_ssa_only_virtuals
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

   If this is a combined omp parallel loop, instead of the call to
   GOMP_loop_foo_start, we call GOMP_loop_foo_next.

   For collapsed loops, given parameters:

	collapse(3)
	for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	  for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	    for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	      BODY;

   we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
  */
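/* As an illustrative sketch, for

	#pragma omp for schedule(dynamic, 4)
	for (i = 0; i < n; i++) BODY;

   START_FN/NEXT_FN would be GOMP_loop_dynamic_start and
   GOMP_loop_dynamic_next, so each thread repeatedly grabs a chunk:

	if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0))
	  do
	    for (i = istart0; i < iend0; i++) BODY;
	  while (GOMP_loop_dynamic_next (&istart0, &iend0));
	GOMP_loop_end ();

   (or GOMP_loop_end_nowait when the nowait clause is present).  */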
3687 expand_omp_for_generic (struct omp_region
*region
,
3688 struct omp_for_data
*fd
,
3689 enum built_in_function start_fn
,
3690 enum built_in_function next_fn
)
3692 tree type
, istart0
, iend0
, iend
;
3693 tree t
, vmain
, vback
, bias
= NULL_TREE
;
3694 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, collapse_bb
;
3695 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
3696 gimple_stmt_iterator gsi
;
3698 bool in_combined_parallel
= is_combined_parallel (region
);
3699 bool broken_loop
= region
->cont
== NULL
;
3701 tree
*counts
= NULL
;
3704 gcc_assert (!broken_loop
|| !in_combined_parallel
);
3705 gcc_assert (fd
->iter_type
== long_integer_type_node
3706 || !in_combined_parallel
);
3708 type
= TREE_TYPE (fd
->loop
.v
);
3709 istart0
= create_tmp_var (fd
->iter_type
, ".istart0");
3710 iend0
= create_tmp_var (fd
->iter_type
, ".iend0");
3711 TREE_ADDRESSABLE (istart0
) = 1;
3712 TREE_ADDRESSABLE (iend0
) = 1;
3713 if (gimple_in_ssa_p (cfun
))
3715 add_referenced_var (istart0
);
3716 add_referenced_var (iend0
);
3719 /* See if we need to bias by LLONG_MIN. */
3720 if (fd
->iter_type
== long_long_unsigned_type_node
3721 && TREE_CODE (type
) == INTEGER_TYPE
3722 && !TYPE_UNSIGNED (type
))
3726 if (fd
->loop
.cond_code
== LT_EXPR
)
3729 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
3733 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
3736 if (TREE_CODE (n1
) != INTEGER_CST
3737 || TREE_CODE (n2
) != INTEGER_CST
3738 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
3739 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
3742 entry_bb
= region
->entry
;
3743 cont_bb
= region
->cont
;
3745 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
3746 gcc_assert (broken_loop
3747 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
3748 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
3749 l1_bb
= single_succ (l0_bb
);
3752 l2_bb
= create_empty_bb (cont_bb
);
3753 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
);
3754 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
3758 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
3759 exit_bb
= region
->exit
;
3761 gsi
= gsi_last_bb (entry_bb
);
3763 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
3764 if (fd
->collapse
> 1)
3766 /* collapsed loops need work for expansion in SSA form. */
3767 gcc_assert (!gimple_in_ssa_p (cfun
));
3768 counts
= (tree
*) alloca (fd
->collapse
* sizeof (tree
));
3769 for (i
= 0; i
< fd
->collapse
; i
++)
3771 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
3773 if (POINTER_TYPE_P (itype
))
3774 itype
= lang_hooks
.types
.type_for_size (TYPE_PRECISION (itype
), 0);
3775 t
= build_int_cst (itype
, (fd
->loops
[i
].cond_code
== LT_EXPR
3777 t
= fold_build2 (PLUS_EXPR
, itype
,
3778 fold_convert (itype
, fd
->loops
[i
].step
), t
);
3779 t
= fold_build2 (PLUS_EXPR
, itype
, t
,
3780 fold_convert (itype
, fd
->loops
[i
].n2
));
3781 t
= fold_build2 (MINUS_EXPR
, itype
, t
,
3782 fold_convert (itype
, fd
->loops
[i
].n1
));
3783 if (TYPE_UNSIGNED (itype
) && fd
->loops
[i
].cond_code
== GT_EXPR
)
3784 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
3785 fold_build1 (NEGATE_EXPR
, itype
, t
),
3786 fold_build1 (NEGATE_EXPR
, itype
,
3787 fold_convert (itype
,
3788 fd
->loops
[i
].step
)));
3790 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
,
3791 fold_convert (itype
, fd
->loops
[i
].step
));
3792 t
= fold_convert (type
, t
);
3793 if (TREE_CODE (t
) == INTEGER_CST
)
3797 counts
[i
] = create_tmp_var (type
, ".count");
3798 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3799 true, GSI_SAME_STMT
);
3800 stmt
= gimple_build_assign (counts
[i
], t
);
3801 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3803 if (SSA_VAR_P (fd
->loop
.n2
))
3809 t
= fold_build2 (MULT_EXPR
, type
, fd
->loop
.n2
, counts
[i
]);
3810 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3811 true, GSI_SAME_STMT
);
3813 stmt
= gimple_build_assign (fd
->loop
.n2
, t
);
3814 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3818 if (in_combined_parallel
)
3820 /* In a combined parallel loop, emit a call to
3821 GOMP_loop_foo_next. */
3822 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
3823 build_fold_addr_expr (istart0
),
3824 build_fold_addr_expr (iend0
));
3828 tree t0
, t1
, t2
, t3
, t4
;
3829 /* If this is not a combined parallel loop, emit a call to
3830 GOMP_loop_foo_start in ENTRY_BB. */
3831 t4
= build_fold_addr_expr (iend0
);
3832 t3
= build_fold_addr_expr (istart0
);
3833 t2
= fold_convert (fd
->iter_type
, fd
->loop
.step
);
3834 if (POINTER_TYPE_P (type
)
3835 && TYPE_PRECISION (type
) != TYPE_PRECISION (fd
->iter_type
))
3837 /* Avoid casting pointers to integer of a different size. */
3839 = lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
), 0);
3840 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, fd
->loop
.n2
));
3841 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, fd
->loop
.n1
));
3845 t1
= fold_convert (fd
->iter_type
, fd
->loop
.n2
);
3846 t0
= fold_convert (fd
->iter_type
, fd
->loop
.n1
);
3850 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
3851 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
3853 if (fd
->iter_type
== long_integer_type_node
)
3857 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
3858 t
= build_call_expr (builtin_decl_explicit (start_fn
),
3859 6, t0
, t1
, t2
, t
, t3
, t4
);
3862 t
= build_call_expr (builtin_decl_explicit (start_fn
),
3863 5, t0
, t1
, t2
, t3
, t4
);
3871 /* The GOMP_loop_ull_*start functions have additional boolean
3872 argument, true for < loops and false for > loops.
3873 In Fortran, the C bool type can be different from
3874 boolean_type_node. */
3875 bfn_decl
= builtin_decl_explicit (start_fn
);
3876 c_bool_type
= TREE_TYPE (TREE_TYPE (bfn_decl
));
3877 t5
= build_int_cst (c_bool_type
,
3878 fd
->loop
.cond_code
== LT_EXPR
? 1 : 0);
3881 tree bfn_decl
= builtin_decl_explicit (start_fn
);
3882 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
3883 t
= build_call_expr (bfn_decl
, 7, t5
, t0
, t1
, t2
, t
, t3
, t4
);
3886 t
= build_call_expr (builtin_decl_explicit (start_fn
),
3887 6, t5
, t0
, t1
, t2
, t3
, t4
);
3890 if (TREE_TYPE (t
) != boolean_type_node
)
3891 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
3892 t
, build_int_cst (TREE_TYPE (t
), 0));
3893 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3894 true, GSI_SAME_STMT
);
3895 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
3897 /* Remove the GIMPLE_OMP_FOR statement. */
3898 gsi_remove (&gsi
, true);
3900 /* Iteration setup for sequential loop goes in L0_BB. */
3901 gsi
= gsi_start_bb (l0_bb
);
3904 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
3905 if (POINTER_TYPE_P (type
))
3906 t
= fold_convert (lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
),
3908 t
= fold_convert (type
, t
);
3909 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3910 false, GSI_CONTINUE_LINKING
);
3911 stmt
= gimple_build_assign (fd
->loop
.v
, t
);
3912 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3916 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
3917 if (POINTER_TYPE_P (type
))
3918 t
= fold_convert (lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
),
3920 t
= fold_convert (type
, t
);
3921 iend
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3922 false, GSI_CONTINUE_LINKING
);
3923 if (fd
->collapse
> 1)
3925 tree tem
= create_tmp_var (type
, ".tem");
3927 stmt
= gimple_build_assign (tem
, fd
->loop
.v
);
3928 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3929 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
3931 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
), itype
;
3933 if (POINTER_TYPE_P (vtype
))
3934 itype
= lang_hooks
.types
.type_for_size (TYPE_PRECISION (vtype
), 0);
3935 t
= fold_build2 (TRUNC_MOD_EXPR
, type
, tem
, counts
[i
]);
3936 t
= fold_convert (itype
, t
);
3937 t
= fold_build2 (MULT_EXPR
, itype
, t
,
3938 fold_convert (itype
, fd
->loops
[i
].step
));
3939 if (POINTER_TYPE_P (vtype
))
3940 t
= fold_build_pointer_plus (fd
->loops
[i
].n1
, t
);
3942 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loops
[i
].n1
, t
);
3943 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3944 false, GSI_CONTINUE_LINKING
);
3945 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
3946 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3949 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, tem
, counts
[i
]);
3950 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3951 false, GSI_CONTINUE_LINKING
);
3952 stmt
= gimple_build_assign (tem
, t
);
3953 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3960 /* Code to control the increment and predicate for the sequential
3961 loop goes in the CONT_BB. */
3962 gsi
= gsi_last_bb (cont_bb
);
3963 stmt
= gsi_stmt (gsi
);
3964 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
3965 vmain
= gimple_omp_continue_control_use (stmt
);
3966 vback
= gimple_omp_continue_control_def (stmt
);
3968 if (POINTER_TYPE_P (type
))
3969 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
3971 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
3972 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3973 true, GSI_SAME_STMT
);
3974 stmt
= gimple_build_assign (vback
, t
);
3975 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3977 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, vback
, iend
);
3978 stmt
= gimple_build_cond_empty (t
);
3979 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3981 /* Remove GIMPLE_OMP_CONTINUE. */
3982 gsi_remove (&gsi
, true);
3984 if (fd
->collapse
> 1)
3986 basic_block last_bb
, bb
;
3989 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
3991 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
);
3993 bb
= create_empty_bb (last_bb
);
3994 gsi
= gsi_start_bb (bb
);
3996 if (i
< fd
->collapse
- 1)
3998 e
= make_edge (last_bb
, bb
, EDGE_FALSE_VALUE
);
3999 e
->probability
= REG_BR_PROB_BASE
/ 8;
4001 t
= fd
->loops
[i
+ 1].n1
;
4002 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
4003 false, GSI_CONTINUE_LINKING
);
4004 stmt
= gimple_build_assign (fd
->loops
[i
+ 1].v
, t
);
4005 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4010 set_immediate_dominator (CDI_DOMINATORS
, bb
, last_bb
);
4012 if (POINTER_TYPE_P (vtype
))
4013 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, fd
->loops
[i
].step
);
4015 t
= fold_build2 (PLUS_EXPR
, vtype
, fd
->loops
[i
].v
,
4017 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
4018 false, GSI_CONTINUE_LINKING
);
4019 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
4020 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4024 t
= fd
->loops
[i
].n2
;
4025 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4026 false, GSI_CONTINUE_LINKING
);
4027 t
= fold_build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
4029 stmt
= gimple_build_cond_empty (t
);
4030 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4031 e
= make_edge (bb
, l1_bb
, EDGE_TRUE_VALUE
);
4032 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4035 make_edge (bb
, l1_bb
, EDGE_FALLTHRU
);
4040 /* Emit code to get the next parallel iteration in L2_BB. */
4041 gsi
= gsi_start_bb (l2_bb
);
4043 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
4044 build_fold_addr_expr (istart0
),
4045 build_fold_addr_expr (iend0
));
4046 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4047 false, GSI_CONTINUE_LINKING
);
4048 if (TREE_TYPE (t
) != boolean_type_node
)
4049 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
4050 t
, build_int_cst (TREE_TYPE (t
), 0));
4051 stmt
= gimple_build_cond_empty (t
);
4052 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4055 /* Add the loop cleanup function. */
4056 gsi
= gsi_last_bb (exit_bb
);
4057 if (gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
4058 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT
);
4060 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END
);
4061 stmt
= gimple_build_call (t
, 0);
4062 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
4063 gsi_remove (&gsi
, true);
4065 /* Connect the new blocks. */
4066 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
4067 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
4073 e
= find_edge (cont_bb
, l3_bb
);
4074 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
4076 phis
= phi_nodes (l3_bb
);
4077 for (gsi
= gsi_start (phis
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4079 gimple phi
= gsi_stmt (gsi
);
4080 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
4081 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
4085 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
4086 if (fd
->collapse
> 1)
4088 e
= find_edge (cont_bb
, l1_bb
);
4090 e
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
4094 e
= find_edge (cont_bb
, l1_bb
);
4095 e
->flags
= EDGE_TRUE_VALUE
;
4097 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4098 find_edge (cont_bb
, l2_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
4099 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
4101 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
4102 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
4103 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
4104 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
4105 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
4106 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
4107 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
4108 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
  */
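/* Worked example (illustrative): n = 103 iterations, nthreads = 4.
   Then q = 25 and tt = 3, so threads 0-2 each take q+1 = 26 iterations
   and thread 3 takes 25:

	thread 0: s0 =  0, e0 =  26
	thread 1: s0 = 26, e0 =  52
	thread 2: s0 = 52, e0 =  78
	thread 3: s0 = 78, e0 = 103

   i.e. the remainder TT is spread one extra iteration per low-numbered
   thread, and no runtime library call is needed to hand out work.  */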
static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge ep;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  q = create_tmp_var (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_var (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  second_bb = split_block (entry_bb, stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
		     GSI_SAME_STMT);
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
				       build_int_cst (itype, 1));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  third_bb = split_block (second_bb, stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (vmain, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = REG_BR_PROB_BASE / 4;
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
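
/* An illustrative sketch, not part of the original sources: with CHUNK = 2,
   nthreads = 2 and n = 10 the chunks are handed out round-robin.  On trip 0
   thread 0 runs iterations 0-1 and thread 1 runs 2-3, on trip 1 they run 4-5
   and 6-7, and on trip 2 thread 0 runs 8-9 while thread 1 computes
   s0 = 10 >= n and branches to L4.  The corresponding user loop would be

	#pragma omp for schedule(static, 2)
	for (i = 0; i < 10; i++)
	  body (i);
*/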
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
	      == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (v_main, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
	   gsi_next (&psi), ++i)
	{
	  gimple nphi;
	  source_location locus;

	  phi = gsi_stmt (psi);
	  t = gimple_phi_result (phi);
	  gcc_assert (t == redirect_edge_var_map_result (vm));
	  nphi = create_phi_node (t, iter_part_bb);
	  SSA_NAME_DEF_STMT (t) = nphi;

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  locus = gimple_phi_arg_location_from_edge (phi, se);

	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb, we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
	  locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
		   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
		   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
}
/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
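
/* An illustrative sketch, not part of the original sources: a directive like

	#pragma omp sections
	{
	  #pragma omp section
	    a ();
	  #pragma omp section
	    b ();
	}

   is therefore driven by a dispatch loop roughly equivalent to

	for (v = GOMP_sections_start (2); v; v = GOMP_sections_next ())
	  switch (v)
	    {
	    case 1: a (); break;
	    case 2: b (); break;
	    }

   where both runtime calls return 0 once no section bodies remain for the
   calling thread.  */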
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
	l2 = gimple_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = gsi_last_bb (e->dest);
	  l2 = NULL_TREE;
	  if (gsi_end_p (si)
	      || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	    l2 = gimple_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = gsi_last_bb (e->dest);
		if (gsi_end_p (si)
		    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
		  {
		    l2 = gimple_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
	t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
      else
	t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */

static bool
expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val)
{
  /* FIXME */
  (void) load_bb;
  (void) addr;
  (void) loaded_val;
  return false;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr)
{
  /* FIXME */
  (void) load_bb;
  (void) addr;
  return false;
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */
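
/* An illustrative sketch, not part of the original sources: when this
   routine succeeds, an update such as

	#pragma omp atomic
	x += y;

   on an int-sized X is emitted as a single __sync_fetch_and_add (&x, y)
   call instead of a compare-and-swap loop or a mutex.  */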
static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  direct_optab optab, oldoptab, newoptab;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  bool need_old, need_new;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_SYNC_FETCH_AND_ADD_N;
      newbase = BUILT_IN_SYNC_ADD_AND_FETCH_N;
      optab = sync_add_optab;
      oldoptab = sync_old_add_optab;
      newoptab = sync_new_add_optab;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_SYNC_FETCH_AND_SUB_N;
      newbase = BUILT_IN_SYNC_SUB_AND_FETCH_N;
      optab = sync_add_optab;
      oldoptab = sync_old_add_optab;
      newoptab = sync_new_add_optab;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_SYNC_FETCH_AND_AND_N;
      newbase = BUILT_IN_SYNC_AND_AND_FETCH_N;
      optab = sync_and_optab;
      oldoptab = sync_old_and_optab;
      newoptab = sync_new_and_optab;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_SYNC_FETCH_AND_OR_N;
      newbase = BUILT_IN_SYNC_OR_AND_FETCH_N;
      optab = sync_ior_optab;
      oldoptab = sync_old_ior_optab;
      newoptab = sync_new_ior_optab;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_SYNC_FETCH_AND_XOR_N;
      newbase = BUILT_IN_SYNC_XOR_AND_FETCH_N;
      optab = sync_xor_optab;
      oldoptab = sync_old_xor_optab;
      newoptab = sync_new_xor_optab;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (need_new)
    {
      /* expand_sync_fetch_operation can always compensate when interested
	 in the new value.  */
      if (direct_optab_handler (newoptab, TYPE_MODE (itype))
	  == CODE_FOR_nothing
	  && direct_optab_handler (oldoptab, TYPE_MODE (itype))
	  == CODE_FOR_nothing)
	return false;
    }
  else if (need_old)
    {
      /* When interested in the old value, expand_sync_fetch_operation
	 can compensate only if the operation is reversible.  AND and OR
	 are not reversible.  */
      if (direct_optab_handler (oldoptab, TYPE_MODE (itype))
	  == CODE_FOR_nothing
	  && (oldbase == BUILT_IN_SYNC_FETCH_AND_AND_N
	      || oldbase == BUILT_IN_SYNC_FETCH_AND_OR_N
	      || direct_optab_handler (newoptab, TYPE_MODE (itype))
	      == CODE_FOR_nothing))
	return false;
    }
  else if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
  call = build_call_expr_loc (loc, decl, 2, addr,
			      fold_convert_loc (loc, itype, rhs));
  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;
  enum built_in_function fncode;

  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
				    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
      == CODE_FOR_nothing)
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
							   true), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
	  add_referenced_var (loadedi);
	  loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial
    = force_gimple_operand_gsi (&si,
				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
					iaddr,
					build_int_cst (TREE_TYPE (iaddr), 0)),
				true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gimple stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      if (gimple_in_ssa_p (cfun))
	add_referenced_var (old_vali);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
	   (build2 (NE_EXPR, boolean_type_node,
		    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      GOMP_atomic_start ();
      *addr = rhs;
      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */
static void
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
}
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it fails, we try to
   call expand_omp_atomic_pipeline, and if that fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */
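
/* An illustrative sketch, not part of the original sources: for

	#pragma omp atomic
	x += 1;

   with a 4-byte int X this ordinarily becomes __sync_fetch_and_add (&x, 1);
   if no matching fetch-and-op builtin is available, a
   __sync_val_compare_and_swap loop is emitted instead, and failing that the
   update is bracketed by GOMP_atomic_start ()/GOMP_atomic_end ().  */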
static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  FIXME: have some target hook signalize what loads
	     are actually atomic?  */
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val))
	    return;

	  /* Atomic store.  FIXME: have some target hook signalize what
	     stores are actually atomic?  */
	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}

      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
	{
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */
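
/* An illustrative sketch, not part of the original sources: a bare

	#pragma omp single
	  body ();

   therefore lowers to roughly

	if (GOMP_single_start ())
	  body ();
	GOMP_barrier ();

   GOMP_single_start returns true in exactly one of the threads reaching the
   construct, and the trailing barrier is dropped when 'nowait' is given.  */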
static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */
static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
			 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
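
/* An illustrative sketch, not part of the original sources: an unnamed

	#pragma omp critical
	  body ();

   lowers to GOMP_critical_start (); body (); GOMP_critical_end ();
   while a named '#pragma omp critical (foo)' uses
   GOMP_critical_name_start/_end on a common symbol named
   ".gomp_critical_user_foo", created below, so that every translation unit
   naming the same critical section contends on the same mutex.  */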
6055 static GTY((param1_is (tree
), param2_is (tree
)))
6056 splay_tree critical_name_mutexes
;
6059 lower_omp_critical (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
6062 tree name
, lock
, unlock
;
6063 gimple stmt
= gsi_stmt (*gsi_p
), bind
;
6064 location_t loc
= gimple_location (stmt
);
6066 struct gimplify_ctx gctx
;
6068 name
= gimple_omp_critical_name (stmt
);
6074 if (!critical_name_mutexes
)
6075 critical_name_mutexes
6076 = splay_tree_new_ggc (splay_tree_compare_pointers
,
6077 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s
,
6078 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s
);
6080 n
= splay_tree_lookup (critical_name_mutexes
, (splay_tree_key
) name
);
6085 decl
= create_tmp_var_raw (ptr_type_node
, NULL
);
6087 new_str
= ACONCAT ((".gomp_critical_user_",
6088 IDENTIFIER_POINTER (name
), NULL
));
6089 DECL_NAME (decl
) = get_identifier (new_str
);
6090 TREE_PUBLIC (decl
) = 1;
6091 TREE_STATIC (decl
) = 1;
6092 DECL_COMMON (decl
) = 1;
6093 DECL_ARTIFICIAL (decl
) = 1;
6094 DECL_IGNORED_P (decl
) = 1;
6095 varpool_finalize_decl (decl
);
6097 splay_tree_insert (critical_name_mutexes
, (splay_tree_key
) name
,
6098 (splay_tree_value
) decl
);
6101 decl
= (tree
) n
->value
;
6103 lock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START
);
6104 lock
= build_call_expr_loc (loc
, lock
, 1, build_fold_addr_expr_loc (loc
, decl
));
6106 unlock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END
);
6107 unlock
= build_call_expr_loc (loc
, unlock
, 1,
6108 build_fold_addr_expr_loc (loc
, decl
));
6112 lock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START
);
6113 lock
= build_call_expr_loc (loc
, lock
, 0);
6115 unlock
= builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END
);
6116 unlock
= build_call_expr_loc (loc
, unlock
, 0);
6119 push_gimplify_context (&gctx
);
6121 block
= make_node (BLOCK
);
6122 bind
= gimple_build_bind (NULL
, gimple_seq_alloc_with_stmt (stmt
), block
);
6124 tbody
= gimple_bind_body (bind
);
6125 gimplify_and_add (lock
, &tbody
);
6126 gimple_bind_set_body (bind
, tbody
);
6128 lower_omp (gimple_omp_body (stmt
), ctx
);
6129 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
6130 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
6131 gimple_omp_set_body (stmt
, NULL
);
6133 tbody
= gimple_bind_body (bind
);
6134 gimplify_and_add (unlock
, &tbody
);
6135 gimple_bind_set_body (bind
, tbody
);
6137 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
6139 pop_gimplify_context (bind
);
6140 gimple_bind_append_vars (bind
, ctx
->block_vars
);
6141 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
6142 gsi_replace (gsi_p
, bind
, true);
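/* Illustrative sketch (editorial): a named critical section

       #pragma omp critical (lock1)
         body;

   is lowered to calls through a shared common symbol that all
   translation units using the same name agree on:

       GOMP_critical_name_start (&.gomp_critical_user_lock1);
       body;
       GOMP_critical_name_end (&.gomp_critical_user_lock1);

   while the unnamed form uses GOMP_critical_start ()/GOMP_critical_end ()
   with no argument.  */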
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0 is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
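/* Illustrative sketch (editorial): for

       #pragma omp for lastprivate(x)
       for (i = 0; i < n; i++) ...

   the per-thread copy-out emitted by lower_lastprivate_clauses is gated
   on the negated loop condition, roughly

       if (i >= n)          /* or i == n when the step is +1 or -1 */
         x = <private copy of x>;

   and i is pre-initialized (to n1, or to 0 in the EQ_EXPR case) so that
   threads which receive no iterations never satisfy the guard.  */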
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}
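/* Illustrative sketch (editorial): after lowering, the sequence built in
   BODY has roughly this shape, still using GIMPLE_OMP_* markers that
   pass_expand_omp later turns into explicit loops and libgomp calls:

       <privatization / firstprivate setup>        (lower_rec_input_clauses)
       <pre-body, lowered bound expressions>
       GIMPLE_OMP_FOR <V, N1, N2, STEP>
	 <loop body>
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction and lastprivate copy-out>
       GIMPLE_OMP_RETURN (nowait?)

   all wrapped in a new GIMPLE_BIND that replaces the original statement.  */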
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}
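/* Illustrative sketch (editorial): this callback is used from
   lower_omp_taskreg on the body of a parallel region.  The counter it
   maintains ends up as 1 only when the body consists of exactly one
   workshare construct, e.g.

       #pragma omp parallel
       {
	 #pragma omp for
	 for (...) ...
       }

   in which case the parallel is marked "combined" so that later
   expansion can use the combined parallel+workshare libgomp entry
   points.  */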
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
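/* Illustrative sketch (editorial; the record names below are made up):
   for

       #pragma omp task firstprivate (v) shared (s)

   the generated copy function receives two pointers -- the destination
   record allocated by GOMP_task and the sender record filled in by the
   encountering thread -- and performs, roughly,

       void task_copyfn (struct dst_rec *dst, struct src_rec *src)
       {
	 dst->s = src->s;      /* shared: copy the pointer              */
	 dst->v = src->v;      /* firstprivate: plain copy, or the
				  language's copy constructor           */
       }

   with extra passes to remap variably sized types and to fix up the
   pointers behind VLA firstprivate variables.  */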
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
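/* Illustrative sketch (editorial): the data marshalling built here for

       #pragma omp parallel shared (a) firstprivate (b)

   creates a local sender object and seeds the child's receiver pointer,
   roughly

       .omp_data_o.a = &a;       /* lower_send_shared_vars (when the
				    field is passed by reference)       */
       .omp_data_o.b = b;        /* lower_send_clauses                  */
       GIMPLE_OMP_PARALLEL <child fn, .omp_data_o>
	 .omp_data_i = &.omp_data_o;   /* receiver_decl initialization  */
	 ... lowered body referencing .omp_data_i->a, .omp_data_i->b ...
       GIMPLE_OMP_RETURN

   The actual GOMP_parallel_start call and the outlining into the child
   function are done later by pass_expand_omp.  */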
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi = gsi_start (body);
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}
struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;


  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
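/* Illustrative sketch (editorial): the kind of code these passes reject:

       #pragma omp parallel
       {
	 if (cond)
	   goto out;      /* error: invalid branch to/from an OpenMP
			     structured block                         */
       }
     out:;

   Pass 1 below records the context of every label; pass 2 checks each
   branch against the context of its destination label.  */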
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}
static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}
struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
#include "gt-omp-low.h"