/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
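
/* Illustrative sketch (not part of the original sources): after both
   phases have run, a directive such as

	#pragma omp parallel shared (a)
	  body;

   ends up rewritten along the lines of

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   where foo._omp_fn.0 is the outlined child function that receives a
   pointer to the .omp_data_s record implementing the data sharing
   clauses.  The exact runtime entry points and record layout are
   decided later in this file and by libgomp; treat this as a sketch
   only.  */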
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}
      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc, PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
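
/* Illustrative note (not part of the original sources): for a collapsed
   nest such as

	#pragma omp for collapse (2)
	for (i = 0; i < N; i++)
	  for (j = 0; j < M; j++)
	    ...

   the code above describes a single logical iteration space running
   from 0 to .count with step 1 and LT_EXPR as the condition, where
   .count is the product of the per-loop trip counts (folded to a
   constant when N and M are compile-time constants).  */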
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
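
/* Illustrative note (not part of the original sources): for a combined
   region such as

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 0; i < n; i++)
	  ...

   the vector built above holds the loop's lower bound, upper bound and
   step converted to long, plus the chunk size, i.e. roughly
   { 0, n, 1, 4 }, ready to be appended to the arguments of the combined
   parallel+workshare library call.  */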
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
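
/* Illustrative note (not part of the original sources): for a directive
   like "#pragma omp parallel shared (n, buf)" where N is a plain scalar
   whose address is never taken, N can be copied in and out of the
   child's .omp_data_s record by value, whereas an aggregate such as
   "char buf[64]" (or any addressable variable) is always passed by
   pointer so that every thread sees the same object.  */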
/* Create a new VAR_DECL and copy information from VAR to it.  */

static tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
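
/* Illustrative note (not part of the original sources): for

	#pragma omp parallel shared (x) firstprivate (y)

   the scan phase calls install_var_field for X and Y, so the
   communication record (.omp_data_s) ends up with one field per
   communicated variable; a variable becomes a pointer field whenever
   use_pointer_for_field decided it has to be shared by reference.  */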
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
/* Helper function scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}
/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
      inside the body of the outer parallel region.  But since we are
      emitting this copy operation while expanding the inner parallel
      directive, we need to access the CTX structure of the outer
      parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
      return build_zero_cst (type);

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}
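/* Illustrative note (not part of the original source): the values built
   above are the usual reduction identities.  For example, with

       #pragma omp parallel for reduction (max:m)

   each thread's private M is seeded with -INF (or the most negative
   finite value when infinities are not honored); integral types use
   TYPE_MIN_VALUE.  Logical-and and multiplication start at 1, or/xor
   style reductions at 0, and bitwise-and at all-ones.  */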
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
                         omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
        {
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
          tree var, new_var;
          bool by_ref;
          location_t clause_loc = OMP_CLAUSE_LOCATION (c);

          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
                {
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
                  continue;
                }
              break;
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_REDUCTION:
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                lastprivate_firstprivate = true;
              break;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gimple stmt;
                  tree tmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca */
                  stmt
                    = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node, NULL);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  Note that this doesn't apply to
                 C++, since reference types are disallowed in data
                 sharing clauses there, except for NRV optimized
                 return values.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  const char *name = NULL;
                  if (DECL_NAME (var))
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                          name);
                  gimple_add_tmp_var (x);
                  TREE_ADDRESSABLE (x) = 1;
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else
                x = build_call_expr_loc (clause_loc,
                                         built_in_decls[BUILT_IN_ALLOCA], 1, x);

              x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
              gimplify_assign (new_var, x, ilist);

              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Shared global vars are just accessed directly.  */
              if (is_global_var (new_var))
                break;
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, ctx);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
                x = build_outer_var_ref (var, ctx);
              else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
                {
                  if (is_task_ctx (ctx))
                    x = build_receiver_ref (var, false, ctx);
                  else
                    x = build_outer_var_ref (var, ctx);
                }
              else
                x = NULL;
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
              if (x)
                gimplify_and_add (x, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  gimple_seq tseq = NULL;

                  dtor = x;
                  gimplify_stmt (&dtor, &tseq);
                  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
                }
              break;

            case OMP_CLAUSE_FIRSTPRIVATE:
              if (is_task_ctx (ctx))
                {
                  if (is_reference (var) || is_variable_sized (var))
                    goto do_dtor;
                  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
                                                                          ctx))
                           || use_pointer_for_field (var, NULL))
                    {
                      x = build_receiver_ref (var, false, ctx);
                      SET_DECL_VALUE_EXPR (new_var, x);
                      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                      goto do_dtor;
                    }
                }
              x = build_outer_var_ref (var, ctx);
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, NULL);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
                  x = build_outer_var_ref (var, ctx);

                  if (is_reference (var))
                    x = build_fold_addr_expr_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (placeholder, x);
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
                  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
                  gimple_seq_add_seq (ilist,
                                      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
                  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
                }
              else
                {
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  gimplify_assign (new_var, x, ilist);
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
                  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
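/* Sketch of the guard emitted just above (illustration only, not in the
   original source):

       if (__builtin_omp_get_thread_num () != 0)
         <copyin_seq>;
       GOMP_barrier ();   -- only when a copyin variable is passed by
                             reference, or a variable is both
                             firstprivate and lastprivate

   so only non-master threads copy the master's threadprivate values,
   and the barrier keeps the master from modifying them too early.  */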
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
                           omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
         with its parallel.  In that case, look for the clauses on the
         parallel statement itself.  */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
                                label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
        {
          var = OMP_CLAUSE_DECL (c);
          new_var = lookup_decl (var, ctx);

          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            {
              lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
              gimple_seq_add_seq (stmt_list,
                                  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
            }
          OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

          x = build_outer_var_ref (var, ctx);
          if (is_reference (var))
            new_var = build_simple_mem_ref_loc (clause_loc, new_var);
          x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
          gimplify_and_add (x, stmt_list);
        }
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
        {
          /* If this was a workshare clause, see if it had been combined
             with its parallel.  In that case, continue looking for the
             clauses also on the parallel statement itself.  */
          if (is_parallel_ctx (ctx))
            break;

          ctx = ctx->outer;
          if (ctx == NULL || !is_parallel_ctx (ctx))
            break;

          c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                               OMP_CLAUSE_LASTPRIVATE);
          par_clauses = true;
        }
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
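/* Illustration (not in the original source): with a non-NULL PREDICATE,
   e.g. a comparison of the loop variable against its final value, the
   sequence built above is roughly

       if (arm1 <cond> arm2) goto lab_true; else goto lab;
     lab_true:
       orig_var = private_copy;   -- one omp_clause_assign_op per clause
     lab:

   so only the thread that executed the sequentially last iteration
   writes its private value back.  */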
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
          {
            /* Never use OMP_ATOMIC for array reductions.  */
            count = -1;
            break;
          }
        count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
        new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
         identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;

      if (count == 1)
        {
          tree addr = build_fold_addr_expr_loc (clause_loc, ref);

          addr = save_expr (addr);
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
          x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
          gimplify_and_add (x, stmt_seqp);
          return;
        }

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

          if (is_reference (var))
            ref = build_fold_addr_expr_loc (clause_loc, ref);
          SET_DECL_VALUE_EXPR (placeholder, ref);
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
          lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
          gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
          OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
        }
      else
        {
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
          ref = build_outer_var_ref (var, ctx);
          gimplify_assign (ref, x, &sub_seq);
        }
    }

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
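/* Illustration (not in the original source): for a single scalar clause
   such as "reduction (+:sum)" the code above emits one atomic update of
   the shared copy, morally

       #pragma omp atomic
       sum_shared += sum_private;

   while several clauses (or user-defined/array reductions) are merged
   under the runtime lock:

       GOMP_atomic_start ();
       <all merge statements>
       GOMP_atomic_end ();  */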
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
                           omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
        {
          x = build_fold_addr_expr_loc (clause_loc, new_var);
          x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
        }
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
        {
          ref = fold_convert_loc (clause_loc,
                                  build_pointer_type (TREE_TYPE (new_var)),
                                  ref);
          ref = build_fold_indirect_ref_loc (clause_loc, ref);
        }
      if (is_reference (var))
        {
          ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
          ref = build_simple_mem_ref_loc (clause_loc, ref);
          new_var = build_simple_mem_ref_loc (clause_loc, new_var);
        }
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
                    omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            break;
          continue;
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          break;
        default:
          continue;
        }

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
          && is_global_var (var))
        continue;
      if (is_variable_sized (val))
        continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
          do_in = true;
          break;

        case OMP_CLAUSE_LASTPRIVATE:
          if (by_ref || is_reference (val))
            {
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                continue;
              do_in = true;
            }
          else
            {
              do_out = true;
              if (lang_hooks.decls.omp_private_outer_ref (val))
                do_in = true;
            }
          break;

        case OMP_CLAUSE_REDUCTION:
          do_in = true;
          do_out = !(by_ref || is_reference (val));
          break;

        default:
          gcc_unreachable ();
        }

      if (do_in)
        {
          ref = build_sender_ref (val, ctx);
          x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
          gimplify_assign (ref, x, ilist);
          if (is_task_ctx (ctx))
            DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
        }

      if (do_out)
        {
          ref = build_sender_ref (val, ctx);
          gimplify_assign (var, ref, olist);
        }
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
        continue;

      /* If CTX is a nested parallel directive, find the immediately
         enclosing parallel or workshare construct that contains a
         mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
        {
          x = build_sender_ref (ovar, ctx);
          var = build_fold_addr_expr (var);
          gimplify_assign (x, var, ilist);
        }
      else
        {
          x = build_sender_ref (ovar, ctx);
          gimplify_assign (x, var, ilist);

          if (!TREE_READONLY (var)
              /* We don't need to receive a new reference to a result
                 or parm decl.  In fact we may not store to it as we will
                 invalidate any pending RSO and generate wrong gimple
                 during inlining.  */
              && !((TREE_CODE (var) == RESULT_DECL
                    || TREE_CODE (var) == PARM_DECL)
                   && DECL_BY_REFERENCE (var)))
            {
              x = build_sender_ref (ovar, ctx);
              gimplify_assign (var, x, olist);
            }
        }
    }
}
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
                      gimple entry_stmt, VEC(tree,gc) *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  int start_ix;
  location_t clause_loc;
  VEC(tree,gc) *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
        {
        case GIMPLE_OMP_FOR:
          gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
          start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
                     + (region->inner->sched_kind
                        == OMP_CLAUSE_SCHEDULE_RUNTIME
                        ? 3 : region->inner->sched_kind);
          break;
        case GIMPLE_OMP_SECTIONS:
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
        val = fold_build2_loc (clause_loc,
                               EQ_EXPR, unsigned_type_node, cond,
                               build_int_cst (TREE_TYPE (cond), 0));
      else
        {
          basic_block cond_bb, then_bb, else_bb;
          edge e, e_then, e_else;
          tree tmp_then, tmp_else, tmp_join, tmp_var;

          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
          if (gimple_in_ssa_p (cfun))
            {
              tmp_then = make_ssa_name (tmp_var, NULL);
              tmp_else = make_ssa_name (tmp_var, NULL);
              tmp_join = make_ssa_name (tmp_var, NULL);
            }
          else
            {
              tmp_then = tmp_var;
              tmp_else = tmp_var;
              tmp_join = tmp_var;
            }

          e = split_block (bb, NULL);
          cond_bb = e->src;
          bb = e->dest;
          remove_edge (e);

          then_bb = create_empty_bb (cond_bb);
          else_bb = create_empty_bb (then_bb);
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

          stmt = gimple_build_cond_empty (cond);
          gsi = gsi_start_bb (cond_bb);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (then_bb);
          stmt = gimple_build_assign (tmp_then, val);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (else_bb);
          stmt = gimple_build_assign
                   (tmp_else, build_int_cst (unsigned_type_node, 1));
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

          if (gimple_in_ssa_p (cfun))
            {
              gimple phi = create_phi_node (tmp_join, bb);
              SSA_NAME_DEF_STMT (tmp_join) = phi;
              add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
              add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
            }

          val = tmp_join;
        }

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
                                      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
  VEC_quick_push (tree, args, t2);
  VEC_quick_push (tree, args, t1);
  VEC_quick_push (tree, args, val);
  VEC_splice (tree, args, ws_args);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
                               built_in_decls[start_ix], args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
                           gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
                           built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}
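/* Sketch of the call sequence emitted above (illustration only, not in
   the original source):

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   The encountering thread launches the team and then runs one copy of
   the body itself.  For a combined parallel+workshare region the start
   call is e.g. GOMP_parallel_loop_static_start with the extra WS_ARGS
   appended.  */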
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
                       gimple_omp_task_arg_size (entry_stmt),
                       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}
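/* For reference (sketch, not in the original source): the libgomp call
   built above has the shape

       GOMP_task (child_fn, &.omp_data_o, copy_fn,
                  arg_size, arg_align, if_clause, flags);

   where FLAGS currently only encodes the "untied" bit, and a false
   IF_CLAUSE makes the runtime execute the task immediately instead of
   deferring it.  */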
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = built_in_decls[BUILT_IN_TRAP];

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
                        GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
        continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
          && !gimple_omp_return_nowait_p (stmt))
        {
          /* OpenMP 3.0 tasks unfortunately prevent this optimization
             in many cases.  If there could be tasks queued, the barrier
             might be needed to let the tasks run before some local
             variable of the parallel that the task uses as shared
             runs out of scope.  The task can be spawned either
             from within current function (this would be easy to check)
             or from some function it calls and gets passed an address
             of such a variable.  */
          if (any_addressable_vars < 0)
            {
              gimple parallel_stmt = last_stmt (region->entry);
              tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
              tree local_decls, block, decl;
              unsigned ix;

              any_addressable_vars = 0;
              FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
                if (TREE_ADDRESSABLE (decl))
                  {
                    any_addressable_vars = 1;
                    break;
                  }
              for (block = gimple_block (stmt);
                   !any_addressable_vars
                   && block
                   && TREE_CODE (block) == BLOCK;
                   block = BLOCK_SUPERCONTEXT (block))
                {
                  for (local_decls = BLOCK_VARS (block);
                       local_decls;
                       local_decls = DECL_CHAIN (local_decls))
                    if (TREE_ADDRESSABLE (local_decls))
                      {
                        any_addressable_vars = 1;
                        break;
                      }
                  if (block == gimple_block (parallel_stmt))
                    break;
                }
            }
          if (!any_addressable_vars)
            gimple_omp_return_set_nowait (stmt);
        }
    }
}
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
        {
          region = region->next;
          remove_exit_barriers (region);
        }
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
                      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
                                          OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple call = gsi_stmt (gsi);
        tree decl;

        if (is_gimple_call (call)
            && (decl = gimple_call_fndecl (call))
            && DECL_EXTERNAL (decl)
            && TREE_PUBLIC (decl)
            && DECL_INITIAL (decl) == NULL)
          {
            tree built_in;

            if (DECL_NAME (decl) == thr_num_id)
              {
                /* In #pragma omp task untied omp_get_thread_num () can change
                   during the execution of the task region.  */
                if (untied_task)
                  continue;
                built_in = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
              }
            else if (DECL_NAME (decl) == num_thr_id)
              built_in = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
            else
              continue;

            if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
                || gimple_call_num_args (call) != 0)
              continue;

            if (flag_exceptions && !TREE_NOTHROW (decl))
              continue;

            if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
                || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
                                        TREE_TYPE (TREE_TYPE (built_in))))
              continue;

            gimple_call_set_fndecl (call, built_in);
          }
      }
}
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  VEC(tree,gc) *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
         the region, in which case all we need to do is make the
         sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
                  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
        {
          exit_succ_e = single_succ_edge (exit_bb);
          make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
        }
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
         function, then the very first statement (except possible
         tree profile counter updates) of the parallel body
         is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
         &.OMP_DATA_O is passed as an argument to the child function,
         we need to replace it with the argument as seen by the child
         function.

         In most cases, this will end up being the identity assignment
         .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
         a function call that has been inlined, the original PARM_DECL
         .OMP_DATA_I may have been converted into a different local
         variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
        {
          basic_block entry_succ_bb = single_succ (entry_bb);
          gimple_stmt_iterator gsi;
          tree arg, narg;
          gimple parcopy_stmt = NULL;

          for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
            {
              gcc_assert (!gsi_end_p (gsi));
              stmt = gsi_stmt (gsi);
              if (gimple_code (stmt) != GIMPLE_ASSIGN)
                continue;

              if (gimple_num_ops (stmt) == 2)
                {
                  tree arg = gimple_assign_rhs1 (stmt);

                  /* We're ignoring the subcode because we're
                     effectively doing a STRIP_NOPS.  */

                  if (TREE_CODE (arg) == ADDR_EXPR
                      && TREE_OPERAND (arg, 0)
                         == gimple_omp_taskreg_data_arg (entry_stmt))
                    {
                      parcopy_stmt = stmt;
                      break;
                    }
                }
            }

          gcc_assert (parcopy_stmt != NULL);
          arg = DECL_ARGUMENTS (child_fn);

          if (!gimple_in_ssa_p (cfun))
            {
              if (gimple_assign_lhs (parcopy_stmt) == arg)
                gsi_remove (&gsi, true);
              else
                {
                  /* ?? Is setting the subcode really necessary ??  */
                  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
                  gimple_assign_set_rhs1 (parcopy_stmt, arg);
                }
            }
          else
            {
              /* If we are in ssa form, we must load the value from the default
                 definition of the argument.  That should not be defined now,
                 since the argument is not used uninitialized.  */
              gcc_assert (gimple_default_def (cfun, arg) == NULL);
              narg = make_ssa_name (arg, gimple_build_nop ());
              set_default_def (arg, narg);
              /* ?? Is setting the subcode really necessary ??  */
              gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
              gimple_assign_set_rhs1 (parcopy_stmt, narg);
              update_stmt (parcopy_stmt);
            }
        }

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
         rather than in containing function's local_decls chain,
         which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
        if (TREE_CODE (t) == VAR_DECL
            && TREE_STATIC (t)
            && !DECL_EXTERNAL (t))
          varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
        DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
         so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
                           || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
        {
          gsi = gsi_last_bb (exit_bb);
          gcc_assert (!gsi_end_p (gsi)
                      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
          stmt = gimple_build_return (NULL);
          gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
          gsi_remove (&gsi, true);
        }

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
        {
          push_cfun (child_cfun);
          init_tree_ssa (child_cfun);
          init_ssa_operands ();
          cfun->gimple_df->in_ssa_p = true;
          pop_cfun ();
          block = NULL_TREE;
        }
      else
        block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
        single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = VEC_length (tree, child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
        {
          t = VEC_index (tree, child_cfun->local_decls, srcidx);
          if (DECL_CONTEXT (t) == cfun->decl)
            continue;
          if (srcidx != dstidx)
            VEC_replace (tree, child_cfun->local_decls, dstidx, t);
          dstidx++;
        }
      if (dstidx != num)
        VEC_truncate (tree, child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
        = cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
         fixed in a following pass.  */
      push_cfun (child_cfun);
      save_current = current_function_decl;
      current_function_decl = child_fn;
      if (optimize)
        optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
         pass_cleanup_cfg isn't the first pass to happen with the
         new child, these dead EH edges might cause problems.
         Clean them up now.  */
      if (flag_exceptions)
        {
          basic_block bb;
          bool changed = false;

          FOR_EACH_BB (bb)
            changed |= gimple_purge_dead_eh_edges (bb);
          if (changed)
            cleanup_tree_cfg ();
        }
      if (gimple_in_ssa_p (cfun))
        update_ssa (TODO_update_ssa);
      current_function_decl = save_current;
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  update_ssa (TODO_update_ssa_only_virtuals);
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	count3 = (adj + N32 - N31) / STEP3;
	count2 = (adj + N22 - N21) / STEP2;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
      */

static void
expand_omp_for_generic (struct omp_region *region,
                        struct omp_for_data *fd,
                        enum built_in_function start_fn,
                        enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
              || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
        {
          n1 = fd->loop.n1;
          n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
        }
      else
        {
          n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
          n2 = fd->loop.n1;
        }
      if (TREE_CODE (n1) != INTEGER_CST
          || TREE_CODE (n2) != INTEGER_CST
          || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
        bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
              || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
        {
          tree itype = TREE_TYPE (fd->loops[i].v);

          if (POINTER_TYPE_P (itype))
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
          t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
                                     ? -1 : 1));
          t = fold_build2 (PLUS_EXPR, itype,
                           fold_convert (itype, fd->loops[i].step), t);
          t = fold_build2 (PLUS_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].n2));
          t = fold_build2 (MINUS_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].n1));
          if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
            t = fold_build2 (TRUNC_DIV_EXPR, itype,
                             fold_build1 (NEGATE_EXPR, itype, t),
                             fold_build1 (NEGATE_EXPR, itype,
                                          fold_convert (itype,
                                                        fd->loops[i].step)));
          else
            t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
                             fold_convert (itype, fd->loops[i].step));
          t = fold_convert (type, t);
          if (TREE_CODE (t) == INTEGER_CST)
            counts[i] = t;
          else
            {
              counts[i] = create_tmp_var (type, ".count");
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            true, GSI_SAME_STMT);
              stmt = gimple_build_assign (counts[i], t);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
            }
          if (SSA_VAR_P (fd->loop.n2))
            {
              if (i == 0)
                t = counts[0];
              else
                t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            true, GSI_SAME_STMT);
              stmt = gimple_build_assign (fd->loop.n2, t);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
            }
        }
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
         GOMP_loop_foo_next.  */
      t = build_call_expr (built_in_decls[next_fn], 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
         GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
          && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
        {
          /* Avoid casting pointers to integer of a different size.  */
          tree itype
            = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
          t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
          t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
        }
      else
        {
          t1 = fold_convert (fd->iter_type, fd->loop.n2);
          t0 = fold_convert (fd->iter_type, fd->loop.n1);
        }
      if (bias)
        {
          t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
          t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
        }
      if (fd->iter_type == long_integer_type_node)
        {
          if (fd->chunk_size)
            {
              t = fold_convert (fd->iter_type, fd->chunk_size);
              t = build_call_expr (built_in_decls[start_fn], 6,
                                   t0, t1, t2, t, t3, t4);
            }
          else
            t = build_call_expr (built_in_decls[start_fn], 5,
                                 t0, t1, t2, t3, t4);
        }
      else
        {
          tree t5;
          tree c_bool_type;

          /* The GOMP_loop_ull_*start functions have additional boolean
             argument, true for < loops and false for > loops.
             In Fortran, the C bool type can be different from
             boolean_type_node.  */
          c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
          t5 = build_int_cst (c_bool_type,
                              fd->loop.cond_code == LT_EXPR ? 1 : 0);
          if (fd->chunk_size)
            {
              t = fold_convert (fd->iter_type, fd->chunk_size);
              t = build_call_expr (built_in_decls[start_fn], 7,
                                   t5, t0, t1, t2, t, t3, t4);
            }
          else
            t = build_call_expr (built_in_decls[start_fn], 6,
                                 t5, t0, t1, t2, t3, t4);
        }
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
                     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
                                                       0), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
                                                       0), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_var (type, ".tem");

      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
        {
          tree vtype = TREE_TYPE (fd->loops[i].v), itype;
          itype = vtype;
          if (POINTER_TYPE_P (vtype))
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
          t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
          t = fold_convert (itype, t);
          t = fold_build2 (MULT_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].step));
          if (POINTER_TYPE_P (vtype))
            t = fold_build2 (POINTER_PLUS_EXPR, vtype,
                             fd->loops[i].n1, fold_convert (sizetype, t));
          else
            t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
          t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                        false, GSI_CONTINUE_LINKING);
          stmt = gimple_build_assign (fd->loops[i].v, t);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
          if (i != 0)
            {
              t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            false, GSI_CONTINUE_LINKING);
              stmt = gimple_build_assign (tem, t);
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
            }
        }
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
         loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
        t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
                         fold_convert (sizetype, fd->loop.step));
      else
        t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                    true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
        {
          basic_block last_bb, bb;

          last_bb = cont_bb;
          for (i = fd->collapse - 1; i >= 0; i--)
            {
              tree vtype = TREE_TYPE (fd->loops[i].v);

              bb = create_empty_bb (last_bb);
              gsi = gsi_start_bb (bb);

              if (i < fd->collapse - 1)
                {
                  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
                  e->probability = REG_BR_PROB_BASE / 8;

                  t = fd->loops[i + 1].n1;
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                                false, GSI_CONTINUE_LINKING);
                  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
                }
              else
                collapse_bb = bb;

              set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

              if (POINTER_TYPE_P (vtype))
                t = fold_build2 (POINTER_PLUS_EXPR, vtype,
                                 fd->loops[i].v,
                                 fold_convert (sizetype, fd->loops[i].step));
              else
                t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
                                 fd->loops[i].step);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            false, GSI_CONTINUE_LINKING);
              stmt = gimple_build_assign (fd->loops[i].v, t);
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

              if (i > 0)
                {
                  t = fd->loops[i].n2;
                  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                                false, GSI_CONTINUE_LINKING);
                  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
                                   fd->loops[i].v, t);
                  stmt = gimple_build_cond_empty (t);
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
                  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
                  e->probability = REG_BR_PROB_BASE * 7 / 8;
                }
              else
                make_edge (bb, l1_bb, EDGE_FALLTHRU);
              last_bb = bb;
            }
        }

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (built_in_decls[next_fn], 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
        t = fold_build2 (NE_EXPR, boolean_type_node,
                         t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
                   PHI_ARG_DEF_FROM_EDGE (phi, e));
        }
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
        {
          e = find_edge (cont_bb, l1_bb);
          remove_edge (e);
          e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
        }
      else
        {
          e = find_edge (cont_bb, l1_bb);
          e->flags = EDGE_TRUE_VALUE;
        }
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
                               recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
                               recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
                               recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
                               recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	q += (q * nthreads != n);
	s0 = q * threadid;
	e0 = min(s0 + q, n);
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
      */

static void
expand_omp_for_static_nochunk (struct omp_region *region,
                               struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, q, nthreads);
  t = fold_build2 (NE_EXPR, itype, t, n);
  t = fold_build2 (PLUS_EXPR, itype, q, t);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (MULT_EXPR, itype, q, threadid);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
                     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min (s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
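/* For illustration only: a user-level loop that would be expanded by this
   routine might look like the following (variable names are arbitrary and
   not taken from any testcase):

	#pragma omp for schedule(static, 4)
	for (i = 0; i < n; i++)
	  a[i] = b[i] + c[i];

   Each thread repeatedly claims a chunk of 4 iterations, computed from its
   thread id and the trip count as in the pseudocode above, until the
   iteration space [0, n) is exhausted.  */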
4318 expand_omp_for_static_chunk (struct omp_region
*region
, struct omp_for_data
*fd
)
4320 tree n
, s0
, e0
, e
, t
;
4321 tree trip_var
, trip_init
, trip_main
, trip_back
, nthreads
, threadid
;
4322 tree type
, itype
, v_main
, v_back
, v_extra
;
4323 basic_block entry_bb
, exit_bb
, body_bb
, seq_start_bb
, iter_part_bb
;
4324 basic_block trip_update_bb
, cont_bb
, fin_bb
;
4325 gimple_stmt_iterator si
;
4329 itype
= type
= TREE_TYPE (fd
->loop
.v
);
4330 if (POINTER_TYPE_P (type
))
4331 itype
= lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
), 0);
4333 entry_bb
= region
->entry
;
4334 se
= split_block (entry_bb
, last_stmt (entry_bb
));
4336 iter_part_bb
= se
->dest
;
4337 cont_bb
= region
->cont
;
4338 gcc_assert (EDGE_COUNT (iter_part_bb
->succs
) == 2);
4339 gcc_assert (BRANCH_EDGE (iter_part_bb
)->dest
4340 == FALLTHRU_EDGE (cont_bb
)->dest
);
4341 seq_start_bb
= split_edge (FALLTHRU_EDGE (iter_part_bb
));
4342 body_bb
= single_succ (seq_start_bb
);
4343 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
4344 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
4345 fin_bb
= FALLTHRU_EDGE (cont_bb
)->dest
;
4346 trip_update_bb
= split_edge (FALLTHRU_EDGE (cont_bb
));
4347 exit_bb
= region
->exit
;
4349 /* Trip and adjustment setup goes in ENTRY_BB. */
4350 si
= gsi_last_bb (entry_bb
);
4351 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_FOR
);
4353 t
= build_call_expr (built_in_decls
[BUILT_IN_OMP_GET_NUM_THREADS
], 0);
4354 t
= fold_convert (itype
, t
);
4355 nthreads
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4356 true, GSI_SAME_STMT
);
4358 t
= build_call_expr (built_in_decls
[BUILT_IN_OMP_GET_THREAD_NUM
], 0);
4359 t
= fold_convert (itype
, t
);
4360 threadid
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4361 true, GSI_SAME_STMT
);
4364 = force_gimple_operand_gsi (&si
, fold_convert (type
, fd
->loop
.n1
),
4365 true, NULL_TREE
, true, GSI_SAME_STMT
);
4367 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->loop
.n2
),
4368 true, NULL_TREE
, true, GSI_SAME_STMT
);
4370 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->loop
.step
),
4371 true, NULL_TREE
, true, GSI_SAME_STMT
);
4373 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->chunk_size
),
4374 true, NULL_TREE
, true, GSI_SAME_STMT
);
4376 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
4377 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loop
.step
, t
);
4378 t
= fold_build2 (PLUS_EXPR
, itype
, t
, fd
->loop
.n2
);
4379 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, fd
->loop
.n1
));
4380 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
4381 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
4382 fold_build1 (NEGATE_EXPR
, itype
, t
),
4383 fold_build1 (NEGATE_EXPR
, itype
, fd
->loop
.step
));
4385 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, fd
->loop
.step
);
4386 t
= fold_convert (itype
, t
);
4387 n
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4388 true, GSI_SAME_STMT
);
4390 trip_var
= create_tmp_var (itype
, ".trip");
4391 if (gimple_in_ssa_p (cfun
))
4393 add_referenced_var (trip_var
);
4394 trip_init
= make_ssa_name (trip_var
, NULL
);
4395 trip_main
= make_ssa_name (trip_var
, NULL
);
4396 trip_back
= make_ssa_name (trip_var
, NULL
);
4400 trip_init
= trip_var
;
4401 trip_main
= trip_var
;
4402 trip_back
= trip_var
;
4405 stmt
= gimple_build_assign (trip_init
, build_int_cst (itype
, 0));
4406 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
4408 t
= fold_build2 (MULT_EXPR
, itype
, threadid
, fd
->chunk_size
);
4409 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4410 if (POINTER_TYPE_P (type
))
4411 t
= fold_build2 (POINTER_PLUS_EXPR
, type
, fd
->loop
.n1
,
4412 fold_convert (sizetype
, t
));
4414 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4415 v_extra
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4416 true, GSI_SAME_STMT
);
4418 /* Remove the GIMPLE_OMP_FOR. */
4419 gsi_remove (&si
, true);
4421 /* Iteration space partitioning goes in ITER_PART_BB. */
4422 si
= gsi_last_bb (iter_part_bb
);
4424 t
= fold_build2 (MULT_EXPR
, itype
, trip_main
, nthreads
);
4425 t
= fold_build2 (PLUS_EXPR
, itype
, t
, threadid
);
4426 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->chunk_size
);
4427 s0
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4428 false, GSI_CONTINUE_LINKING
);
4430 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, fd
->chunk_size
);
4431 t
= fold_build2 (MIN_EXPR
, itype
, t
, n
);
4432 e0
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4433 false, GSI_CONTINUE_LINKING
);
4435 t
= build2 (LT_EXPR
, boolean_type_node
, s0
, n
);
4436 gsi_insert_after (&si
, gimple_build_cond_empty (t
), GSI_CONTINUE_LINKING
);
4438 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4439 si
= gsi_start_bb (seq_start_bb
);
4441 t
= fold_convert (itype
, s0
);
4442 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4443 if (POINTER_TYPE_P (type
))
4444 t
= fold_build2 (POINTER_PLUS_EXPR
, type
, fd
->loop
.n1
,
4445 fold_convert (sizetype
, t
));
4447 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4448 t
= force_gimple_operand_gsi (&si
, t
, false, NULL_TREE
,
4449 false, GSI_CONTINUE_LINKING
);
4450 stmt
= gimple_build_assign (fd
->loop
.v
, t
);
4451 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
4453 t
= fold_convert (itype
, e0
);
4454 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4455 if (POINTER_TYPE_P (type
))
4456 t
= fold_build2 (POINTER_PLUS_EXPR
, type
, fd
->loop
.n1
,
4457 fold_convert (sizetype
, t
));
4459 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4460 e
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4461 false, GSI_CONTINUE_LINKING
);
4463 /* The code controlling the sequential loop goes in CONT_BB,
4464 replacing the GIMPLE_OMP_CONTINUE. */
4465 si
= gsi_last_bb (cont_bb
);
4466 stmt
= gsi_stmt (si
);
4467 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
4468 v_main
= gimple_omp_continue_control_use (stmt
);
4469 v_back
= gimple_omp_continue_control_def (stmt
);
4471 if (POINTER_TYPE_P (type
))
4472 t
= fold_build2 (POINTER_PLUS_EXPR
, type
, v_main
,
4473 fold_convert (sizetype
, fd
->loop
.step
));
4475 t
= fold_build2 (PLUS_EXPR
, type
, v_main
, fd
->loop
.step
);
4476 stmt
= gimple_build_assign (v_back
, t
);
4477 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
4479 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, v_back
, e
);
4480 gsi_insert_before (&si
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4482 /* Remove GIMPLE_OMP_CONTINUE. */
4483 gsi_remove (&si
, true);
4485 /* Trip update code goes into TRIP_UPDATE_BB. */
4486 si
= gsi_start_bb (trip_update_bb
);
4488 t
= build_int_cst (itype
, 1);
4489 t
= build2 (PLUS_EXPR
, itype
, trip_main
, t
);
4490 stmt
= gimple_build_assign (trip_back
, t
);
4491 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
4493 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4494 si
= gsi_last_bb (exit_bb
);
4495 if (!gimple_omp_return_nowait_p (gsi_stmt (si
)))
4496 force_gimple_operand_gsi (&si
, build_omp_barrier (), false, NULL_TREE
,
4497 false, GSI_SAME_STMT
);
4498 gsi_remove (&si
, true);
4500 /* Connect the new blocks. */
4501 find_edge (iter_part_bb
, seq_start_bb
)->flags
= EDGE_TRUE_VALUE
;
4502 find_edge (iter_part_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
4504 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
4505 find_edge (cont_bb
, trip_update_bb
)->flags
= EDGE_FALSE_VALUE
;
4507 redirect_edge_and_branch (single_succ_edge (trip_update_bb
), iter_part_bb
);
4509 if (gimple_in_ssa_p (cfun
))
4511 gimple_stmt_iterator psi
;
4514 edge_var_map_vector head
;
4518 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4519 remove arguments of the phi nodes in fin_bb. We need to create
4520 appropriate phi nodes in iter_part_bb instead. */
4521 se
= single_pred_edge (fin_bb
);
4522 re
= single_succ_edge (trip_update_bb
);
4523 head
= redirect_edge_var_map_vector (re
);
4524 ene
= single_succ_edge (entry_bb
);
4526 psi
= gsi_start_phis (fin_bb
);
4527 for (i
= 0; !gsi_end_p (psi
) && VEC_iterate (edge_var_map
, head
, i
, vm
);
4528 gsi_next (&psi
), ++i
)
4531 source_location locus
;
4533 phi
= gsi_stmt (psi
);
4534 t
= gimple_phi_result (phi
);
4535 gcc_assert (t
== redirect_edge_var_map_result (vm
));
4536 nphi
= create_phi_node (t
, iter_part_bb
);
4537 SSA_NAME_DEF_STMT (t
) = nphi
;
4539 t
= PHI_ARG_DEF_FROM_EDGE (phi
, se
);
4540 locus
= gimple_phi_arg_location_from_edge (phi
, se
);
4542 /* A special case -- fd->loop.v is not yet computed in
4543 iter_part_bb, we need to use v_extra instead. */
4544 if (t
== fd
->loop
.v
)
4546 add_phi_arg (nphi
, t
, ene
, locus
);
4547 locus
= redirect_edge_var_map_location (vm
);
4548 add_phi_arg (nphi
, redirect_edge_var_map_def (vm
), re
, locus
);
4550 gcc_assert (!gsi_end_p (psi
) && i
== VEC_length (edge_var_map
, head
));
4551 redirect_edge_var_map_clear (re
);
4554 psi
= gsi_start_phis (fin_bb
);
4555 if (gsi_end_p (psi
))
4557 remove_phi_node (&psi
, false);
4560 /* Make phi node for trip. */
4561 phi
= create_phi_node (trip_main
, iter_part_bb
);
4562 SSA_NAME_DEF_STMT (trip_main
) = phi
;
4563 add_phi_arg (phi
, trip_back
, single_succ_edge (trip_update_bb
),
4565 add_phi_arg (phi
, trip_init
, single_succ_edge (entry_bb
),
4569 set_immediate_dominator (CDI_DOMINATORS
, trip_update_bb
, cont_bb
);
4570 set_immediate_dominator (CDI_DOMINATORS
, iter_part_bb
,
4571 recompute_dominator (CDI_DOMINATORS
, iter_part_bb
));
4572 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
4573 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
4574 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
,
4575 recompute_dominator (CDI_DOMINATORS
, seq_start_bb
));
4576 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
4577 recompute_dominator (CDI_DOMINATORS
, body_bb
));
4581 /* Expand the OpenMP loop defined by REGION. */
4584 expand_omp_for (struct omp_region
*region
)
4586 struct omp_for_data fd
;
4587 struct omp_for_data_loop
*loops
;
4590 = (struct omp_for_data_loop
*)
4591 alloca (gimple_omp_for_collapse (last_stmt (region
->entry
))
4592 * sizeof (struct omp_for_data_loop
));
4593 extract_omp_for_data (last_stmt (region
->entry
), &fd
, loops
);
4594 region
->sched_kind
= fd
.sched_kind
;
4596 gcc_assert (EDGE_COUNT (region
->entry
->succs
) == 2);
4597 BRANCH_EDGE (region
->entry
)->flags
&= ~EDGE_ABNORMAL
;
4598 FALLTHRU_EDGE (region
->entry
)->flags
&= ~EDGE_ABNORMAL
;
4601 gcc_assert (EDGE_COUNT (region
->cont
->succs
) == 2);
4602 BRANCH_EDGE (region
->cont
)->flags
&= ~EDGE_ABNORMAL
;
4603 FALLTHRU_EDGE (region
->cont
)->flags
&= ~EDGE_ABNORMAL
;
4606 if (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
4609 && region
->cont
!= NULL
)
4611 if (fd
.chunk_size
== NULL
)
4612 expand_omp_for_static_nochunk (region
, &fd
);
4614 expand_omp_for_static_chunk (region
, &fd
);
4618 int fn_index
, start_ix
, next_ix
;
4620 gcc_assert (fd
.sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
4621 fn_index
= (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
)
4622 ? 3 : fd
.sched_kind
;
4623 fn_index
+= fd
.have_ordered
* 4;
4624 start_ix
= BUILT_IN_GOMP_LOOP_STATIC_START
+ fn_index
;
4625 next_ix
= BUILT_IN_GOMP_LOOP_STATIC_NEXT
+ fn_index
;
4626 if (fd
.iter_type
== long_long_unsigned_type_node
)
4628 start_ix
+= BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4629 - BUILT_IN_GOMP_LOOP_STATIC_START
;
4630 next_ix
+= BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4631 - BUILT_IN_GOMP_LOOP_STATIC_NEXT
;
4633 expand_omp_for_generic (region
, &fd
, (enum built_in_function
) start_ix
,
4634 (enum built_in_function
) next_ix
);
4637 update_ssa (TODO_update_ssa_only_virtuals
);
/* Expand code for an OpenMP sections directive.  In pseudocode, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
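/* For illustration only (names arbitrary): user code of the form

	#pragma omp sections
	{
	  #pragma omp section
	    foo ();
	  #pragma omp section
	    bar ();
	}

   is dispatched through the switch built below; GOMP_sections_start
   returns the index of the first section the calling thread should run
   (or 0 when no work is left), and GOMP_sections_next hands out the
   remaining section indices.  */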
4669 expand_omp_sections (struct omp_region
*region
)
4671 tree t
, u
, vin
= NULL
, vmain
, vnext
, l2
;
4672 VEC (tree
,heap
) *label_vec
;
4674 basic_block entry_bb
, l0_bb
, l1_bb
, l2_bb
, default_bb
;
4675 gimple_stmt_iterator si
, switch_si
;
4676 gimple sections_stmt
, stmt
, cont
;
4679 struct omp_region
*inner
;
4681 bool exit_reachable
= region
->cont
!= NULL
;
4683 gcc_assert (exit_reachable
== (region
->exit
!= NULL
));
4684 entry_bb
= region
->entry
;
4685 l0_bb
= single_succ (entry_bb
);
4686 l1_bb
= region
->cont
;
4687 l2_bb
= region
->exit
;
4690 if (single_pred_p (l2_bb
) && single_pred (l2_bb
) == l0_bb
)
4691 l2
= gimple_block_label (l2_bb
);
4694 /* This can happen if there are reductions. */
4695 len
= EDGE_COUNT (l0_bb
->succs
);
4696 gcc_assert (len
> 0);
4697 e
= EDGE_SUCC (l0_bb
, len
- 1);
4698 si
= gsi_last_bb (e
->dest
);
4701 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
4702 l2
= gimple_block_label (e
->dest
);
4704 FOR_EACH_EDGE (e
, ei
, l0_bb
->succs
)
4706 si
= gsi_last_bb (e
->dest
);
4708 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
4710 l2
= gimple_block_label (e
->dest
);
4715 default_bb
= create_empty_bb (l1_bb
->prev_bb
);
4719 default_bb
= create_empty_bb (l0_bb
);
4720 l2
= gimple_block_label (default_bb
);
4723 /* We will build a switch() with enough cases for all the
4724 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4725 and a default case to abort if something goes wrong. */
4726 len
= EDGE_COUNT (l0_bb
->succs
);
4728 /* Use VEC_quick_push on label_vec throughout, since we know the size
4730 label_vec
= VEC_alloc (tree
, heap
, len
);
4732 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4733 GIMPLE_OMP_SECTIONS statement. */
4734 si
= gsi_last_bb (entry_bb
);
4735 sections_stmt
= gsi_stmt (si
);
4736 gcc_assert (gimple_code (sections_stmt
) == GIMPLE_OMP_SECTIONS
);
4737 vin
= gimple_omp_sections_control (sections_stmt
);
4738 if (!is_combined_parallel (region
))
4740 /* If we are not inside a combined parallel+sections region,
4741 call GOMP_sections_start. */
4742 t
= build_int_cst (unsigned_type_node
,
4743 exit_reachable
? len
- 1 : len
);
4744 u
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_START
];
4745 stmt
= gimple_build_call (u
, 1, t
);
4749 /* Otherwise, call GOMP_sections_next. */
4750 u
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_NEXT
];
4751 stmt
= gimple_build_call (u
, 0);
4753 gimple_call_set_lhs (stmt
, vin
);
4754 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
4755 gsi_remove (&si
, true);
4757 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4759 switch_si
= gsi_last_bb (l0_bb
);
4760 gcc_assert (gimple_code (gsi_stmt (switch_si
)) == GIMPLE_OMP_SECTIONS_SWITCH
);
4763 cont
= last_stmt (l1_bb
);
4764 gcc_assert (gimple_code (cont
) == GIMPLE_OMP_CONTINUE
);
4765 vmain
= gimple_omp_continue_control_use (cont
);
4766 vnext
= gimple_omp_continue_control_def (cont
);
4777 t
= build3 (CASE_LABEL_EXPR
, void_type_node
,
4778 build_int_cst (unsigned_type_node
, 0), NULL
, l2
);
4779 VEC_quick_push (tree
, label_vec
, t
);
4783 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4784 for (inner
= region
->inner
, casei
= 1;
4786 inner
= inner
->next
, i
++, casei
++)
4788 basic_block s_entry_bb
, s_exit_bb
;
4790 /* Skip optional reduction region. */
4791 if (inner
->type
== GIMPLE_OMP_ATOMIC_LOAD
)
4798 s_entry_bb
= inner
->entry
;
4799 s_exit_bb
= inner
->exit
;
4801 t
= gimple_block_label (s_entry_bb
);
4802 u
= build_int_cst (unsigned_type_node
, casei
);
4803 u
= build3 (CASE_LABEL_EXPR
, void_type_node
, u
, NULL
, t
);
4804 VEC_quick_push (tree
, label_vec
, u
);
4806 si
= gsi_last_bb (s_entry_bb
);
4807 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SECTION
);
4808 gcc_assert (i
< len
|| gimple_omp_section_last_p (gsi_stmt (si
)));
4809 gsi_remove (&si
, true);
4810 single_succ_edge (s_entry_bb
)->flags
= EDGE_FALLTHRU
;
4812 if (s_exit_bb
== NULL
)
4815 si
= gsi_last_bb (s_exit_bb
);
4816 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
4817 gsi_remove (&si
, true);
4819 single_succ_edge (s_exit_bb
)->flags
= EDGE_FALLTHRU
;
4822 /* Error handling code goes in DEFAULT_BB. */
4823 t
= gimple_block_label (default_bb
);
4824 u
= build3 (CASE_LABEL_EXPR
, void_type_node
, NULL
, NULL
, t
);
4825 make_edge (l0_bb
, default_bb
, 0);
4827 stmt
= gimple_build_switch_vec (vmain
, u
, label_vec
);
4828 gsi_insert_after (&switch_si
, stmt
, GSI_SAME_STMT
);
4829 gsi_remove (&switch_si
, true);
4830 VEC_free (tree
, heap
, label_vec
);
4832 si
= gsi_start_bb (default_bb
);
4833 stmt
= gimple_build_call (built_in_decls
[BUILT_IN_TRAP
], 0);
4834 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
4838 /* Code to get the next section goes in L1_BB. */
4839 si
= gsi_last_bb (l1_bb
);
4840 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CONTINUE
);
4842 stmt
= gimple_build_call (built_in_decls
[BUILT_IN_GOMP_SECTIONS_NEXT
], 0);
4843 gimple_call_set_lhs (stmt
, vnext
);
4844 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
4845 gsi_remove (&si
, true);
4847 single_succ_edge (l1_bb
)->flags
= EDGE_FALLTHRU
;
4849 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4850 si
= gsi_last_bb (l2_bb
);
4851 if (gimple_omp_return_nowait_p (gsi_stmt (si
)))
4852 t
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_END_NOWAIT
];
4854 t
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_END
];
4855 stmt
= gimple_build_call (t
, 0);
4856 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
4857 gsi_remove (&si
, true);
4860 set_immediate_dominator (CDI_DOMINATORS
, default_bb
, l0_bb
);
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */
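/* For illustration only (names arbitrary): an atomic update such as

	#pragma omp atomic
	counter += 5;

   matches the form handled here and, for a suitably sized integral type,
   is emitted as a single __sync_fetch_and_add call instead of a
   compare-and-swap loop.  */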
4933 expand_omp_atomic_fetch_op (basic_block load_bb
,
4934 tree addr
, tree loaded_val
,
4935 tree stored_val
, int index
)
4937 enum built_in_function base
;
4938 tree decl
, itype
, call
;
4941 basic_block store_bb
= single_succ (load_bb
);
4942 gimple_stmt_iterator gsi
;
  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */
4960 gsi
= gsi_after_labels (store_bb
);
4961 stmt
= gsi_stmt (gsi
);
4962 loc
= gimple_location (stmt
);
4963 if (!is_gimple_assign (stmt
))
4966 if (gimple_code (gsi_stmt (gsi
)) != GIMPLE_OMP_ATOMIC_STORE
)
4969 if (!operand_equal_p (gimple_assign_lhs (stmt
), stored_val
, 0))
4972 /* Check for one of the supported fetch-op operations. */
4973 switch (gimple_assign_rhs_code (stmt
))
4976 case POINTER_PLUS_EXPR
:
4977 base
= BUILT_IN_FETCH_AND_ADD_N
;
4978 optab
= sync_add_optab
;
4981 base
= BUILT_IN_FETCH_AND_SUB_N
;
4982 optab
= sync_add_optab
;
4985 base
= BUILT_IN_FETCH_AND_AND_N
;
4986 optab
= sync_and_optab
;
4989 base
= BUILT_IN_FETCH_AND_OR_N
;
4990 optab
= sync_ior_optab
;
4993 base
= BUILT_IN_FETCH_AND_XOR_N
;
4994 optab
= sync_xor_optab
;
4999 /* Make sure the expression is of the proper form. */
5000 if (operand_equal_p (gimple_assign_rhs1 (stmt
), loaded_val
, 0))
5001 rhs
= gimple_assign_rhs2 (stmt
);
5002 else if (commutative_tree_code (gimple_assign_rhs_code (stmt
))
5003 && operand_equal_p (gimple_assign_rhs2 (stmt
), loaded_val
, 0))
5004 rhs
= gimple_assign_rhs1 (stmt
);
5008 decl
= built_in_decls
[base
+ index
+ 1];
5009 itype
= TREE_TYPE (TREE_TYPE (decl
));
5011 if (direct_optab_handler (optab
, TYPE_MODE (itype
)) == CODE_FOR_nothing
)
5014 gsi
= gsi_last_bb (load_bb
);
5015 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_LOAD
);
5016 call
= build_call_expr_loc (loc
,
5018 fold_convert_loc (loc
, itype
, rhs
));
5019 call
= fold_convert_loc (loc
, void_type_node
, call
);
5020 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5021 gsi_remove (&gsi
, true);
5023 gsi
= gsi_last_bb (store_bb
);
5024 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
5025 gsi_remove (&gsi
, true);
5026 gsi
= gsi_last_bb (store_bb
);
5027 gsi_remove (&gsi
, true);
5029 if (gimple_in_ssa_p (cfun
))
5030 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */
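/* For illustration only (names arbitrary): a floating-point update such as

	#pragma omp atomic
	x *= 1.5;

   has no fetch-and-op builtin, so it is expanded with the loop above: the
   value is view-converted to a same-sized integer and the update is retried
   with __sync_val_compare_and_swap until the exchange succeeds.  */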
5048 expand_omp_atomic_pipeline (basic_block load_bb
, basic_block store_bb
,
5049 tree addr
, tree loaded_val
, tree stored_val
,
5052 tree loadedi
, storedi
, initial
, new_storedi
, old_vali
;
5053 tree type
, itype
, cmpxchg
, iaddr
;
5054 gimple_stmt_iterator si
;
5055 basic_block loop_header
= single_succ (load_bb
);
5059 cmpxchg
= built_in_decls
[BUILT_IN_VAL_COMPARE_AND_SWAP_N
+ index
+ 1];
5060 type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
5061 itype
= TREE_TYPE (TREE_TYPE (cmpxchg
));
5063 if (direct_optab_handler (sync_compare_and_swap_optab
, TYPE_MODE (itype
))
5064 == CODE_FOR_nothing
)
5067 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5068 si
= gsi_last_bb (load_bb
);
5069 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
5071 /* For floating-point values, we'll need to view-convert them to integers
5072 so that we can perform the atomic compare and swap. Simplify the
5073 following code by always setting up the "i"ntegral variables. */
5074 if (!INTEGRAL_TYPE_P (type
) && !POINTER_TYPE_P (type
))
5078 iaddr
= create_tmp_var (build_pointer_type_for_mode (itype
, ptr_mode
,
5081 = force_gimple_operand_gsi (&si
,
5082 fold_convert (TREE_TYPE (iaddr
), addr
),
5083 false, NULL_TREE
, true, GSI_SAME_STMT
);
5084 stmt
= gimple_build_assign (iaddr
, iaddr_val
);
5085 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5086 loadedi
= create_tmp_var (itype
, NULL
);
5087 if (gimple_in_ssa_p (cfun
))
5089 add_referenced_var (iaddr
);
5090 add_referenced_var (loadedi
);
5091 loadedi
= make_ssa_name (loadedi
, NULL
);
5097 loadedi
= loaded_val
;
5101 = force_gimple_operand_gsi (&si
,
5102 build2 (MEM_REF
, TREE_TYPE (TREE_TYPE (iaddr
)),
5104 build_int_cst (TREE_TYPE (iaddr
), 0)),
5105 true, NULL_TREE
, true, GSI_SAME_STMT
);
5107 /* Move the value to the LOADEDI temporary. */
5108 if (gimple_in_ssa_p (cfun
))
5110 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header
)));
5111 phi
= create_phi_node (loadedi
, loop_header
);
5112 SSA_NAME_DEF_STMT (loadedi
) = phi
;
5113 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, single_succ_edge (load_bb
)),
5117 gsi_insert_before (&si
,
5118 gimple_build_assign (loadedi
, initial
),
5120 if (loadedi
!= loaded_val
)
5122 gimple_stmt_iterator gsi2
;
5125 x
= build1 (VIEW_CONVERT_EXPR
, type
, loadedi
);
5126 gsi2
= gsi_start_bb (loop_header
);
5127 if (gimple_in_ssa_p (cfun
))
5130 x
= force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
5131 true, GSI_SAME_STMT
);
5132 stmt
= gimple_build_assign (loaded_val
, x
);
5133 gsi_insert_before (&gsi2
, stmt
, GSI_SAME_STMT
);
5137 x
= build2 (MODIFY_EXPR
, TREE_TYPE (loaded_val
), loaded_val
, x
);
5138 force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
5139 true, GSI_SAME_STMT
);
5142 gsi_remove (&si
, true);
5144 si
= gsi_last_bb (store_bb
);
5145 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
5148 storedi
= stored_val
;
5151 force_gimple_operand_gsi (&si
,
5152 build1 (VIEW_CONVERT_EXPR
, itype
,
5153 stored_val
), true, NULL_TREE
, true,
5156 /* Build the compare&swap statement. */
5157 new_storedi
= build_call_expr (cmpxchg
, 3, iaddr
, loadedi
, storedi
);
5158 new_storedi
= force_gimple_operand_gsi (&si
,
5159 fold_convert (TREE_TYPE (loadedi
),
5162 true, GSI_SAME_STMT
);
5164 if (gimple_in_ssa_p (cfun
))
5168 old_vali
= create_tmp_var (TREE_TYPE (loadedi
), NULL
);
5169 if (gimple_in_ssa_p (cfun
))
5170 add_referenced_var (old_vali
);
5171 stmt
= gimple_build_assign (old_vali
, loadedi
);
5172 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5174 stmt
= gimple_build_assign (loadedi
, new_storedi
);
5175 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5178 /* Note that we always perform the comparison as an integer, even for
5179 floating point. This allows the atomic operation to properly
5180 succeed even with NaNs and -0.0. */
5181 stmt
= gimple_build_cond_empty
5182 (build2 (NE_EXPR
, boolean_type_node
,
5183 new_storedi
, old_vali
));
5184 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5187 e
= single_succ_edge (store_bb
);
5188 e
->flags
&= ~EDGE_FALLTHRU
;
5189 e
->flags
|= EDGE_FALSE_VALUE
;
5191 e
= make_edge (store_bb
, loop_header
, EDGE_TRUE_VALUE
);
5193 /* Copy the new value to loadedi (we already did that before the condition
5194 if we are not in SSA). */
5195 if (gimple_in_ssa_p (cfun
))
5197 phi
= gimple_seq_first_stmt (phi_nodes (loop_header
));
5198 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), new_storedi
);
5201 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5202 gsi_remove (&si
, true);
5204 if (gimple_in_ssa_p (cfun
))
5205 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

			GOMP_atomic_start ();
			*addr = rhs;
			GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
	GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr)	with
	loaded_val = *addr;

   and replace
	GIMPLE_OMP_ATOMIC_STORE (stored_val)		with
	*addr = stored_val;  */
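/* For illustration only (names arbitrary): with this mutex fallback, an
   atomic update of a type for which no suitably sized __sync builtin is
   available (for instance a long double on some targets)

	#pragma omp atomic
	ld += 1.0L;

   simply becomes

	GOMP_atomic_start ();
	ld = ld + 1.0L;
	GOMP_atomic_end ();  */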
static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand it using
   expand_omp_atomic_fetch_op.  If that fails, we try
   expand_omp_atomic_pipeline, and if that fails too, the ultimate
   fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1 ().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */
5395 build_omp_regions_1 (basic_block bb
, struct omp_region
*parent
,
5398 gimple_stmt_iterator gsi
;
5402 gsi
= gsi_last_bb (bb
);
5403 if (!gsi_end_p (gsi
) && is_gimple_omp (gsi_stmt (gsi
)))
5405 struct omp_region
*region
;
5406 enum gimple_code code
;
5408 stmt
= gsi_stmt (gsi
);
5409 code
= gimple_code (stmt
);
5410 if (code
== GIMPLE_OMP_RETURN
)
5412 /* STMT is the return point out of region PARENT. Mark it
5413 as the exit point and make PARENT the immediately
5414 enclosing region. */
5415 gcc_assert (parent
);
5418 parent
= parent
->outer
;
5420 else if (code
== GIMPLE_OMP_ATOMIC_STORE
)
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
5425 gcc_assert (parent
);
5426 gcc_assert (parent
->type
== GIMPLE_OMP_ATOMIC_LOAD
);
5429 parent
= parent
->outer
;
5432 else if (code
== GIMPLE_OMP_CONTINUE
)
5434 gcc_assert (parent
);
5437 else if (code
== GIMPLE_OMP_SECTIONS_SWITCH
)
5439 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5440 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5445 /* Otherwise, this directive becomes the parent for a new
5447 region
= new_omp_region (bb
, code
, parent
);
5452 if (single_tree
&& !parent
)
5455 for (son
= first_dom_son (CDI_DOMINATORS
, bb
);
5457 son
= next_dom_son (CDI_DOMINATORS
, son
))
5458 build_omp_regions_1 (son
, parent
, single_tree
);
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

static void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}
struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */
5563 lower_omp_sections (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5565 tree block
, control
;
5566 gimple_stmt_iterator tgsi
;
5568 gimple stmt
, new_stmt
, bind
, t
;
5569 gimple_seq ilist
, dlist
, olist
, new_body
, body
;
5570 struct gimplify_ctx gctx
;
5572 stmt
= gsi_stmt (*gsi_p
);
5574 push_gimplify_context (&gctx
);
5578 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt
),
5579 &ilist
, &dlist
, ctx
);
5581 tgsi
= gsi_start (gimple_omp_body (stmt
));
5582 for (len
= 0; !gsi_end_p (tgsi
); len
++, gsi_next (&tgsi
))
5585 tgsi
= gsi_start (gimple_omp_body (stmt
));
5587 for (i
= 0; i
< len
; i
++, gsi_next (&tgsi
))
5592 sec_start
= gsi_stmt (tgsi
);
5593 sctx
= maybe_lookup_ctx (sec_start
);
5596 gimple_seq_add_stmt (&body
, sec_start
);
5598 lower_omp (gimple_omp_body (sec_start
), sctx
);
5599 gimple_seq_add_seq (&body
, gimple_omp_body (sec_start
));
5600 gimple_omp_set_body (sec_start
, NULL
);
5604 gimple_seq l
= NULL
;
5605 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt
), NULL
,
5607 gimple_seq_add_seq (&body
, l
);
5608 gimple_omp_section_set_last (sec_start
);
5611 gimple_seq_add_stmt (&body
, gimple_build_omp_return (false));
5614 block
= make_node (BLOCK
);
5615 bind
= gimple_build_bind (NULL
, body
, block
);
5618 lower_reduction_clauses (gimple_omp_sections_clauses (stmt
), &olist
, ctx
);
5620 block
= make_node (BLOCK
);
5621 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
5623 pop_gimplify_context (new_stmt
);
5624 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
5625 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
5626 if (BLOCK_VARS (block
))
5627 TREE_USED (block
) = 1;
5630 gimple_seq_add_seq (&new_body
, ilist
);
5631 gimple_seq_add_stmt (&new_body
, stmt
);
5632 gimple_seq_add_stmt (&new_body
, gimple_build_omp_sections_switch ());
5633 gimple_seq_add_stmt (&new_body
, bind
);
5635 control
= create_tmp_var (unsigned_type_node
, ".section");
5636 t
= gimple_build_omp_continue (control
, control
);
5637 gimple_omp_sections_set_control (stmt
, control
);
5638 gimple_seq_add_stmt (&new_body
, t
);
5640 gimple_seq_add_seq (&new_body
, olist
);
5641 gimple_seq_add_seq (&new_body
, dlist
);
5643 new_body
= maybe_catch_exception (new_body
);
5645 t
= gimple_build_omp_return
5646 (!!find_omp_clause (gimple_omp_sections_clauses (stmt
),
5647 OMP_CLAUSE_NOWAIT
));
5648 gimple_seq_add_stmt (&new_body
, t
);
5650 gimple_bind_set_body (new_stmt
, new_body
);
5651 gimple_omp_set_body (stmt
, NULL
);
5653 gsi_replace (gsi_p
, new_stmt
, true);
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */
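/* For illustration only (names arbitrary): a directive such as

	#pragma omp single
	  foo ();

   lowers to the form above -- only the thread for which GOMP_single_start
   returns true calls foo, and (absent 'nowait') every thread then waits at
   the trailing barrier.  */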
5669 lower_omp_single_simple (gimple single_stmt
, gimple_seq
*pre_p
)
5671 location_t loc
= gimple_location (single_stmt
);
5672 tree tlabel
= create_artificial_label (loc
);
5673 tree flabel
= create_artificial_label (loc
);
5677 decl
= built_in_decls
[BUILT_IN_GOMP_SINGLE_START
];
5678 lhs
= create_tmp_var (TREE_TYPE (TREE_TYPE (decl
)), NULL
);
5679 call
= gimple_build_call (decl
, 0);
5680 gimple_call_set_lhs (call
, lhs
);
5681 gimple_seq_add_stmt (pre_p
, call
);
5683 cond
= gimple_build_cond (EQ_EXPR
, lhs
,
5684 fold_convert_loc (loc
, TREE_TYPE (lhs
),
5687 gimple_seq_add_stmt (pre_p
, cond
);
5688 gimple_seq_add_stmt (pre_p
, gimple_build_label (tlabel
));
5689 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
5690 gimple_seq_add_stmt (pre_p
, gimple_build_label (flabel
));
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */
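/* For illustration only (names arbitrary): with

	int a, b;
	#pragma omp single copyprivate (a, b)
	  init (&a, &b);

   the thread that executes the single fills in the .omp_copy_o structure,
   and the other threads copy their private 'a' and 'b' back out of it
   through the pointer returned by GOMP_single_copy_start.  */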
5724 lower_omp_single_copy (gimple single_stmt
, gimple_seq
*pre_p
, omp_context
*ctx
)
5726 tree ptr_type
, t
, l0
, l1
, l2
;
5727 gimple_seq copyin_seq
;
5728 location_t loc
= gimple_location (single_stmt
);
5730 ctx
->sender_decl
= create_tmp_var (ctx
->record_type
, ".omp_copy_o");
5732 ptr_type
= build_pointer_type (ctx
->record_type
);
5733 ctx
->receiver_decl
= create_tmp_var (ptr_type
, ".omp_copy_i");
5735 l0
= create_artificial_label (loc
);
5736 l1
= create_artificial_label (loc
);
5737 l2
= create_artificial_label (loc
);
5739 t
= build_call_expr_loc (loc
, built_in_decls
[BUILT_IN_GOMP_SINGLE_COPY_START
], 0);
5740 t
= fold_convert_loc (loc
, ptr_type
, t
);
5741 gimplify_assign (ctx
->receiver_decl
, t
, pre_p
);
5743 t
= build2 (EQ_EXPR
, boolean_type_node
, ctx
->receiver_decl
,
5744 build_int_cst (ptr_type
, 0));
5745 t
= build3 (COND_EXPR
, void_type_node
, t
,
5746 build_and_jump (&l0
), build_and_jump (&l1
));
5747 gimplify_and_add (t
, pre_p
);
5749 gimple_seq_add_stmt (pre_p
, gimple_build_label (l0
));
5751 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
5754 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt
), pre_p
,
5757 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
5758 t
= build_call_expr_loc (loc
, built_in_decls
[BUILT_IN_GOMP_SINGLE_COPY_END
],
5760 gimplify_and_add (t
, pre_p
);
5762 t
= build_and_jump (&l2
);
5763 gimplify_and_add (t
, pre_p
);
5765 gimple_seq_add_stmt (pre_p
, gimple_build_label (l1
));
5767 gimple_seq_add_seq (pre_p
, copyin_seq
);
5769 gimple_seq_add_stmt (pre_p
, gimple_build_label (l2
));
5773 /* Expand code for an OpenMP single directive. */
5776 lower_omp_single (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5779 gimple t
, bind
, single_stmt
= gsi_stmt (*gsi_p
);
5780 gimple_seq bind_body
, dlist
;
5781 struct gimplify_ctx gctx
;
5783 push_gimplify_context (&gctx
);
5786 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt
),
5787 &bind_body
, &dlist
, ctx
);
5788 lower_omp (gimple_omp_body (single_stmt
), ctx
);
5790 gimple_seq_add_stmt (&bind_body
, single_stmt
);
5792 if (ctx
->record_type
)
5793 lower_omp_single_copy (single_stmt
, &bind_body
, ctx
);
5795 lower_omp_single_simple (single_stmt
, &bind_body
);
5797 gimple_omp_set_body (single_stmt
, NULL
);
5799 gimple_seq_add_seq (&bind_body
, dlist
);
5801 bind_body
= maybe_catch_exception (bind_body
);
5803 t
= gimple_build_omp_return
5804 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt
),
5805 OMP_CLAUSE_NOWAIT
));
5806 gimple_seq_add_stmt (&bind_body
, t
);
5808 block
= make_node (BLOCK
);
5809 bind
= gimple_build_bind (NULL
, bind_body
, block
);
5811 pop_gimplify_context (bind
);
5813 gimple_bind_append_vars (bind
, ctx
->block_vars
);
5814 BLOCK_VARS (block
) = ctx
->block_vars
;
5815 gsi_replace (gsi_p
, bind
, true);
5816 if (BLOCK_VARS (block
))
5817 TREE_USED (block
) = 1;
5821 /* Expand code for an OpenMP master directive. */
5824 lower_omp_master (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5826 tree block
, lab
= NULL
, x
;
5827 gimple stmt
= gsi_stmt (*gsi_p
), bind
;
5828 location_t loc
= gimple_location (stmt
);
5830 struct gimplify_ctx gctx
;
5832 push_gimplify_context (&gctx
);
5834 block
= make_node (BLOCK
);
5835 bind
= gimple_build_bind (NULL
, gimple_seq_alloc_with_stmt (stmt
),
5838 x
= build_call_expr_loc (loc
, built_in_decls
[BUILT_IN_OMP_GET_THREAD_NUM
], 0);
5839 x
= build2 (EQ_EXPR
, boolean_type_node
, x
, integer_zero_node
);
5840 x
= build3 (COND_EXPR
, void_type_node
, x
, NULL
, build_and_jump (&lab
));
5842 gimplify_and_add (x
, &tseq
);
5843 gimple_bind_add_seq (bind
, tseq
);
5845 lower_omp (gimple_omp_body (stmt
), ctx
);
5846 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
5847 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
5848 gimple_omp_set_body (stmt
, NULL
);
5850 gimple_bind_add_stmt (bind
, gimple_build_label (lab
));
5852 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
5854 pop_gimplify_context (bind
);
5856 gimple_bind_append_vars (bind
, ctx
->block_vars
);
5857 BLOCK_VARS (block
) = ctx
->block_vars
;
5858 gsi_replace (gsi_p
, bind
, true);
5862 /* Expand code for an OpenMP ordered directive. */
5865 lower_omp_ordered (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5868 gimple stmt
= gsi_stmt (*gsi_p
), bind
, x
;
5869 struct gimplify_ctx gctx
;
5871 push_gimplify_context (&gctx
);
5873 block
= make_node (BLOCK
);
5874 bind
= gimple_build_bind (NULL
, gimple_seq_alloc_with_stmt (stmt
),
5877 x
= gimple_build_call (built_in_decls
[BUILT_IN_GOMP_ORDERED_START
], 0);
5878 gimple_bind_add_stmt (bind
, x
);
5880 lower_omp (gimple_omp_body (stmt
), ctx
);
5881 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
5882 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
5883 gimple_omp_set_body (stmt
, NULL
);
5885 x
= gimple_build_call (built_in_decls
[BUILT_IN_GOMP_ORDERED_END
], 0);
5886 gimple_bind_add_stmt (bind
, x
);
5888 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
5890 pop_gimplify_context (bind
);
5892 gimple_bind_append_vars (bind
, ctx
->block_vars
);
5893 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
5894 gsi_replace (gsi_p
, bind
, true);
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
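/* For illustration only (the name is arbitrary): a named critical section
   such as

	#pragma omp critical (update)
	  count++;

   is bracketed with GOMP_critical_name_start/end called on the address of a
   common symbol named ".gomp_critical_user_update", so every translation
   unit that uses the same critical name ends up sharing one lock.  */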
5903 static GTY((param1_is (tree
), param2_is (tree
)))
5904 splay_tree critical_name_mutexes
;
5907 lower_omp_critical (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5910 tree name
, lock
, unlock
;
5911 gimple stmt
= gsi_stmt (*gsi_p
), bind
;
5912 location_t loc
= gimple_location (stmt
);
5914 struct gimplify_ctx gctx
;
5916 name
= gimple_omp_critical_name (stmt
);
5922 if (!critical_name_mutexes
)
5923 critical_name_mutexes
5924 = splay_tree_new_ggc (splay_tree_compare_pointers
,
5925 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s
,
5926 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s
);
5928 n
= splay_tree_lookup (critical_name_mutexes
, (splay_tree_key
) name
);
5933 decl
= create_tmp_var_raw (ptr_type_node
, NULL
);
5935 new_str
= ACONCAT ((".gomp_critical_user_",
5936 IDENTIFIER_POINTER (name
), NULL
));
5937 DECL_NAME (decl
) = get_identifier (new_str
);
5938 TREE_PUBLIC (decl
) = 1;
5939 TREE_STATIC (decl
) = 1;
5940 DECL_COMMON (decl
) = 1;
5941 DECL_ARTIFICIAL (decl
) = 1;
5942 DECL_IGNORED_P (decl
) = 1;
5943 varpool_finalize_decl (decl
);
5945 splay_tree_insert (critical_name_mutexes
, (splay_tree_key
) name
,
5946 (splay_tree_value
) decl
);
5949 decl
= (tree
) n
->value
;
5951 lock
= built_in_decls
[BUILT_IN_GOMP_CRITICAL_NAME_START
];
5952 lock
= build_call_expr_loc (loc
, lock
, 1, build_fold_addr_expr_loc (loc
, decl
));
5954 unlock
= built_in_decls
[BUILT_IN_GOMP_CRITICAL_NAME_END
];
5955 unlock
= build_call_expr_loc (loc
, unlock
, 1,
5956 build_fold_addr_expr_loc (loc
, decl
));
5960 lock
= built_in_decls
[BUILT_IN_GOMP_CRITICAL_START
];
5961 lock
= build_call_expr_loc (loc
, lock
, 0);
5963 unlock
= built_in_decls
[BUILT_IN_GOMP_CRITICAL_END
];
5964 unlock
= build_call_expr_loc (loc
, unlock
, 0);
5967 push_gimplify_context (&gctx
);
5969 block
= make_node (BLOCK
);
5970 bind
= gimple_build_bind (NULL
, gimple_seq_alloc_with_stmt (stmt
), block
);
5972 tbody
= gimple_bind_body (bind
);
5973 gimplify_and_add (lock
, &tbody
);
5974 gimple_bind_set_body (bind
, tbody
);
5976 lower_omp (gimple_omp_body (stmt
), ctx
);
5977 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
5978 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
5979 gimple_omp_set_body (stmt
, NULL
);
5981 tbody
= gimple_bind_body (bind
);
5982 gimplify_and_add (unlock
, &tbody
);
5983 gimple_bind_set_body (bind
, tbody
);
5985 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
5987 pop_gimplify_context (bind
);
5988 gimple_bind_append_vars (bind
, ctx
->block_vars
);
5989 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
5990 gsi_replace (gsi_p
, bind
, true);
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */
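/* For illustration only (names arbitrary): in a loop like

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++)
	  x = f (i);

   the copy-back of 'x' to the shared variable is guarded by the predicate
   built here, so, roughly speaking, only a thread whose final value of the
   iteration variable shows the whole loop has finished performs it.  */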
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
6044 /* Lower code for an OpenMP loop directive. */
6047 lower_omp_for (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
6050 struct omp_for_data fd
;
6051 gimple stmt
= gsi_stmt (*gsi_p
), new_stmt
;
6052 gimple_seq omp_for_body
, body
, dlist
;
6054 struct gimplify_ctx gctx
;
6056 push_gimplify_context (&gctx
);
6058 lower_omp (gimple_omp_for_pre_body (stmt
), ctx
);
6059 lower_omp (gimple_omp_body (stmt
), ctx
);
6061 block
= make_node (BLOCK
);
6062 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
6064 /* Move declaration of temporaries in the loop body before we make
6066 omp_for_body
= gimple_omp_body (stmt
);
6067 if (!gimple_seq_empty_p (omp_for_body
)
6068 && gimple_code (gimple_seq_first_stmt (omp_for_body
)) == GIMPLE_BIND
)
6070 tree vars
= gimple_bind_vars (gimple_seq_first_stmt (omp_for_body
));
6071 gimple_bind_append_vars (new_stmt
, vars
);
6074 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6077 lower_rec_input_clauses (gimple_omp_for_clauses (stmt
), &body
, &dlist
, ctx
);
6078 gimple_seq_add_seq (&body
, gimple_omp_for_pre_body (stmt
));
6080 /* Lower the header expressions. At this point, we can assume that
6081 the header is of the form:
6083 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6085 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6086 using the .omp_data_s mapping, if needed. */
6087 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
6089 rhs_p
= gimple_omp_for_initial_ptr (stmt
, i
);
6090 if (!is_gimple_min_invariant (*rhs_p
))
6091 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
6093 rhs_p
= gimple_omp_for_final_ptr (stmt
, i
);
6094 if (!is_gimple_min_invariant (*rhs_p
))
6095 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
6097 rhs_p
= &TREE_OPERAND (gimple_omp_for_incr (stmt
, i
), 1);
6098 if (!is_gimple_min_invariant (*rhs_p
))
6099 *rhs_p
= get_formal_tmp_var (*rhs_p
, &body
);
6102 /* Once lowered, extract the bounds and clauses. */
6103 extract_omp_for_data (stmt
, &fd
, NULL
);
6105 lower_omp_for_lastprivate (&fd
, &body
, &dlist
, ctx
);
6107 gimple_seq_add_stmt (&body
, stmt
);
6108 gimple_seq_add_seq (&body
, gimple_omp_body (stmt
));
6110 gimple_seq_add_stmt (&body
, gimple_build_omp_continue (fd
.loop
.v
,
6113 /* After the loop, add exit clauses. */
6114 lower_reduction_clauses (gimple_omp_for_clauses (stmt
), &body
, ctx
);
6115 gimple_seq_add_seq (&body
, dlist
);
6117 body
= maybe_catch_exception (body
);
6119 /* Region exit marker goes at the end of the loop body. */
6120 gimple_seq_add_stmt (&body
, gimple_build_omp_return (fd
.have_nowait
));
6122 pop_gimplify_context (new_stmt
);
6124 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
6125 BLOCK_VARS (block
) = gimple_bind_vars (new_stmt
);
6126 if (BLOCK_VARS (block
))
6127 TREE_USED (block
) = 1;
6129 gimple_bind_set_body (new_stmt
, body
);
6130 gimple_omp_set_body (stmt
, NULL
);
6131 gimple_omp_for_set_pre_body (stmt
, NULL
);
6132 gsi_replace (gsi_p
, new_stmt
, true);
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL.  */
6139 check_combined_parallel (gimple_stmt_iterator
*gsi_p
,
6140 bool *handled_ops_p
,
6141 struct walk_stmt_info
*wi
)
6143 int *info
= (int *) wi
->info
;
6144 gimple stmt
= gsi_stmt (*gsi_p
);
6146 *handled_ops_p
= true;
6147 switch (gimple_code (stmt
))
6151 case GIMPLE_OMP_FOR
:
6152 case GIMPLE_OMP_SECTIONS
:
6153 *info
= *info
== 0 ? 1 : -1;
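/* A sketch of how this callback is used (see lower_omp_taskreg below):
   walking the body of, e.g.,

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   leaves *INFO at 1 (exactly one worksharing construct and nothing
   else), so the parallel can be marked as combined; any other
   statement encountered drives *INFO to -1 via the default case.  */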
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  child_cfun->dont_save_pending_sizes_p = 1;
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);
  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}
  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }
  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
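/* Illustrative only (BUF, N and USE are placeholders): for a construct
   such as

	char buf[n];
	#pragma omp task firstprivate (buf)
	use (buf);

   the copy function built above fills the task's record (through ARG)
   from the sender record (through SARG): first the temporaries behind
   variable sizes and offsets, then shared pointers and ordinary
   firstprivate copies, and finally the variable-sized firstprivate
   blocks together with the pointer fields that refer to them.  */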
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
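/* A rough sketch of the marshalling set up above: the send clauses
   fill the local .omp_data_o object (SENDER_DECL) in the ILIST/OLIST
   sequences placed around the directive, the directive's data argument
   is set to that object via gimple_omp_taskreg_set_data_arg, and the
   rebuilt body begins by pointing RECEIVER_DECL at it, so the body
   only ever accesses the data through the receiver side.  */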
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
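/* Roughly: statements outside any OpenMP construct can still mention
   variables recorded in TASK_SHARED_VARS; lower_omp_1 below visits
   them with CTX == NULL but a non-NULL DATA, and any operand naming
   such a variable makes the statement get regimplified.  */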
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi = gsi_start (body);
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
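/* Two illustrative rejects (user-level code; L and DONE are just
   labels):

	goto l;
	#pragma omp parallel
	{ l:; }

   is reported as an invalid entry (BRANCH_CTX is NULL here), whereas

	#pragma omp critical
	{ goto done; }
	done:;

   gets the more generic invalid-branch message.  */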
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"