/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
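/* As a rough illustration of the two phases (an editor's sketch, not code
   emitted verbatim by this pass): a construct such as

       #pragma omp parallel shared (a)
	 a++;

   is outlined into a child function and, once expansion is done, becomes
   approximately

       .omp_data_o.a = a;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();
       a = .omp_data_o.a;

   where foo._omp_fn.0 receives the .omp_data_s record and performs the
   increment through it.  Exact temporaries, field layout and argument lists
   differ from case to case.  */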
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
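/* For example (an editor's illustration, mirroring calls made later in this
   file), checking whether a loop directive carries an ordered clause:

     tree c = find_omp_clause (gimple_omp_for_clauses (stmt),
			       OMP_CLAUSE_ORDERED);
     if (c != NULL_TREE)
       ... the loop was annotated with an ordered clause ...  */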
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
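/* Editor's illustration of the "combined" case: for

     #pragma omp parallel
     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i < n; i++)
       ...

   the expander can use a single combined libgomp entry point from the
   GOMP_parallel_loop_*_start family instead of a plain GOMP_parallel_start
   call followed by a separate loop-initialization call inside the child
   function; the extra loop bound/chunk arguments collected by
   get_ws_args_for below are what make that possible.  */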
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
			    PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
			    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
			    MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
			    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
			    NEGATE_EXPR, TREE_TYPE (loop->step),
			    loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd)
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			       PLUS_EXPR, itype,
			       fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
					      fold_convert_loc (loc, itype,
								loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				 fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
				     MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
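/* For instance (editor's illustration), with

     #pragma omp for collapse (2)
     for (i = 0; i < N; i++)
       for (j = 0; j < M; j++)
	 ...

   the two loops are folded into a single logical iteration space: fd->loop
   runs a fresh variable .iter from 0 up to .count (equal to N * M when both
   bounds are compile-time constants, otherwise computed at run time), and
   the original i and j are recomputed from .iter inside the body during
   expansion.  */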
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   parallel+workshare call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_NO_WARNING (copy) = TREE_NO_WARNING (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
  DECL_ATTRIBUTES (copy) = DECL_ATTRIBUTES (var);

  return copy;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference even
	 private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */

void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_add_new_function (child_fn, false);
}

/* Destroy a omp_context data structures.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_SAFELEN:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_SAFELEN:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
1670 scan_omp_parallel (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
1674 gimple stmt
= gsi_stmt (*gsi
);
1676 /* Ignore parallel directives with empty bodies, unless there
1677 are copyin clauses. */
1679 && empty_body_p (gimple_omp_body (stmt
))
1680 && find_omp_clause (gimple_omp_parallel_clauses (stmt
),
1681 OMP_CLAUSE_COPYIN
) == NULL
)
1683 gsi_replace (gsi
, gimple_build_nop (), false);
1687 ctx
= new_omp_context (stmt
, outer_ctx
);
1688 if (taskreg_nesting_level
> 1)
1689 ctx
->is_nested
= true;
1690 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1691 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
1692 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1693 name
= create_tmp_var_name (".omp_data_s");
1694 name
= build_decl (gimple_location (stmt
),
1695 TYPE_DECL
, name
, ctx
->record_type
);
1696 DECL_ARTIFICIAL (name
) = 1;
1697 DECL_NAMELESS (name
) = 1;
1698 TYPE_NAME (ctx
->record_type
) = name
;
1699 create_omp_child_function (ctx
, false);
1700 gimple_omp_parallel_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
1702 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt
), ctx
);
1703 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1705 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1706 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
1709 layout_type (ctx
->record_type
);
1710 fixup_child_record_type (ctx
);
1714 /* Scan an OpenMP task directive. */
1717 scan_omp_task (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
1721 gimple stmt
= gsi_stmt (*gsi
);
1722 location_t loc
= gimple_location (stmt
);
1724 /* Ignore task directives with empty bodies. */
1726 && empty_body_p (gimple_omp_body (stmt
)))
1728 gsi_replace (gsi
, gimple_build_nop (), false);
1732 ctx
= new_omp_context (stmt
, outer_ctx
);
1733 if (taskreg_nesting_level
> 1)
1734 ctx
->is_nested
= true;
1735 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1736 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
1737 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1738 name
= create_tmp_var_name (".omp_data_s");
1739 name
= build_decl (gimple_location (stmt
),
1740 TYPE_DECL
, name
, ctx
->record_type
);
1741 DECL_ARTIFICIAL (name
) = 1;
1742 DECL_NAMELESS (name
) = 1;
1743 TYPE_NAME (ctx
->record_type
) = name
;
1744 create_omp_child_function (ctx
, false);
1745 gimple_omp_task_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
1747 scan_sharing_clauses (gimple_omp_task_clauses (stmt
), ctx
);
1749 if (ctx
->srecord_type
)
1751 name
= create_tmp_var_name (".omp_data_a");
1752 name
= build_decl (gimple_location (stmt
),
1753 TYPE_DECL
, name
, ctx
->srecord_type
);
1754 DECL_ARTIFICIAL (name
) = 1;
1755 DECL_NAMELESS (name
) = 1;
1756 TYPE_NAME (ctx
->srecord_type
) = name
;
1757 create_omp_child_function (ctx
, true);
1760 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1762 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1764 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
1765 t
= build_int_cst (long_integer_type_node
, 0);
1766 gimple_omp_task_set_arg_size (stmt
, t
);
1767 t
= build_int_cst (long_integer_type_node
, 1);
1768 gimple_omp_task_set_arg_align (stmt
, t
);
1772 tree
*p
, vla_fields
= NULL_TREE
, *q
= &vla_fields
;
1773 /* Move VLA fields to the end. */
1774 p
= &TYPE_FIELDS (ctx
->record_type
);
1776 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p
))
1777 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p
))))
1780 *p
= TREE_CHAIN (*p
);
1781 TREE_CHAIN (*q
) = NULL_TREE
;
1782 q
= &TREE_CHAIN (*q
);
1785 p
= &DECL_CHAIN (*p
);
1787 layout_type (ctx
->record_type
);
1788 fixup_child_record_type (ctx
);
1789 if (ctx
->srecord_type
)
1790 layout_type (ctx
->srecord_type
);
1791 t
= fold_convert_loc (loc
, long_integer_type_node
,
1792 TYPE_SIZE_UNIT (ctx
->record_type
));
1793 gimple_omp_task_set_arg_size (stmt
, t
);
1794 t
= build_int_cst (long_integer_type_node
,
1795 TYPE_ALIGN_UNIT (ctx
->record_type
));
1796 gimple_omp_task_set_arg_align (stmt
, t
);
1801 /* Scan an OpenMP loop directive. */
1804 scan_omp_for (gimple stmt
, omp_context
*outer_ctx
)
1809 ctx
= new_omp_context (stmt
, outer_ctx
);
1811 scan_sharing_clauses (gimple_omp_for_clauses (stmt
), ctx
);
1813 scan_omp (gimple_omp_for_pre_body_ptr (stmt
), ctx
);
1814 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
1816 scan_omp_op (gimple_omp_for_index_ptr (stmt
, i
), ctx
);
1817 scan_omp_op (gimple_omp_for_initial_ptr (stmt
, i
), ctx
);
1818 scan_omp_op (gimple_omp_for_final_ptr (stmt
, i
), ctx
);
1819 scan_omp_op (gimple_omp_for_incr_ptr (stmt
, i
), ctx
);
1821 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1824 /* Scan an OpenMP sections directive. */
1827 scan_omp_sections (gimple stmt
, omp_context
*outer_ctx
)
1831 ctx
= new_omp_context (stmt
, outer_ctx
);
1832 scan_sharing_clauses (gimple_omp_sections_clauses (stmt
), ctx
);
1833 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1836 /* Scan an OpenMP single directive. */
1839 scan_omp_single (gimple stmt
, omp_context
*outer_ctx
)
1844 ctx
= new_omp_context (stmt
, outer_ctx
);
1845 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1846 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1847 name
= create_tmp_var_name (".omp_copy_s");
1848 name
= build_decl (gimple_location (stmt
),
1849 TYPE_DECL
, name
, ctx
->record_type
);
1850 TYPE_NAME (ctx
->record_type
) = name
;
1852 scan_sharing_clauses (gimple_omp_single_clauses (stmt
), ctx
);
1853 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1855 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1856 ctx
->record_type
= NULL
;
1858 layout_type (ctx
->record_type
);
/* Check OpenMP nesting restrictions.  */

static bool
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  if (ctx != NULL)
    {
      if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	  && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
	{
	  error_at (gimple_location (stmt),
		    "OpenMP constructs may not be nested inside simd region");
	  return false;
	}
    }
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
	return true;
      /* FALLTHRU */
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		error_at (gimple_location (stmt),
			  "barrier region may not be closely nested inside "
			  "of work-sharing, critical, ordered, master or "
			  "explicit task region");
		return false;
	      }
	    error_at (gimple_location (stmt),
		      "work-sharing region may not be closely nested inside "
		      "of work-sharing, critical, ordered, master or explicit "
		      "task region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "master region may not be closely nested inside "
		      "of work-sharing or explicit task region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "ordered region may not be closely nested inside "
		      "of critical or explicit task region");
	    return false;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      {
		error_at (gimple_location (stmt),
			  "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
		return false;
	      }
	    return true;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    error_at (gimple_location (stmt),
		      "critical region may not be nested inside a critical "
		      "region with the same name");
	    return false;
	  }
      break;
    default:
      break;
    }
  return true;
}

/* Helper function scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      bool remove = false;
      if (is_gimple_omp (stmt))
	remove = !check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    remove = !check_omp_nesting_restrictions (stmt, ctx);
	}
      if (remove)
	{
	  stmt = gimple_build_nop ();
	  gsi_replace (gsi, stmt, false);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.  The symbol iD.1562 cannot be referenced
	       inside the body of the outer parallel region.  But since we are
	       emitting this copy operation while expanding the inner parallel
	       directive, we need to access the CTX structure of the outer
	       parallel directive to get the correct mapping:

		  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
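
/* For reference, this summarizes the switch above rather than adding new
   behavior: the private copy for reduction(+), (-), (|), (^) and (||)
   starts at 0; for (*) and (&&) at 1; for (&) at ~0; for max at the
   smallest representable value (or -Inf when infinities are honored);
   and for min at the largest value (or +Inf).  */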
/* Return maximum possible vectorization factor for the target.  */

static int
omp_max_vf (void)
{
  if (!optimize
      || optimize_debug
      || (!flag_tree_loop_vectorize
	  && (global_options_set.x_flag_tree_loop_vectorize
	      || global_options_set.x_flag_tree_vectorize)))
    return 1;

  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    {
      vs = 1 << floor_log2 (vs);
      return vs;
    }
  enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
  if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
    return GET_MODE_NUNITS (vqimode);
  return 1;
}
/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
   privatization.  */

static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
			      tree &idx, tree &lane, tree &ivar, tree &lvar)
{
  if (max_vf == 0)
    {
      max_vf = omp_max_vf ();
      if (max_vf > 1)
	{
	  tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				    OMP_CLAUSE_SAFELEN);
	  if (c
	      && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
	    max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
	}
      if (max_vf > 1)
	{
	  idx = create_tmp_var (unsigned_type_node, NULL);
	  lane = create_tmp_var (unsigned_type_node, NULL);
	}
    }
  if (max_vf == 1)
    return false;

  tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
  tree avar = create_tmp_var_raw (atype, NULL);
  if (TREE_ADDRESSABLE (new_var))
    TREE_ADDRESSABLE (avar) = 1;
  DECL_ATTRIBUTES (avar)
    = tree_cons (get_identifier ("omp simd array"), NULL,
		 DECL_ATTRIBUTES (avar));
  gimple_add_tmp_var (avar);
  ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
		 NULL_TREE, NULL_TREE);
  lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
		 NULL_TREE, NULL_TREE);
  SET_DECL_VALUE_EXPR (new_var, lvar);
  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
  return true;
}
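
/* Purely for illustration (the variable names below are invented; the real
   decls are the AVAR/IDX/LANE temporaries created above): for

     #pragma omp simd private(x)

   a scalar X is rewritten to an element of an "omp simd array", roughly

     T x_arr[max_vf];
     ... x_arr[lane] ... or ... x_arr[idx] ...

   so the vectorizer can later keep one private copy of X per SIMD lane.  */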
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */
2376 lower_rec_input_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*dlist
,
2379 tree c
, dtor
, copyin_seq
, x
, ptr
;
2380 bool copyin_by_ref
= false;
2381 bool lastprivate_firstprivate
= false;
2383 bool is_simd
= (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
2384 && gimple_omp_for_kind (ctx
->stmt
) == GF_OMP_FOR_KIND_SIMD
);
2386 tree lane
= NULL_TREE
, idx
= NULL_TREE
;
2387 tree ivar
= NULL_TREE
, lvar
= NULL_TREE
;
2388 gimple_seq llist
[2] = { NULL
, NULL
};
      /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
	 with data sharing clauses referencing variable sized vars.  That
	 is unnecessarily hard to support and very unlikely to result in
	 vectorized code anyway.  */
2397 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2398 switch (OMP_CLAUSE_CODE (c
))
2400 case OMP_CLAUSE_REDUCTION
:
2401 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2404 case OMP_CLAUSE_PRIVATE
:
2405 case OMP_CLAUSE_FIRSTPRIVATE
:
2406 case OMP_CLAUSE_LASTPRIVATE
:
2407 case OMP_CLAUSE_LINEAR
:
2408 if (is_variable_sized (OMP_CLAUSE_DECL (c
)))
  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
2419 for (pass
= 0; pass
< 2; ++pass
)
2421 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2423 enum omp_clause_code c_kind
= OMP_CLAUSE_CODE (c
);
2426 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2430 case OMP_CLAUSE_PRIVATE
:
2431 if (OMP_CLAUSE_PRIVATE_DEBUG (c
))
2434 case OMP_CLAUSE_SHARED
:
2435 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c
), ctx
) == NULL
)
2437 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c
)));
2440 case OMP_CLAUSE_FIRSTPRIVATE
:
2441 case OMP_CLAUSE_COPYIN
:
2442 case OMP_CLAUSE_REDUCTION
:
2444 case OMP_CLAUSE_LINEAR
:
2446 case OMP_CLAUSE_LASTPRIVATE
:
2447 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2449 lastprivate_firstprivate
= true;
2458 new_var
= var
= OMP_CLAUSE_DECL (c
);
2459 if (c_kind
!= OMP_CLAUSE_COPYIN
)
2460 new_var
= lookup_decl (var
, ctx
);
2462 if (c_kind
== OMP_CLAUSE_SHARED
|| c_kind
== OMP_CLAUSE_COPYIN
)
2467 else if (is_variable_sized (var
))
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
2475 if (c_kind
!= OMP_CLAUSE_FIRSTPRIVATE
|| !is_task_ctx (ctx
))
2480 ptr
= DECL_VALUE_EXPR (new_var
);
2481 gcc_assert (TREE_CODE (ptr
) == INDIRECT_REF
);
2482 ptr
= TREE_OPERAND (ptr
, 0);
2483 gcc_assert (DECL_P (ptr
));
2484 x
= TYPE_SIZE_UNIT (TREE_TYPE (new_var
));
2486 /* void *tmp = __builtin_alloca */
2487 atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
2488 stmt
= gimple_build_call (atmp
, 1, x
);
2489 tmp
= create_tmp_var_raw (ptr_type_node
, NULL
);
2490 gimple_add_tmp_var (tmp
);
2491 gimple_call_set_lhs (stmt
, tmp
);
2493 gimple_seq_add_stmt (ilist
, stmt
);
2495 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ptr
), tmp
);
2496 gimplify_assign (ptr
, x
, ilist
);
2499 else if (is_reference (var
))
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
2512 x
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var
)));
2513 if (c_kind
== OMP_CLAUSE_FIRSTPRIVATE
&& is_task_ctx (ctx
))
2515 x
= build_receiver_ref (var
, false, ctx
);
2516 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2518 else if (TREE_CONSTANT (x
))
2520 const char *name
= NULL
;
2521 if (DECL_NAME (var
))
2522 name
= IDENTIFIER_POINTER (DECL_NAME (new_var
));
2524 x
= create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var
)),
2526 gimple_add_tmp_var (x
);
2527 TREE_ADDRESSABLE (x
) = 1;
2528 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2532 tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
2533 x
= build_call_expr_loc (clause_loc
, atmp
, 1, x
);
2536 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
2537 gimplify_assign (new_var
, x
, ilist
);
2539 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2541 else if (c_kind
== OMP_CLAUSE_REDUCTION
2542 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2550 switch (OMP_CLAUSE_CODE (c
))
2552 case OMP_CLAUSE_SHARED
:
2553 /* Shared global vars are just accessed directly. */
2554 if (is_global_var (new_var
))
2556 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2557 needs to be delayed until after fixup_child_record_type so
2558 that we get the correct type during the dereference. */
2559 by_ref
= use_pointer_for_field (var
, ctx
);
2560 x
= build_receiver_ref (var
, by_ref
, ctx
);
2561 SET_DECL_VALUE_EXPR (new_var
, x
);
2562 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
2564 /* ??? If VAR is not passed by reference, and the variable
2565 hasn't been initialized yet, then we'll get a warning for
2566 the store into the omp_data_s structure. Ideally, we'd be
2567 able to notice this and not store anything at all, but
2568 we're generating code too early. Suppress the warning. */
2570 TREE_NO_WARNING (var
) = 1;
2573 case OMP_CLAUSE_LASTPRIVATE
:
2574 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2578 case OMP_CLAUSE_PRIVATE
:
2579 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_PRIVATE
)
2580 x
= build_outer_var_ref (var
, ctx
);
2581 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
2583 if (is_task_ctx (ctx
))
2584 x
= build_receiver_ref (var
, false, ctx
);
2586 x
= build_outer_var_ref (var
, ctx
);
2591 x
= lang_hooks
.decls
.omp_clause_default_ctor (c
, new_var
, x
);
2594 tree y
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
2595 if ((TREE_ADDRESSABLE (new_var
) || x
|| y
2596 || OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
)
2597 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
2598 idx
, lane
, ivar
, lvar
))
2601 x
= lang_hooks
.decls
.omp_clause_default_ctor
2602 (c
, unshare_expr (ivar
), x
);
2604 gimplify_and_add (x
, &llist
[0]);
2607 y
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
2610 gimple_seq tseq
= NULL
;
2613 gimplify_stmt (&dtor
, &tseq
);
2614 gimple_seq_add_seq (&llist
[1], tseq
);
2621 gimplify_and_add (x
, ilist
);
2625 x
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
2628 gimple_seq tseq
= NULL
;
2631 gimplify_stmt (&dtor
, &tseq
);
2632 gimple_seq_add_seq (dlist
, tseq
);
2636 case OMP_CLAUSE_LINEAR
:
2637 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c
))
2638 goto do_firstprivate
;
2639 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c
))
2642 x
= build_outer_var_ref (var
, ctx
);
2645 case OMP_CLAUSE_FIRSTPRIVATE
:
2646 if (is_task_ctx (ctx
))
2648 if (is_reference (var
) || is_variable_sized (var
))
2650 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var
,
2652 || use_pointer_for_field (var
, NULL
))
2654 x
= build_receiver_ref (var
, false, ctx
);
2655 SET_DECL_VALUE_EXPR (new_var
, x
);
2656 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
2661 x
= build_outer_var_ref (var
, ctx
);
2664 if ((OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_LINEAR
2665 || TREE_ADDRESSABLE (new_var
))
2666 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
2667 idx
, lane
, ivar
, lvar
))
2669 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
)
2671 tree iv
= create_tmp_var (TREE_TYPE (new_var
), NULL
);
2672 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, iv
, x
);
2673 gimplify_and_add (x
, ilist
);
2674 gimple_stmt_iterator gsi
2675 = gsi_start_1 (gimple_omp_body_ptr (ctx
->stmt
));
2677 = gimple_build_assign (unshare_expr (lvar
), iv
);
2678 gsi_insert_before_without_update (&gsi
, g
,
2680 tree stept
= POINTER_TYPE_P (TREE_TYPE (x
))
2681 ? sizetype
: TREE_TYPE (x
);
2682 tree t
= fold_convert (stept
,
2683 OMP_CLAUSE_LINEAR_STEP (c
));
2684 enum tree_code code
= PLUS_EXPR
;
2685 if (POINTER_TYPE_P (TREE_TYPE (new_var
)))
2686 code
= POINTER_PLUS_EXPR
;
2687 g
= gimple_build_assign_with_ops (code
, iv
, iv
, t
);
2688 gsi_insert_before_without_update (&gsi
, g
,
2692 x
= lang_hooks
.decls
.omp_clause_copy_ctor
2693 (c
, unshare_expr (ivar
), x
);
2694 gimplify_and_add (x
, &llist
[0]);
2695 x
= lang_hooks
.decls
.omp_clause_dtor (c
, ivar
);
2698 gimple_seq tseq
= NULL
;
2701 gimplify_stmt (&dtor
, &tseq
);
2702 gimple_seq_add_seq (&llist
[1], tseq
);
2707 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, new_var
, x
);
2708 gimplify_and_add (x
, ilist
);
2711 case OMP_CLAUSE_COPYIN
:
2712 by_ref
= use_pointer_for_field (var
, NULL
);
2713 x
= build_receiver_ref (var
, by_ref
, ctx
);
2714 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, x
);
2715 append_to_statement_list (x
, ©in_seq
);
2716 copyin_by_ref
|= by_ref
;
2719 case OMP_CLAUSE_REDUCTION
:
2720 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2722 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
2723 x
= build_outer_var_ref (var
, ctx
);
2725 /* FIXME: Not handled yet. */
2726 gcc_assert (!is_simd
);
2727 if (is_reference (var
))
2728 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2729 SET_DECL_VALUE_EXPR (placeholder
, x
);
2730 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
2731 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
), ctx
);
2732 gimple_seq_add_seq (ilist
,
2733 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
));
2734 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
2735 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
2739 x
= omp_reduction_init (c
, TREE_TYPE (new_var
));
2740 gcc_assert (TREE_CODE (TREE_TYPE (new_var
)) != ARRAY_TYPE
);
2742 && lower_rec_simd_input_clauses (new_var
, ctx
, max_vf
,
2743 idx
, lane
, ivar
, lvar
))
2745 enum tree_code code
= OMP_CLAUSE_REDUCTION_CODE (c
);
2746 tree ref
= build_outer_var_ref (var
, ctx
);
2748 gimplify_assign (unshare_expr (ivar
), x
, &llist
[0]);
2750 /* reduction(-:var) sums up the partial results, so it
2751 acts identically to reduction(+:var). */
2752 if (code
== MINUS_EXPR
)
2755 x
= build2 (code
, TREE_TYPE (ref
), ref
, ivar
);
2756 ref
= build_outer_var_ref (var
, ctx
);
2757 gimplify_assign (ref
, x
, &llist
[1]);
2761 gimplify_assign (new_var
, x
, ilist
);
2763 gimplify_assign (build_outer_var_ref (var
, ctx
),
2777 tree uid
= create_tmp_var (ptr_type_node
, "simduid");
2778 /* Don't want uninit warnings on simduid, it is always uninitialized,
2779 but we use it not for the value, but for the DECL_UID only. */
2780 TREE_NO_WARNING (uid
) = 1;
2782 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE
, 1, uid
);
2783 gimple_call_set_lhs (g
, lane
);
2784 gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx
->stmt
));
2785 gsi_insert_before_without_update (&gsi
, g
, GSI_SAME_STMT
);
2786 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE__SIMDUID_
);
2787 OMP_CLAUSE__SIMDUID__DECL (c
) = uid
;
2788 OMP_CLAUSE_CHAIN (c
) = gimple_omp_for_clauses (ctx
->stmt
);
2789 gimple_omp_for_set_clauses (ctx
->stmt
, c
);
2790 g
= gimple_build_assign_with_ops (INTEGER_CST
, lane
,
2791 build_int_cst (unsigned_type_node
, 0),
2793 gimple_seq_add_stmt (ilist
, g
);
2794 for (int i
= 0; i
< 2; i
++)
2797 tree vf
= create_tmp_var (unsigned_type_node
, NULL
);
2798 g
= gimple_build_call_internal (IFN_GOMP_SIMD_VF
, 1, uid
);
2799 gimple_call_set_lhs (g
, vf
);
2800 gimple_seq
*seq
= i
== 0 ? ilist
: dlist
;
2801 gimple_seq_add_stmt (seq
, g
);
2802 tree t
= build_int_cst (unsigned_type_node
, 0);
2803 g
= gimple_build_assign_with_ops (INTEGER_CST
, idx
, t
, NULL_TREE
);
2804 gimple_seq_add_stmt (seq
, g
);
2805 tree body
= create_artificial_label (UNKNOWN_LOCATION
);
2806 tree header
= create_artificial_label (UNKNOWN_LOCATION
);
2807 tree end
= create_artificial_label (UNKNOWN_LOCATION
);
2808 gimple_seq_add_stmt (seq
, gimple_build_goto (header
));
2809 gimple_seq_add_stmt (seq
, gimple_build_label (body
));
2810 gimple_seq_add_seq (seq
, llist
[i
]);
2811 t
= build_int_cst (unsigned_type_node
, 1);
2812 g
= gimple_build_assign_with_ops (PLUS_EXPR
, idx
, idx
, t
);
2813 gimple_seq_add_stmt (seq
, g
);
2814 gimple_seq_add_stmt (seq
, gimple_build_label (header
));
2815 g
= gimple_build_cond (LT_EXPR
, idx
, vf
, body
, end
);
2816 gimple_seq_add_stmt (seq
, g
);
2817 gimple_seq_add_stmt (seq
, gimple_build_label (end
));
  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
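  /* In other words, the statements built just below amount to

       if (__builtin_omp_get_thread_num () != 0)
	 { <copyin_seq> }

     so only the non-master threads copy the master's threadprivate
     values.  */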
2826 x
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
),
2828 x
= build2 (NE_EXPR
, boolean_type_node
, x
,
2829 build_int_cst (TREE_TYPE (x
), 0));
2830 x
= build3 (COND_EXPR
, void_type_node
, x
, copyin_seq
, NULL
);
2831 gimplify_and_add (x
, ilist
);
  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
2839 if (copyin_by_ref
|| lastprivate_firstprivate
)
      /* Don't add any barrier for #pragma omp simd or
	 #pragma omp distribute.  */
2843 if (gimple_code (ctx
->stmt
) != GIMPLE_OMP_FOR
2844 || gimple_omp_for_kind (ctx
->stmt
) == GF_OMP_FOR_KIND_FOR
)
2845 gimplify_and_add (build_omp_barrier (), ilist
);
  /* If max_vf is non-zero, then we can use only a vectorization factor
     up to the max_vf we chose.  So stick it into the safelen clause.  */
2852 tree c
= find_omp_clause (gimple_omp_for_clauses (ctx
->stmt
),
2853 OMP_CLAUSE_SAFELEN
);
2855 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c
),
2858 c
= build_omp_clause (UNKNOWN_LOCATION
, OMP_CLAUSE_SAFELEN
);
2859 OMP_CLAUSE_SAFELEN_EXPR (c
) = build_int_cst (integer_type_node
,
2861 OMP_CLAUSE_CHAIN (c
) = gimple_omp_for_clauses (ctx
->stmt
);
2862 gimple_omp_for_set_clauses (ctx
->stmt
, c
);
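
/* As an informal example of the receiver-side code built above (variable
   names invented): for

     #pragma omp parallel firstprivate(a) private(b)

   ILIST receives roughly

     a.priv = <copy-ctor> (.omp_data_i->a);
     b.priv = <default-ctor> ();

   while DLIST receives the matching destructor calls, if the type needs
   any.  */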
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */
2873 lower_lastprivate_clauses (tree clauses
, tree predicate
, gimple_seq
*stmt_list
,
2876 tree x
, c
, label
= NULL
, orig_clauses
= clauses
;
2877 bool par_clauses
= false;
2878 tree simduid
= NULL
, lastlane
= NULL
;
  /* Early exit if there are no lastprivate or linear clauses.  */
2881 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
2882 if (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_LASTPRIVATE
2883 || (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_LINEAR
2884 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses
)))
2886 if (clauses
== NULL
)
2888 /* If this was a workshare clause, see if it had been combined
2889 with its parallel. In that case, look for the clauses on the
2890 parallel statement itself. */
2891 if (is_parallel_ctx (ctx
))
2895 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
2898 clauses
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
2899 OMP_CLAUSE_LASTPRIVATE
);
2900 if (clauses
== NULL
)
2908 tree label_true
, arm1
, arm2
;
2910 label
= create_artificial_label (UNKNOWN_LOCATION
);
2911 label_true
= create_artificial_label (UNKNOWN_LOCATION
);
2912 arm1
= TREE_OPERAND (predicate
, 0);
2913 arm2
= TREE_OPERAND (predicate
, 1);
2914 gimplify_expr (&arm1
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
2915 gimplify_expr (&arm2
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
2916 stmt
= gimple_build_cond (TREE_CODE (predicate
), arm1
, arm2
,
2918 gimple_seq_add_stmt (stmt_list
, stmt
);
2919 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label_true
));
2922 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
2923 && gimple_omp_for_kind (ctx
->stmt
) == GF_OMP_FOR_KIND_SIMD
)
2925 simduid
= find_omp_clause (orig_clauses
, OMP_CLAUSE__SIMDUID_
);
2927 simduid
= OMP_CLAUSE__SIMDUID__DECL (simduid
);
2930 for (c
= clauses
; c
;)
2933 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2935 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
2936 || (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
2937 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c
)))
2939 var
= OMP_CLAUSE_DECL (c
);
2940 new_var
= lookup_decl (var
, ctx
);
2942 if (simduid
&& DECL_HAS_VALUE_EXPR_P (new_var
))
2944 tree val
= DECL_VALUE_EXPR (new_var
);
2945 if (TREE_CODE (val
) == ARRAY_REF
2946 && VAR_P (TREE_OPERAND (val
, 0))
2947 && lookup_attribute ("omp simd array",
2948 DECL_ATTRIBUTES (TREE_OPERAND (val
,
2951 if (lastlane
== NULL
)
2953 lastlane
= create_tmp_var (unsigned_type_node
, NULL
);
2955 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE
,
2957 TREE_OPERAND (val
, 1));
2958 gimple_call_set_lhs (g
, lastlane
);
2959 gimple_seq_add_stmt (stmt_list
, g
);
2961 new_var
= build4 (ARRAY_REF
, TREE_TYPE (val
),
2962 TREE_OPERAND (val
, 0), lastlane
,
2963 NULL_TREE
, NULL_TREE
);
2967 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
2968 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
2970 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
2971 gimple_seq_add_seq (stmt_list
,
2972 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
));
2973 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
) = NULL
;
2976 x
= build_outer_var_ref (var
, ctx
);
2977 if (is_reference (var
))
2978 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2979 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, x
, new_var
);
2980 gimplify_and_add (x
, stmt_list
);
2982 c
= OMP_CLAUSE_CHAIN (c
);
2983 if (c
== NULL
&& !par_clauses
)
2985 /* If this was a workshare clause, see if it had been combined
2986 with its parallel. In that case, continue looking for the
2987 clauses also on the parallel statement itself. */
2988 if (is_parallel_ctx (ctx
))
2992 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
2995 c
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
2996 OMP_CLAUSE_LASTPRIVATE
);
3002 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label
));
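
/* Informally (names invented): for #pragma omp for lastprivate(x), the code
   appended to STMT_LIST above is roughly

     if (<this thread ran the sequentially last iteration>)
       x = x.priv;

   where the guard is built from PREDICATE and the assignment uses the
   omp_clause_assign_op langhook.  */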
/* Generate code to implement the REDUCTION clauses.  */
3009 lower_reduction_clauses (tree clauses
, gimple_seq
*stmt_seqp
, omp_context
*ctx
)
3011 gimple_seq sub_seq
= NULL
;
3016 /* SIMD reductions are handled in lower_rec_input_clauses. */
3017 if (gimple_code (ctx
->stmt
) == GIMPLE_OMP_FOR
3018 && gimple_omp_for_kind (ctx
->stmt
) == GF_OMP_FOR_KIND_SIMD
)
3021 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3022 update in that case, otherwise use a lock. */
3023 for (c
= clauses
; c
&& count
< 2; c
= OMP_CLAUSE_CHAIN (c
))
3024 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
3026 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3028 /* Never use OMP_ATOMIC for array reductions. */
3038 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
3040 tree var
, ref
, new_var
;
3041 enum tree_code code
;
3042 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
3044 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
3047 var
= OMP_CLAUSE_DECL (c
);
3048 new_var
= lookup_decl (var
, ctx
);
3049 if (is_reference (var
))
3050 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
3051 ref
= build_outer_var_ref (var
, ctx
);
3052 code
= OMP_CLAUSE_REDUCTION_CODE (c
);
3054 /* reduction(-:var) sums up the partial results, so it acts
3055 identically to reduction(+:var). */
3056 if (code
== MINUS_EXPR
)
3061 tree addr
= build_fold_addr_expr_loc (clause_loc
, ref
);
3063 addr
= save_expr (addr
);
3064 ref
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (addr
)), addr
);
3065 x
= fold_build2_loc (clause_loc
, code
, TREE_TYPE (ref
), ref
, new_var
);
3066 x
= build2 (OMP_ATOMIC
, void_type_node
, addr
, x
);
3067 gimplify_and_add (x
, stmt_seqp
);
3071 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
3073 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
3075 if (is_reference (var
))
3076 ref
= build_fold_addr_expr_loc (clause_loc
, ref
);
3077 SET_DECL_VALUE_EXPR (placeholder
, ref
);
3078 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
3079 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
3080 gimple_seq_add_seq (&sub_seq
, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
));
3081 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
3082 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
) = NULL
;
3086 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
3087 ref
= build_outer_var_ref (var
, ctx
);
3088 gimplify_assign (ref
, x
, &sub_seq
);
3092 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
),
3094 gimple_seq_add_stmt (stmt_seqp
, stmt
);
3096 gimple_seq_add_seq (stmt_seqp
, sub_seq
);
3098 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
),
3100 gimple_seq_add_stmt (stmt_seqp
, stmt
);
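
/* Informally: with exactly one scalar reduction(+:s) clause the merge
   emitted above is an atomic update, roughly

     #pragma omp atomic
     s = s + s.priv;

   whereas with several clauses, or with placeholder (user-defined or array)
   reductions, the merges are instead bracketed by GOMP_atomic_start ()
   and GOMP_atomic_end ().  */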
/* Generate code to implement the COPYPRIVATE clauses.  */
3107 lower_copyprivate_clauses (tree clauses
, gimple_seq
*slist
, gimple_seq
*rlist
,
3112 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
3114 tree var
, new_var
, ref
, x
;
3116 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
3118 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYPRIVATE
)
3121 var
= OMP_CLAUSE_DECL (c
);
3122 by_ref
= use_pointer_for_field (var
, NULL
);
3124 ref
= build_sender_ref (var
, ctx
);
3125 x
= new_var
= lookup_decl_in_outer_ctx (var
, ctx
);
3128 x
= build_fold_addr_expr_loc (clause_loc
, new_var
);
3129 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ref
), x
);
3131 gimplify_assign (ref
, x
, slist
);
3133 ref
= build_receiver_ref (var
, false, ctx
);
3136 ref
= fold_convert_loc (clause_loc
,
3137 build_pointer_type (TREE_TYPE (new_var
)),
3139 ref
= build_fold_indirect_ref_loc (clause_loc
, ref
);
3141 if (is_reference (var
))
3143 ref
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), ref
);
3144 ref
= build_simple_mem_ref_loc (clause_loc
, ref
);
3145 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
3147 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, ref
);
3148 gimplify_and_add (x
, rlist
);
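
/* Informally: for #pragma omp single copyprivate(x), the thread that
   executed the single region stores x (or &x when the field is a pointer)
   into the communication record via SLIST, and every other thread loads it
   back from the record and assigns it to its own x via RLIST.  */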
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */
3157 lower_send_clauses (tree clauses
, gimple_seq
*ilist
, gimple_seq
*olist
,
3162 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
3164 tree val
, ref
, x
, var
;
3165 bool by_ref
, do_in
= false, do_out
= false;
3166 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
3168 switch (OMP_CLAUSE_CODE (c
))
3170 case OMP_CLAUSE_PRIVATE
:
3171 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
3174 case OMP_CLAUSE_FIRSTPRIVATE
:
3175 case OMP_CLAUSE_COPYIN
:
3176 case OMP_CLAUSE_LASTPRIVATE
:
3177 case OMP_CLAUSE_REDUCTION
:
3183 val
= OMP_CLAUSE_DECL (c
);
3184 var
= lookup_decl_in_outer_ctx (val
, ctx
);
3186 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYIN
3187 && is_global_var (var
))
3189 if (is_variable_sized (val
))
3191 by_ref
= use_pointer_for_field (val
, NULL
);
3193 switch (OMP_CLAUSE_CODE (c
))
3195 case OMP_CLAUSE_PRIVATE
:
3196 case OMP_CLAUSE_FIRSTPRIVATE
:
3197 case OMP_CLAUSE_COPYIN
:
3201 case OMP_CLAUSE_LASTPRIVATE
:
3202 if (by_ref
|| is_reference (val
))
3204 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
3211 if (lang_hooks
.decls
.omp_private_outer_ref (val
))
3216 case OMP_CLAUSE_REDUCTION
:
3218 do_out
= !(by_ref
|| is_reference (val
));
3227 ref
= build_sender_ref (val
, ctx
);
3228 x
= by_ref
? build_fold_addr_expr_loc (clause_loc
, var
) : var
;
3229 gimplify_assign (ref
, x
, ilist
);
3230 if (is_task_ctx (ctx
))
3231 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref
, 1)) = NULL
;
3236 ref
= build_sender_ref (val
, ctx
);
3237 gimplify_assign (var
, ref
, olist
);
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */
3247 lower_send_shared_vars (gimple_seq
*ilist
, gimple_seq
*olist
, omp_context
*ctx
)
3249 tree var
, ovar
, nvar
, f
, x
, record_type
;
3251 if (ctx
->record_type
== NULL
)
3254 record_type
= ctx
->srecord_type
? ctx
->srecord_type
: ctx
->record_type
;
3255 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
3257 ovar
= DECL_ABSTRACT_ORIGIN (f
);
3258 nvar
= maybe_lookup_decl (ovar
, ctx
);
3259 if (!nvar
|| !DECL_HAS_VALUE_EXPR_P (nvar
))
3262 /* If CTX is a nested parallel directive. Find the immediately
3263 enclosing parallel or workshare construct that contains a
3264 mapping for OVAR. */
3265 var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
3267 if (use_pointer_for_field (ovar
, ctx
))
3269 x
= build_sender_ref (ovar
, ctx
);
3270 var
= build_fold_addr_expr (var
);
3271 gimplify_assign (x
, var
, ilist
);
3275 x
= build_sender_ref (ovar
, ctx
);
3276 gimplify_assign (x
, var
, ilist
);
3278 if (!TREE_READONLY (var
)
3279 /* We don't need to receive a new reference to a result
3280 or parm decl. In fact we may not store to it as we will
3281 invalidate any pending RSO and generate wrong gimple
3283 && !((TREE_CODE (var
) == RESULT_DECL
3284 || TREE_CODE (var
) == PARM_DECL
)
3285 && DECL_BY_REFERENCE (var
)))
3287 x
= build_sender_ref (ovar
, ctx
);
3288 gimplify_assign (var
, x
, olist
);
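
/* Informally (field names invented): for a shared variable i passed by
   reference the sender emits

     .omp_data_o.i = &i;		<- ILIST, before the region

   and for a by-value field it emits the store before the region and, unless
   the variable is read-only, the load back after it:

     .omp_data_o.i = i;			<- ILIST
     i = .omp_data_o.i;			<- OLIST  */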
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */
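
/* Informally, for a plain #pragma omp parallel the sequence emitted here is
   roughly

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   with one of the GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   entry points used instead for combined parallel+workshare regions.  */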
3317 expand_parallel_call (struct omp_region
*region
, basic_block bb
,
3318 gimple entry_stmt
, vec
<tree
, va_gc
> *ws_args
)
3320 tree t
, t1
, t2
, val
, cond
, c
, clauses
;
3321 gimple_stmt_iterator gsi
;
3323 enum built_in_function start_ix
;
3325 location_t clause_loc
;
3326 vec
<tree
, va_gc
> *args
;
3328 clauses
= gimple_omp_parallel_clauses (entry_stmt
);
3330 /* Determine what flavor of GOMP_parallel_start we will be
3332 start_ix
= BUILT_IN_GOMP_PARALLEL_START
;
3333 if (is_combined_parallel (region
))
3335 switch (region
->inner
->type
)
3337 case GIMPLE_OMP_FOR
:
3338 gcc_assert (region
->inner
->sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
3339 start_ix2
= ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
3340 + (region
->inner
->sched_kind
3341 == OMP_CLAUSE_SCHEDULE_RUNTIME
3342 ? 3 : region
->inner
->sched_kind
));
3343 start_ix
= (enum built_in_function
)start_ix2
;
3345 case GIMPLE_OMP_SECTIONS
:
3346 start_ix
= BUILT_IN_GOMP_PARALLEL_SECTIONS_START
;
3353 /* By default, the value of NUM_THREADS is zero (selected at run time)
3354 and there is no conditional. */
3356 val
= build_int_cst (unsigned_type_node
, 0);
3358 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
3360 cond
= OMP_CLAUSE_IF_EXPR (c
);
3362 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_THREADS
);
3365 val
= OMP_CLAUSE_NUM_THREADS_EXPR (c
);
3366 clause_loc
= OMP_CLAUSE_LOCATION (c
);
3369 clause_loc
= gimple_location (entry_stmt
);
3371 /* Ensure 'val' is of the correct type. */
3372 val
= fold_convert_loc (clause_loc
, unsigned_type_node
, val
);
3374 /* If we found the clause 'if (cond)', build either
3375 (cond != 0) or (cond ? val : 1u). */
3378 gimple_stmt_iterator gsi
;
3380 cond
= gimple_boolify (cond
);
3382 if (integer_zerop (val
))
3383 val
= fold_build2_loc (clause_loc
,
3384 EQ_EXPR
, unsigned_type_node
, cond
,
3385 build_int_cst (TREE_TYPE (cond
), 0));
3388 basic_block cond_bb
, then_bb
, else_bb
;
3389 edge e
, e_then
, e_else
;
3390 tree tmp_then
, tmp_else
, tmp_join
, tmp_var
;
3392 tmp_var
= create_tmp_var (TREE_TYPE (val
), NULL
);
3393 if (gimple_in_ssa_p (cfun
))
3395 tmp_then
= make_ssa_name (tmp_var
, NULL
);
3396 tmp_else
= make_ssa_name (tmp_var
, NULL
);
3397 tmp_join
= make_ssa_name (tmp_var
, NULL
);
3406 e
= split_block (bb
, NULL
);
3411 then_bb
= create_empty_bb (cond_bb
);
3412 else_bb
= create_empty_bb (then_bb
);
3413 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
3414 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
3416 stmt
= gimple_build_cond_empty (cond
);
3417 gsi
= gsi_start_bb (cond_bb
);
3418 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3420 gsi
= gsi_start_bb (then_bb
);
3421 stmt
= gimple_build_assign (tmp_then
, val
);
3422 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3424 gsi
= gsi_start_bb (else_bb
);
3425 stmt
= gimple_build_assign
3426 (tmp_else
, build_int_cst (unsigned_type_node
, 1));
3427 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3429 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
3430 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
3433 add_bb_to_loop (then_bb
, cond_bb
->loop_father
);
3434 add_bb_to_loop (else_bb
, cond_bb
->loop_father
);
3436 e_then
= make_edge (then_bb
, bb
, EDGE_FALLTHRU
);
3437 e_else
= make_edge (else_bb
, bb
, EDGE_FALLTHRU
);
3439 if (gimple_in_ssa_p (cfun
))
3441 gimple phi
= create_phi_node (tmp_join
, bb
);
3442 add_phi_arg (phi
, tmp_then
, e_then
, UNKNOWN_LOCATION
);
3443 add_phi_arg (phi
, tmp_else
, e_else
, UNKNOWN_LOCATION
);
3449 gsi
= gsi_start_bb (bb
);
3450 val
= force_gimple_operand_gsi (&gsi
, val
, true, NULL_TREE
,
3451 false, GSI_CONTINUE_LINKING
);
3454 gsi
= gsi_last_bb (bb
);
3455 t
= gimple_omp_parallel_data_arg (entry_stmt
);
3457 t1
= null_pointer_node
;
3459 t1
= build_fold_addr_expr (t
);
3460 t2
= build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt
));
3462 vec_alloc (args
, 3 + vec_safe_length (ws_args
));
3463 args
->quick_push (t2
);
3464 args
->quick_push (t1
);
3465 args
->quick_push (val
);
3467 args
->splice (*ws_args
);
3469 t
= build_call_expr_loc_vec (UNKNOWN_LOCATION
,
3470 builtin_decl_explicit (start_ix
), args
);
3472 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3473 false, GSI_CONTINUE_LINKING
);
3475 t
= gimple_omp_parallel_data_arg (entry_stmt
);
3477 t
= null_pointer_node
;
3479 t
= build_fold_addr_expr (t
);
3480 t
= build_call_expr_loc (gimple_location (entry_stmt
),
3481 gimple_omp_parallel_child_fn (entry_stmt
), 1, t
);
3482 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3483 false, GSI_CONTINUE_LINKING
);
3485 t
= build_call_expr_loc (gimple_location (entry_stmt
),
3486 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END
),
3488 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3489 false, GSI_CONTINUE_LINKING
);
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */
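
/* Informally, the call emitted here is roughly

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
		if_cond, flags);

   where FLAGS encodes untied (1), final (2) and mergeable (4) as computed
   below, and the data and copy-function arguments may be NULL.  */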
static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
  flags = build_int_cst (unsigned_type_node,
			 (c ? 1 : 0) + (c2 ? 4 : 0));

  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
			   build_int_cst (unsigned_type_node, 2),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
		       7, t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
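
/* Informally, the wrapping above turns BODY into

     try { BODY } catch (...) { <must-not-throw handler> }

   where the handler is the language's EH-protect cleanup action (or
   __builtin_trap), so an exception escaping an OpenMP structured block
   terminates instead of unwinding past the region boundary.  */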
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
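
/* For example, in

     #pragma omp parallel
     {
       #pragma omp for
       for (...) { ... }
     }

   the implicit barrier at the end of the for construct is immediately
   followed by the implicit barrier at the end of the parallel region, so
   the workshare's GIMPLE_OMP_RETURN can be marked nowait, unless queued
   tasks might still need the barrier (checked below).  */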
3599 remove_exit_barrier (struct omp_region
*region
)
3601 gimple_stmt_iterator gsi
;
3602 basic_block exit_bb
;
3606 int any_addressable_vars
= -1;
3608 exit_bb
= region
->exit
;
3610 /* If the parallel region doesn't return, we don't have REGION->EXIT
3615 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3616 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3617 statements that can appear in between are extremely limited -- no
3618 memory operations at all. Here, we allow nothing at all, so the
3619 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3620 gsi
= gsi_last_bb (exit_bb
);
3621 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
3623 if (!gsi_end_p (gsi
) && gimple_code (gsi_stmt (gsi
)) != GIMPLE_LABEL
)
3626 FOR_EACH_EDGE (e
, ei
, exit_bb
->preds
)
3628 gsi
= gsi_last_bb (e
->src
);
3629 if (gsi_end_p (gsi
))
3631 stmt
= gsi_stmt (gsi
);
3632 if (gimple_code (stmt
) == GIMPLE_OMP_RETURN
3633 && !gimple_omp_return_nowait_p (stmt
))
3635 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3636 in many cases. If there could be tasks queued, the barrier
3637 might be needed to let the tasks run before some local
3638 variable of the parallel that the task uses as shared
3639 runs out of scope. The task can be spawned either
3640 from within current function (this would be easy to check)
3641 or from some function it calls and gets passed an address
3642 of such a variable. */
3643 if (any_addressable_vars
< 0)
3645 gimple parallel_stmt
= last_stmt (region
->entry
);
3646 tree child_fun
= gimple_omp_parallel_child_fn (parallel_stmt
);
3647 tree local_decls
, block
, decl
;
3650 any_addressable_vars
= 0;
3651 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun
), ix
, decl
)
3652 if (TREE_ADDRESSABLE (decl
))
3654 any_addressable_vars
= 1;
3657 for (block
= gimple_block (stmt
);
3658 !any_addressable_vars
3660 && TREE_CODE (block
) == BLOCK
;
3661 block
= BLOCK_SUPERCONTEXT (block
))
3663 for (local_decls
= BLOCK_VARS (block
);
3665 local_decls
= DECL_CHAIN (local_decls
))
3666 if (TREE_ADDRESSABLE (local_decls
))
3668 any_addressable_vars
= 1;
3671 if (block
== gimple_block (parallel_stmt
))
3675 if (!any_addressable_vars
)
3676 gimple_omp_return_set_nowait (stmt
);
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */
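
/* For example, a call to omp_get_thread_num () inside the outlined parallel
   body is redirected to __builtin_omp_get_thread_num (), which is declared
   const, so repeated calls within the body can be CSEd by later passes.  */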
3708 optimize_omp_library_calls (gimple entry_stmt
)
3711 gimple_stmt_iterator gsi
;
3712 tree thr_num_tree
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3713 tree thr_num_id
= DECL_ASSEMBLER_NAME (thr_num_tree
);
3714 tree num_thr_tree
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3715 tree num_thr_id
= DECL_ASSEMBLER_NAME (num_thr_tree
);
3716 bool untied_task
= (gimple_code (entry_stmt
) == GIMPLE_OMP_TASK
3717 && find_omp_clause (gimple_omp_task_clauses (entry_stmt
),
3718 OMP_CLAUSE_UNTIED
) != NULL
);
3721 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3723 gimple call
= gsi_stmt (gsi
);
3726 if (is_gimple_call (call
)
3727 && (decl
= gimple_call_fndecl (call
))
3728 && DECL_EXTERNAL (decl
)
3729 && TREE_PUBLIC (decl
)
3730 && DECL_INITIAL (decl
) == NULL
)
3734 if (DECL_NAME (decl
) == thr_num_id
)
3736 /* In #pragma omp task untied omp_get_thread_num () can change
3737 during the execution of the task region. */
3740 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3742 else if (DECL_NAME (decl
) == num_thr_id
)
3743 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3747 if (DECL_ASSEMBLER_NAME (decl
) != DECL_ASSEMBLER_NAME (built_in
)
3748 || gimple_call_num_args (call
) != 0)
3751 if (flag_exceptions
&& !TREE_NOTHROW (decl
))
3754 if (TREE_CODE (TREE_TYPE (decl
)) != FUNCTION_TYPE
3755 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl
)),
3756 TREE_TYPE (TREE_TYPE (built_in
))))
3759 gimple_call_set_fndecl (call
, built_in
);
/* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs to be
   regimplified.  */

static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
/* Prepend TO = FROM assignment before *GSI_P.  */

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
{
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   true, GSI_SAME_STMT);
  gimple stmt = gimple_build_assign (to, from);
  gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}
/* Expand the OpenMP parallel or task directive starting at REGION.  */
3804 expand_omp_taskreg (struct omp_region
*region
)
3806 basic_block entry_bb
, exit_bb
, new_bb
;
3807 struct function
*child_cfun
;
3808 tree child_fn
, block
, t
;
3809 gimple_stmt_iterator gsi
;
3810 gimple entry_stmt
, stmt
;
3812 vec
<tree
, va_gc
> *ws_args
;
3814 entry_stmt
= last_stmt (region
->entry
);
3815 child_fn
= gimple_omp_taskreg_child_fn (entry_stmt
);
3816 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
3818 entry_bb
= region
->entry
;
3819 exit_bb
= region
->exit
;
3821 if (is_combined_parallel (region
))
3822 ws_args
= region
->ws_args
;
3826 if (child_cfun
->cfg
)
3828 /* Due to inlining, it may happen that we have already outlined
3829 the region, in which case all we need to do is make the
3830 sub-graph unreachable and emit the parallel call. */
3831 edge entry_succ_e
, exit_succ_e
;
3832 gimple_stmt_iterator gsi
;
3834 entry_succ_e
= single_succ_edge (entry_bb
);
3836 gsi
= gsi_last_bb (entry_bb
);
3837 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_PARALLEL
3838 || gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_TASK
);
3839 gsi_remove (&gsi
, true);
3844 exit_succ_e
= single_succ_edge (exit_bb
);
3845 make_edge (new_bb
, exit_succ_e
->dest
, EDGE_FALLTHRU
);
3847 remove_edge_and_dominated_blocks (entry_succ_e
);
3851 unsigned srcidx
, dstidx
, num
;
3853 /* If the parallel region needs data sent from the parent
3854 function, then the very first statement (except possible
3855 tree profile counter updates) of the parallel body
3856 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3857 &.OMP_DATA_O is passed as an argument to the child function,
3858 we need to replace it with the argument as seen by the child
3861 In most cases, this will end up being the identity assignment
3862 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3863 a function call that has been inlined, the original PARM_DECL
3864 .OMP_DATA_I may have been converted into a different local
3865 variable. In which case, we need to keep the assignment. */
3866 if (gimple_omp_taskreg_data_arg (entry_stmt
))
3868 basic_block entry_succ_bb
= single_succ (entry_bb
);
3869 gimple_stmt_iterator gsi
;
3871 gimple parcopy_stmt
= NULL
;
3873 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
3877 gcc_assert (!gsi_end_p (gsi
));
3878 stmt
= gsi_stmt (gsi
);
3879 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
3882 if (gimple_num_ops (stmt
) == 2)
3884 tree arg
= gimple_assign_rhs1 (stmt
);
3886 /* We're ignore the subcode because we're
3887 effectively doing a STRIP_NOPS. */
3889 if (TREE_CODE (arg
) == ADDR_EXPR
3890 && TREE_OPERAND (arg
, 0)
3891 == gimple_omp_taskreg_data_arg (entry_stmt
))
3893 parcopy_stmt
= stmt
;
3899 gcc_assert (parcopy_stmt
!= NULL
);
3900 arg
= DECL_ARGUMENTS (child_fn
);
3902 if (!gimple_in_ssa_p (cfun
))
3904 if (gimple_assign_lhs (parcopy_stmt
) == arg
)
3905 gsi_remove (&gsi
, true);
3908 /* ?? Is setting the subcode really necessary ?? */
3909 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (arg
));
3910 gimple_assign_set_rhs1 (parcopy_stmt
, arg
);
3915 /* If we are in ssa form, we must load the value from the default
3916 definition of the argument. That should not be defined now,
3917 since the argument is not used uninitialized. */
3918 gcc_assert (ssa_default_def (cfun
, arg
) == NULL
);
3919 narg
= make_ssa_name (arg
, gimple_build_nop ());
3920 set_ssa_default_def (cfun
, arg
, narg
);
3921 /* ?? Is setting the subcode really necessary ?? */
3922 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (narg
));
3923 gimple_assign_set_rhs1 (parcopy_stmt
, narg
);
3924 update_stmt (parcopy_stmt
);
3928 /* Declare local variables needed in CHILD_CFUN. */
3929 block
= DECL_INITIAL (child_fn
);
3930 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
3931 /* The gimplifier could record temporaries in parallel/task block
3932 rather than in containing function's local_decls chain,
3933 which would mean cgraph missed finalizing them. Do it now. */
3934 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
3935 if (TREE_CODE (t
) == VAR_DECL
3937 && !DECL_EXTERNAL (t
))
3938 varpool_finalize_decl (t
);
3939 DECL_SAVED_TREE (child_fn
) = NULL
;
3940 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3941 gimple_set_body (child_fn
, NULL
);
3942 TREE_USED (block
) = 1;
3944 /* Reset DECL_CONTEXT on function arguments. */
3945 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
3946 DECL_CONTEXT (t
) = child_fn
;
3948 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3949 so that it can be moved to the child function. */
3950 gsi
= gsi_last_bb (entry_bb
);
3951 stmt
= gsi_stmt (gsi
);
3952 gcc_assert (stmt
&& (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
3953 || gimple_code (stmt
) == GIMPLE_OMP_TASK
));
3954 gsi_remove (&gsi
, true);
3955 e
= split_block (entry_bb
, stmt
);
3957 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
3959 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3962 gsi
= gsi_last_bb (exit_bb
);
3963 gcc_assert (!gsi_end_p (gsi
)
3964 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
3965 stmt
= gimple_build_return (NULL
);
3966 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
3967 gsi_remove (&gsi
, true);
3970 /* Move the parallel region into CHILD_CFUN. */
3972 if (gimple_in_ssa_p (cfun
))
3974 init_tree_ssa (child_cfun
);
3975 init_ssa_operands (child_cfun
);
3976 child_cfun
->gimple_df
->in_ssa_p
= true;
3980 block
= gimple_block (entry_stmt
);
3982 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
3984 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
3985 /* When the OMP expansion process cannot guarantee an up-to-date
3986 loop tree arrange for the child function to fixup loops. */
3987 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP
))
3988 child_cfun
->x_current_loops
->state
|= LOOPS_NEED_FIXUP
;
3990 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3991 num
= vec_safe_length (child_cfun
->local_decls
);
3992 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
3994 t
= (*child_cfun
->local_decls
)[srcidx
];
3995 if (DECL_CONTEXT (t
) == cfun
->decl
)
3997 if (srcidx
!= dstidx
)
3998 (*child_cfun
->local_decls
)[dstidx
] = t
;
4002 vec_safe_truncate (child_cfun
->local_decls
, dstidx
);
4004 /* Inform the callgraph about the new function. */
4005 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
= cfun
->curr_properties
;
4006 cgraph_add_new_function (child_fn
, true);
4008 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4009 fixed in a following pass. */
4010 push_cfun (child_cfun
);
4012 optimize_omp_library_calls (entry_stmt
);
4013 rebuild_cgraph_edges ();
4015 /* Some EH regions might become dead, see PR34608. If
4016 pass_cleanup_cfg isn't the first pass to happen with the
4017 new child, these dead EH edges might cause problems.
4018 Clean them up now. */
4019 if (flag_exceptions
)
4022 bool changed
= false;
4025 changed
|= gimple_purge_dead_eh_edges (bb
);
4027 cleanup_tree_cfg ();
4029 if (gimple_in_ssa_p (cfun
))
4030 update_ssa (TODO_update_ssa
);
4034 /* Emit a library call to launch the children threads. */
4035 if (gimple_code (entry_stmt
) == GIMPLE_OMP_PARALLEL
)
4036 expand_parallel_call (region
, new_bb
, entry_stmt
, ws_args
);
4038 expand_task_call (new_bb
, entry_stmt
);
4039 if (gimple_in_ssa_p (cfun
))
4040 update_ssa (TODO_update_ssa_only_virtuals
);
/* Helper function for expand_omp_{for_*,simd}.  If this is the outermost
   of the combined collapse > 1 loop constructs, generate code like:
	if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
   Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
	count = 0;
   and set ZERO_ITER_BB to that bb.  */

/* NOTE: It *could* be better to moosh all of the BBs together,
   creating one larger BB with all the computation and the unexpected
   jump at the end.  I.e.

   bool zero3, zero2, zero1, zero;

   zero3 = N32 c3 N31;
   count3 = (N32 - N31) /[cl] STEP3;
   zero2 = N22 c2 N21;
   count2 = (N22 - N21) /[cl] STEP2;
   zero1 = N12 c1 N11;
   count1 = (N12 - N11) /[cl] STEP1;
   zero = zero3 || zero2 || zero1;
   count = count1 * count2 * count3;
   if (__builtin_expect(zero, false)) goto zero_iter_bb;

   After all, we expect the zero=false, and thus we expect to have to
   evaluate all of the comparison expressions, so short-circuiting
   oughtn't be a win.  Since the condition isn't protecting a
   denominator, we're not concerned about divide-by-zero, so we can
   fully evaluate count even if a numerator turned out to be wrong.

   It seems like putting this all together would create much better
   scheduling opportunities, and less pressure on the chip's branch
   predictor.  */
4096 expand_omp_for_init_counts (struct omp_for_data
*fd
, gimple_stmt_iterator
*gsi
,
4097 basic_block
&entry_bb
, tree
*counts
,
4098 basic_block
&zero_iter_bb
, int &first_zero_iter
,
4099 basic_block
&l2_dom_bb
)
4101 tree t
, type
= TREE_TYPE (fd
->loop
.v
);
4106 /* Collapsed loops need work for expansion into SSA form. */
4107 gcc_assert (!gimple_in_ssa_p (cfun
));
4109 for (i
= 0; i
< fd
->collapse
; i
++)
4111 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
4113 if (SSA_VAR_P (fd
->loop
.n2
)
4114 && ((t
= fold_binary (fd
->loops
[i
].cond_code
, boolean_type_node
,
4115 fold_convert (itype
, fd
->loops
[i
].n1
),
4116 fold_convert (itype
, fd
->loops
[i
].n2
)))
4117 == NULL_TREE
|| !integer_onep (t
)))
4120 n1
= fold_convert (itype
, unshare_expr (fd
->loops
[i
].n1
));
4121 n1
= force_gimple_operand_gsi (gsi
, n1
, true, NULL_TREE
,
4122 true, GSI_SAME_STMT
);
4123 n2
= fold_convert (itype
, unshare_expr (fd
->loops
[i
].n2
));
4124 n2
= force_gimple_operand_gsi (gsi
, n2
, true, NULL_TREE
,
4125 true, GSI_SAME_STMT
);
4126 stmt
= gimple_build_cond (fd
->loops
[i
].cond_code
, n1
, n2
,
4127 NULL_TREE
, NULL_TREE
);
4128 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
4129 if (walk_tree (gimple_cond_lhs_ptr (stmt
),
4130 expand_omp_regimplify_p
, NULL
, NULL
)
4131 || walk_tree (gimple_cond_rhs_ptr (stmt
),
4132 expand_omp_regimplify_p
, NULL
, NULL
))
4134 *gsi
= gsi_for_stmt (stmt
);
4135 gimple_regimplify_operands (stmt
, gsi
);
4137 e
= split_block (entry_bb
, stmt
);
4138 if (zero_iter_bb
== NULL
)
4140 first_zero_iter
= i
;
4141 zero_iter_bb
= create_empty_bb (entry_bb
);
4143 add_bb_to_loop (zero_iter_bb
, entry_bb
->loop_father
);
4144 *gsi
= gsi_after_labels (zero_iter_bb
);
4145 stmt
= gimple_build_assign (fd
->loop
.n2
,
4146 build_zero_cst (type
));
4147 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
4148 set_immediate_dominator (CDI_DOMINATORS
, zero_iter_bb
,
4151 ne
= make_edge (entry_bb
, zero_iter_bb
, EDGE_FALSE_VALUE
);
4152 ne
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
4153 e
->flags
= EDGE_TRUE_VALUE
;
4154 e
->probability
= REG_BR_PROB_BASE
- ne
->probability
;
4155 if (l2_dom_bb
== NULL
)
4156 l2_dom_bb
= entry_bb
;
4158 *gsi
= gsi_last_bb (entry_bb
);
4161 if (POINTER_TYPE_P (itype
))
4162 itype
= signed_type_for (itype
);
4163 t
= build_int_cst (itype
, (fd
->loops
[i
].cond_code
== LT_EXPR
4165 t
= fold_build2 (PLUS_EXPR
, itype
,
4166 fold_convert (itype
, fd
->loops
[i
].step
), t
);
4167 t
= fold_build2 (PLUS_EXPR
, itype
, t
,
4168 fold_convert (itype
, fd
->loops
[i
].n2
));
4169 t
= fold_build2 (MINUS_EXPR
, itype
, t
,
4170 fold_convert (itype
, fd
->loops
[i
].n1
));
4171 /* ?? We could probably use CEIL_DIV_EXPR instead of
4172 TRUNC_DIV_EXPR and adjusting by hand. Unless we can't
4173 generate the same code in the end because generically we
4174 don't know that the values involved must be negative for
4176 if (TYPE_UNSIGNED (itype
) && fd
->loops
[i
].cond_code
== GT_EXPR
)
4177 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
4178 fold_build1 (NEGATE_EXPR
, itype
, t
),
4179 fold_build1 (NEGATE_EXPR
, itype
,
4180 fold_convert (itype
,
4181 fd
->loops
[i
].step
)));
4183 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
,
4184 fold_convert (itype
, fd
->loops
[i
].step
));
4185 t
= fold_convert (type
, t
);
4186 if (TREE_CODE (t
) == INTEGER_CST
)
4190 counts
[i
] = create_tmp_reg (type
, ".count");
4191 expand_omp_build_assign (gsi
, counts
[i
], t
);
4193 if (SSA_VAR_P (fd
->loop
.n2
))
4198 t
= fold_build2 (MULT_EXPR
, type
, fd
->loop
.n2
, counts
[i
]);
4199 expand_omp_build_assign (gsi
, fd
->loop
.n2
, t
);
/* Helper function for expand_omp_{for_*,simd}.  Generate code like:
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
   if this loop doesn't have an inner loop construct combined with it.  */
static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			  tree *counts, tree startvar)
{
  int i;
  tree type = TREE_TYPE (fd->loop.v);
  tree tem = create_tmp_reg (type, ".tem");
  gimple stmt = gimple_build_assign (tem, startvar);

  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);

  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
      itype = vtype;
      if (POINTER_TYPE_P (vtype))
	itype = signed_type_for (vtype);
      if (i != 0)
	t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
      else
	t = tem;
      t = fold_convert (itype, t);
      t = fold_build2 (MULT_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].step));
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].n1, t);
      else
	t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
      t = force_gimple_operand_gsi (gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false,
				    GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
      if (i != 0)
	{
	  t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	  t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (tem, t);
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	}
    }
}
/* Helper function for expand_omp_for_*.  Generate code like:
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto BODY_BB; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto BODY_BB; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto BODY_BB;  */
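
/* Illustrative sketch (not used by the pass): the inner-to-outer update
   cascade that extract_omp_for_update_vars materializes as basic blocks,
   written as plain C for a collapse(3) nest with "<" conditions.  All
   names are made up for exposition; the real code branches to BODY_BB
   instead of returning.  */

static inline void
omp_doc_advance_collapsed_iv (long step1,
			      long n2, long step2, long limit2,
			      long n3, long step3, long limit3,
			      long *v1, long *v2, long *v3)
{
  *v3 += step3;
  if (*v3 < limit3)
    return;			/* goto BODY_BB */
  *v3 = n3;
  *v2 += step2;
  if (*v2 < limit2)
    return;			/* goto BODY_BB */
  *v2 = n2;
  *v1 += step1;
  /* fall through to BODY_BB; the overall trip count bounds the loop.  */
}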
static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
			     basic_block body_bb)
{
  basic_block last_bb, bb, collapse_bb = NULL;
  int i;
  gimple_stmt_iterator gsi;
  edge e;
  tree t;
  gimple stmt;

  last_bb = cont_bb;
  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v);

      bb = create_empty_bb (last_bb);
      add_bb_to_loop (bb, last_bb->loop_father);
      gsi = gsi_start_bb (bb);

      if (i < fd->collapse - 1)
	{
	  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
	  e->probability = REG_BR_PROB_BASE / 8;

	  t = fd->loops[i + 1].n1;
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i + 1].v)
					&& TREE_ADDRESSABLE (fd->loops[i + 1].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	}
      else
	collapse_bb = bb;

      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
      else
	t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      if (i > 0)
	{
	  t = fd->loops[i].n2;
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability = REG_BR_PROB_BASE * 7 / 8;
	}
      else
	make_edge (bb, body_bb, EDGE_FALLTHRU);

      last_bb = bb;
    }

  return collapse_bb;
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:

      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	goto Z1;
    Z0:
	count = 0;
    Z1:
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:  */
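
/* Illustrative sketch (not used by the pass): the control flow that
   expand_omp_for_generic wires up, written as a C driver.  The two
   callbacks stand in for the GOMP_loop_*_start / GOMP_loop_*_next pair
   selected by expand_omp_for; their exact libgomp signatures are not
   reproduced here, and all names are made up for exposition.  */

typedef int (*omp_doc_loop_start_fn) (long n1, long n2, long step, long chunk,
				      long *istart, long *iend);
typedef int (*omp_doc_loop_next_fn) (long *istart, long *iend);

static inline void
omp_doc_generic_loop_driver (long n1, long n2, long step, long chunk,
			     omp_doc_loop_start_fn loop_start,
			     omp_doc_loop_next_fn loop_next,
			     void (*body) (long v))
{
  long istart0, iend0;
  if (!loop_start (n1, n2, step, chunk, &istart0, &iend0))
    return;					/* L3: no work at all.  */
  do
    {
      long v = istart0, iend = iend0;		/* L0 */
      do
	{
	  body (v);				/* L1: BODY */
	  v += step;
	}
      while (v < iend);				/* "V cond iend" for "<" */
    }
  while (loop_next (&istart0, &iend0));		/* L2 */
}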
4437 expand_omp_for_generic (struct omp_region
*region
,
4438 struct omp_for_data
*fd
,
4439 enum built_in_function start_fn
,
4440 enum built_in_function next_fn
)
4442 tree type
, istart0
, iend0
, iend
;
4443 tree t
, vmain
, vback
, bias
= NULL_TREE
;
4444 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, collapse_bb
;
4445 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
4446 gimple_stmt_iterator gsi
;
4448 bool in_combined_parallel
= is_combined_parallel (region
);
4449 bool broken_loop
= region
->cont
== NULL
;
4451 tree
*counts
= NULL
;
4454 gcc_assert (!broken_loop
|| !in_combined_parallel
);
4455 gcc_assert (fd
->iter_type
== long_integer_type_node
4456 || !in_combined_parallel
);
4458 type
= TREE_TYPE (fd
->loop
.v
);
4459 istart0
= create_tmp_var (fd
->iter_type
, ".istart0");
4460 iend0
= create_tmp_var (fd
->iter_type
, ".iend0");
4461 TREE_ADDRESSABLE (istart0
) = 1;
4462 TREE_ADDRESSABLE (iend0
) = 1;
4464 /* See if we need to bias by LLONG_MIN. */
4465 if (fd
->iter_type
== long_long_unsigned_type_node
4466 && TREE_CODE (type
) == INTEGER_TYPE
4467 && !TYPE_UNSIGNED (type
))
4471 if (fd
->loop
.cond_code
== LT_EXPR
)
4474 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
4478 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
4481 if (TREE_CODE (n1
) != INTEGER_CST
4482 || TREE_CODE (n2
) != INTEGER_CST
4483 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
4484 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
4487 entry_bb
= region
->entry
;
4488 cont_bb
= region
->cont
;
4490 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
4491 gcc_assert (broken_loop
4492 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
4493 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
4494 l1_bb
= single_succ (l0_bb
);
4497 l2_bb
= create_empty_bb (cont_bb
);
4498 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
);
4499 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
4503 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
4504 exit_bb
= region
->exit
;
4506 gsi
= gsi_last_bb (entry_bb
);
4508 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
4509 if (fd
->collapse
> 1)
4511 int first_zero_iter
= -1;
4512 basic_block zero_iter_bb
= NULL
, l2_dom_bb
= NULL
;
4514 counts
= XALLOCAVEC (tree
, fd
->collapse
);
4515 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
4516 zero_iter_bb
, first_zero_iter
,
4521 /* Some counts[i] vars might be uninitialized if
4522 some loop has zero iterations. But the body shouldn't
4523 be executed in that case, so just avoid uninit warnings. */
4524 for (i
= first_zero_iter
; i
< fd
->collapse
; i
++)
4525 if (SSA_VAR_P (counts
[i
]))
4526 TREE_NO_WARNING (counts
[i
]) = 1;
4528 e
= split_block (entry_bb
, gsi_stmt (gsi
));
4530 make_edge (zero_iter_bb
, entry_bb
, EDGE_FALLTHRU
);
4531 gsi
= gsi_last_bb (entry_bb
);
4532 set_immediate_dominator (CDI_DOMINATORS
, entry_bb
,
4533 get_immediate_dominator (CDI_DOMINATORS
,
4537 if (in_combined_parallel
)
4539 /* In a combined parallel loop, emit a call to
4540 GOMP_loop_foo_next. */
4541 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
4542 build_fold_addr_expr (istart0
),
4543 build_fold_addr_expr (iend0
));
4547 tree t0
, t1
, t2
, t3
, t4
;
4548 /* If this is not a combined parallel loop, emit a call to
4549 GOMP_loop_foo_start in ENTRY_BB. */
4550 t4
= build_fold_addr_expr (iend0
);
4551 t3
= build_fold_addr_expr (istart0
);
4552 t2
= fold_convert (fd
->iter_type
, fd
->loop
.step
);
4555 if (POINTER_TYPE_P (TREE_TYPE (t0
))
4556 && TYPE_PRECISION (TREE_TYPE (t0
))
4557 != TYPE_PRECISION (fd
->iter_type
))
4559 /* Avoid casting pointers to integer of a different size. */
4560 tree itype
= signed_type_for (type
);
4561 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, t1
));
4562 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, t0
));
4566 t1
= fold_convert (fd
->iter_type
, t1
);
4567 t0
= fold_convert (fd
->iter_type
, t0
);
4571 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
4572 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
4574 if (fd
->iter_type
== long_integer_type_node
)
4578 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
4579 t
= build_call_expr (builtin_decl_explicit (start_fn
),
4580 6, t0
, t1
, t2
, t
, t3
, t4
);
4583 t
= build_call_expr (builtin_decl_explicit (start_fn
),
4584 5, t0
, t1
, t2
, t3
, t4
);
4592 /* The GOMP_loop_ull_*start functions have additional boolean
4593 argument, true for < loops and false for > loops.
4594 In Fortran, the C bool type can be different from
4595 boolean_type_node. */
4596 bfn_decl
= builtin_decl_explicit (start_fn
);
4597 c_bool_type
= TREE_TYPE (TREE_TYPE (bfn_decl
));
4598 t5
= build_int_cst (c_bool_type
,
4599 fd
->loop
.cond_code
== LT_EXPR
? 1 : 0);
4602 tree bfn_decl
= builtin_decl_explicit (start_fn
);
4603 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
4604 t
= build_call_expr (bfn_decl
, 7, t5
, t0
, t1
, t2
, t
, t3
, t4
);
4607 t
= build_call_expr (builtin_decl_explicit (start_fn
),
4608 6, t5
, t0
, t1
, t2
, t3
, t4
);
4611 if (TREE_TYPE (t
) != boolean_type_node
)
4612 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
4613 t
, build_int_cst (TREE_TYPE (t
), 0));
4614 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4615 true, GSI_SAME_STMT
);
4616 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4618 /* Remove the GIMPLE_OMP_FOR statement. */
4619 gsi_remove (&gsi
, true);
4621 /* Iteration setup for sequential loop goes in L0_BB. */
4622 tree startvar
= fd
->loop
.v
;
4623 tree endvar
= NULL_TREE
;
4625 gsi
= gsi_start_bb (l0_bb
);
4628 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
4629 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
4630 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
4631 t
= fold_convert (TREE_TYPE (startvar
), t
);
4632 t
= force_gimple_operand_gsi (&gsi
, t
,
4634 && TREE_ADDRESSABLE (startvar
),
4635 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
4636 stmt
= gimple_build_assign (startvar
, t
);
4637 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4641 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
4642 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
4643 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
4644 t
= fold_convert (TREE_TYPE (startvar
), t
);
4645 iend
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4646 false, GSI_CONTINUE_LINKING
);
4649 stmt
= gimple_build_assign (endvar
, iend
);
4650 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4652 if (fd
->collapse
> 1)
4653 expand_omp_for_init_vars (fd
, &gsi
, counts
, startvar
);
4657 /* Code to control the increment and predicate for the sequential
4658 loop goes in the CONT_BB. */
4659 gsi
= gsi_last_bb (cont_bb
);
4660 stmt
= gsi_stmt (gsi
);
4661 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
4662 vmain
= gimple_omp_continue_control_use (stmt
);
4663 vback
= gimple_omp_continue_control_def (stmt
);
4665 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4668 if (POINTER_TYPE_P (type
))
4669 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
4671 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
4672 t
= force_gimple_operand_gsi (&gsi
, t
,
4674 && TREE_ADDRESSABLE (vback
),
4675 NULL_TREE
, true, GSI_SAME_STMT
);
4676 stmt
= gimple_build_assign (vback
, t
);
4677 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4679 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
4680 DECL_P (vback
) && TREE_ADDRESSABLE (vback
) ? t
: vback
,
4682 stmt
= gimple_build_cond_empty (t
);
4683 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4686 /* Remove GIMPLE_OMP_CONTINUE. */
4687 gsi_remove (&gsi
, true);
4689 if (fd
->collapse
> 1)
4690 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, l1_bb
);
4692 /* Emit code to get the next parallel iteration in L2_BB. */
4693 gsi
= gsi_start_bb (l2_bb
);
4695 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
4696 build_fold_addr_expr (istart0
),
4697 build_fold_addr_expr (iend0
));
4698 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4699 false, GSI_CONTINUE_LINKING
);
4700 if (TREE_TYPE (t
) != boolean_type_node
)
4701 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
4702 t
, build_int_cst (TREE_TYPE (t
), 0));
4703 stmt
= gimple_build_cond_empty (t
);
4704 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4707 /* Add the loop cleanup function. */
4708 gsi
= gsi_last_bb (exit_bb
);
4709 if (gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
4710 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT
);
4712 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END
);
4713 stmt
= gimple_build_call (t
, 0);
4714 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
4715 gsi_remove (&gsi
, true);
4717 /* Connect the new blocks. */
4718 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
4719 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
4725 e
= find_edge (cont_bb
, l3_bb
);
4726 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
4728 phis
= phi_nodes (l3_bb
);
4729 for (gsi
= gsi_start (phis
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4731 gimple phi
= gsi_stmt (gsi
);
4732 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
4733 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
4737 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
4739 add_bb_to_loop (l2_bb
, cont_bb
->loop_father
);
4740 e
= find_edge (cont_bb
, l1_bb
);
4741 /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
4744 else if (fd
->collapse
> 1)
4747 e
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
4750 e
->flags
= EDGE_TRUE_VALUE
;
4753 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4754 find_edge (cont_bb
, l2_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
4758 e
= find_edge (cont_bb
, l2_bb
);
4759 e
->flags
= EDGE_FALLTHRU
;
4761 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
4763 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
4764 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
4765 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
4766 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
4767 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
4768 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
4769 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
4770 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
4772 struct loop
*outer_loop
= alloc_loop ();
4773 outer_loop
->header
= l0_bb
;
4774 outer_loop
->latch
= l2_bb
;
4775 add_loop (outer_loop
, l0_bb
->loop_father
);
4777 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4780 struct loop
*loop
= alloc_loop ();
4781 loop
->header
= l1_bb
;
4782 /* The loop may have multiple latches. */
4783 add_loop (loop
, outer_loop
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/
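
/* Illustrative sketch (not used by the pass): the iteration-space split
   performed by the pseudocode above, as a plain C helper.  Thread THREADID
   out of NTHREADS gets the half-open range [*s0, *e0) of the N logical
   iterations; the first n % nthreads threads receive one extra iteration.
   Names are made up for exposition.  */

static inline void
omp_doc_static_nochunk_range (unsigned long n, unsigned long nthreads,
			      unsigned long threadid,
			      unsigned long *s0, unsigned long *e0)
{
  unsigned long q = n / nthreads;
  unsigned long tt = n % nthreads;

  if (threadid < tt)
    {
      tt = 0;
      q = q + 1;		/* this thread takes one of the leftovers */
    }

  *s0 = q * threadid + tt;
  *e0 = *s0 + q;
}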
static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge ep;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = fold_binary (fd->loop.cond_code, boolean_type_node,
		   fold_convert (type, fd->loop.n1),
		   fold_convert (type, fd->loop.n2));
  if (TYPE_UNSIGNED (type)
      && (t == NULL_TREE || !integer_onep (t)))
    {
      tree n1, n2;
      n1 = fold_convert (type, unshare_expr (fd->loop.n1));
      n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      n2 = fold_convert (type, unshare_expr (fd->loop.n2));
      n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
				NULL_TREE, NULL_TREE);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      if (walk_tree (gimple_cond_lhs_ptr (stmt),
		     expand_omp_regimplify_p, NULL, NULL)
	  || walk_tree (gimple_cond_rhs_ptr (stmt),
			expand_omp_regimplify_p, NULL, NULL))
	{
	  gsi = gsi_for_stmt (stmt);
	  gimple_regimplify_operands (stmt, &gsi);
	}
      ep = split_block (entry_bb, stmt);
      ep->flags = EDGE_TRUE_VALUE;
      entry_bb = ep->dest;
      ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
      ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
      ep->probability = REG_BR_PROB_BASE / 2000 - 1;
      if (gimple_in_ssa_p (cfun))
	{
	  int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
	  for (gsi = gsi_start_phis (fin_bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple phi = gsi_stmt (gsi);
	      add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
			   ep, UNKNOWN_LOCATION);
	    }
	}
      gsi = gsi_last_bb (entry_bb);
    }

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  q = create_tmp_reg (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_reg (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  second_bb = split_block (entry_bb, stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
		     GSI_SAME_STMT);
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
				       build_int_cst (itype, 1));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  third_bb = split_block (second_bb, stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (fd->loop.v)
				&& TREE_ADDRESSABLE (fd->loop.v),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (vmain, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (vback) && TREE_ADDRESSABLE (vback),
				NULL_TREE, true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node,
	      DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = REG_BR_PROB_BASE / 4;
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));

  struct loop *loop = alloc_loop ();
  loop->header = body_bb;
  loop->latch = cont_bb;
  add_loop (loop, body_bb->loop_father);
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
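
/* Illustrative sketch (not used by the pass): the chunk selection from the
   pseudocode above, as a plain C helper.  On trip number TRIP, thread
   THREADID claims the half-open range [*s0, *e0) of the N logical
   iterations, or the helper returns 0 when its chunk would start past the
   end.  Names are made up for exposition.  */

static inline int
omp_doc_static_chunk_range (unsigned long n, unsigned long nthreads,
			    unsigned long threadid, unsigned long chunk,
			    unsigned long trip,
			    unsigned long *s0, unsigned long *e0)
{
  unsigned long s = (trip * nthreads + threadid) * chunk;
  if (s >= n)
    return 0;				/* goto L4: no more work here */
  *s0 = s;
  *e0 = s + chunk < n ? s + chunk : n;	/* e0 = min (s0 + CHUNK, n) */
  return 1;
}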
5099 expand_omp_for_static_chunk (struct omp_region
*region
, struct omp_for_data
*fd
)
5101 tree n
, s0
, e0
, e
, t
;
5102 tree trip_var
, trip_init
, trip_main
, trip_back
, nthreads
, threadid
;
5103 tree type
, itype
, v_main
, v_back
, v_extra
;
5104 basic_block entry_bb
, exit_bb
, body_bb
, seq_start_bb
, iter_part_bb
;
5105 basic_block trip_update_bb
, cont_bb
, fin_bb
;
5106 gimple_stmt_iterator si
;
5110 itype
= type
= TREE_TYPE (fd
->loop
.v
);
5111 if (POINTER_TYPE_P (type
))
5112 itype
= signed_type_for (type
);
5114 entry_bb
= region
->entry
;
5115 se
= split_block (entry_bb
, last_stmt (entry_bb
));
5117 iter_part_bb
= se
->dest
;
5118 cont_bb
= region
->cont
;
5119 gcc_assert (EDGE_COUNT (iter_part_bb
->succs
) == 2);
5120 gcc_assert (BRANCH_EDGE (iter_part_bb
)->dest
5121 == FALLTHRU_EDGE (cont_bb
)->dest
);
5122 seq_start_bb
= split_edge (FALLTHRU_EDGE (iter_part_bb
));
5123 body_bb
= single_succ (seq_start_bb
);
5124 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
5125 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
5126 fin_bb
= FALLTHRU_EDGE (cont_bb
)->dest
;
5127 trip_update_bb
= split_edge (FALLTHRU_EDGE (cont_bb
));
5128 exit_bb
= region
->exit
;
5130 /* Trip and adjustment setup goes in ENTRY_BB. */
5131 si
= gsi_last_bb (entry_bb
);
5132 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_FOR
);
5134 t
= fold_binary (fd
->loop
.cond_code
, boolean_type_node
,
5135 fold_convert (type
, fd
->loop
.n1
),
5136 fold_convert (type
, fd
->loop
.n2
));
5137 if (TYPE_UNSIGNED (type
)
5138 && (t
== NULL_TREE
|| !integer_onep (t
)))
5141 n1
= fold_convert (type
, unshare_expr (fd
->loop
.n1
));
5142 n1
= force_gimple_operand_gsi (&si
, n1
, true, NULL_TREE
,
5143 true, GSI_SAME_STMT
);
5144 n2
= fold_convert (type
, unshare_expr (fd
->loop
.n2
));
5145 n2
= force_gimple_operand_gsi (&si
, n2
, true, NULL_TREE
,
5146 true, GSI_SAME_STMT
);
5147 stmt
= gimple_build_cond (fd
->loop
.cond_code
, n1
, n2
,
5148 NULL_TREE
, NULL_TREE
);
5149 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5150 if (walk_tree (gimple_cond_lhs_ptr (stmt
),
5151 expand_omp_regimplify_p
, NULL
, NULL
)
5152 || walk_tree (gimple_cond_rhs_ptr (stmt
),
5153 expand_omp_regimplify_p
, NULL
, NULL
))
5155 si
= gsi_for_stmt (stmt
);
5156 gimple_regimplify_operands (stmt
, &si
);
5158 se
= split_block (entry_bb
, stmt
);
5159 se
->flags
= EDGE_TRUE_VALUE
;
5160 entry_bb
= se
->dest
;
5161 se
->probability
= REG_BR_PROB_BASE
- (REG_BR_PROB_BASE
/ 2000 - 1);
5162 se
= make_edge (se
->src
, fin_bb
, EDGE_FALSE_VALUE
);
5163 se
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
5164 if (gimple_in_ssa_p (cfun
))
5166 int dest_idx
= find_edge (entry_bb
, fin_bb
)->dest_idx
;
5167 for (si
= gsi_start_phis (fin_bb
);
5168 !gsi_end_p (si
); gsi_next (&si
))
5170 gimple phi
= gsi_stmt (si
);
5171 add_phi_arg (phi
, gimple_phi_arg_def (phi
, dest_idx
),
5172 se
, UNKNOWN_LOCATION
);
5175 si
= gsi_last_bb (entry_bb
);
5178 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
), 0);
5179 t
= fold_convert (itype
, t
);
5180 nthreads
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5181 true, GSI_SAME_STMT
);
5183 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
), 0);
5184 t
= fold_convert (itype
, t
);
5185 threadid
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5186 true, GSI_SAME_STMT
);
5189 = force_gimple_operand_gsi (&si
, fold_convert (type
, fd
->loop
.n1
),
5190 true, NULL_TREE
, true, GSI_SAME_STMT
);
5192 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->loop
.n2
),
5193 true, NULL_TREE
, true, GSI_SAME_STMT
);
5195 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->loop
.step
),
5196 true, NULL_TREE
, true, GSI_SAME_STMT
);
5198 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->chunk_size
),
5199 true, NULL_TREE
, true, GSI_SAME_STMT
);
5201 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
5202 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loop
.step
, t
);
5203 t
= fold_build2 (PLUS_EXPR
, itype
, t
, fd
->loop
.n2
);
5204 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, fd
->loop
.n1
));
5205 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
5206 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
5207 fold_build1 (NEGATE_EXPR
, itype
, t
),
5208 fold_build1 (NEGATE_EXPR
, itype
, fd
->loop
.step
));
5210 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, fd
->loop
.step
);
5211 t
= fold_convert (itype
, t
);
5212 n
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5213 true, GSI_SAME_STMT
);
5215 trip_var
= create_tmp_reg (itype
, ".trip");
5216 if (gimple_in_ssa_p (cfun
))
5218 trip_init
= make_ssa_name (trip_var
, NULL
);
5219 trip_main
= make_ssa_name (trip_var
, NULL
);
5220 trip_back
= make_ssa_name (trip_var
, NULL
);
5224 trip_init
= trip_var
;
5225 trip_main
= trip_var
;
5226 trip_back
= trip_var
;
5229 stmt
= gimple_build_assign (trip_init
, build_int_cst (itype
, 0));
5230 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5232 t
= fold_build2 (MULT_EXPR
, itype
, threadid
, fd
->chunk_size
);
5233 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
5234 if (POINTER_TYPE_P (type
))
5235 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
5237 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
5238 v_extra
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5239 true, GSI_SAME_STMT
);
5241 /* Remove the GIMPLE_OMP_FOR. */
5242 gsi_remove (&si
, true);
5244 /* Iteration space partitioning goes in ITER_PART_BB. */
5245 si
= gsi_last_bb (iter_part_bb
);
5247 t
= fold_build2 (MULT_EXPR
, itype
, trip_main
, nthreads
);
5248 t
= fold_build2 (PLUS_EXPR
, itype
, t
, threadid
);
5249 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->chunk_size
);
5250 s0
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5251 false, GSI_CONTINUE_LINKING
);
5253 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, fd
->chunk_size
);
5254 t
= fold_build2 (MIN_EXPR
, itype
, t
, n
);
5255 e0
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5256 false, GSI_CONTINUE_LINKING
);
5258 t
= build2 (LT_EXPR
, boolean_type_node
, s0
, n
);
5259 gsi_insert_after (&si
, gimple_build_cond_empty (t
), GSI_CONTINUE_LINKING
);
5261 /* Setup code for sequential iteration goes in SEQ_START_BB. */
5262 si
= gsi_start_bb (seq_start_bb
);
5264 t
= fold_convert (itype
, s0
);
5265 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
5266 if (POINTER_TYPE_P (type
))
5267 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
5269 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
5270 t
= force_gimple_operand_gsi (&si
, t
,
5272 && TREE_ADDRESSABLE (fd
->loop
.v
),
5273 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
5274 stmt
= gimple_build_assign (fd
->loop
.v
, t
);
5275 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
5277 t
= fold_convert (itype
, e0
);
5278 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
5279 if (POINTER_TYPE_P (type
))
5280 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
5282 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
5283 e
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5284 false, GSI_CONTINUE_LINKING
);
5286 /* The code controlling the sequential loop goes in CONT_BB,
5287 replacing the GIMPLE_OMP_CONTINUE. */
5288 si
= gsi_last_bb (cont_bb
);
5289 stmt
= gsi_stmt (si
);
5290 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
5291 v_main
= gimple_omp_continue_control_use (stmt
);
5292 v_back
= gimple_omp_continue_control_def (stmt
);
5294 if (POINTER_TYPE_P (type
))
5295 t
= fold_build_pointer_plus (v_main
, fd
->loop
.step
);
5297 t
= fold_build2 (PLUS_EXPR
, type
, v_main
, fd
->loop
.step
);
5298 if (DECL_P (v_back
) && TREE_ADDRESSABLE (v_back
))
5299 t
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
5300 true, GSI_SAME_STMT
);
5301 stmt
= gimple_build_assign (v_back
, t
);
5302 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5304 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
5305 DECL_P (v_back
) && TREE_ADDRESSABLE (v_back
)
5307 gsi_insert_before (&si
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
5309 /* Remove GIMPLE_OMP_CONTINUE. */
5310 gsi_remove (&si
, true);
5312 /* Trip update code goes into TRIP_UPDATE_BB. */
5313 si
= gsi_start_bb (trip_update_bb
);
5315 t
= build_int_cst (itype
, 1);
5316 t
= build2 (PLUS_EXPR
, itype
, trip_main
, t
);
5317 stmt
= gimple_build_assign (trip_back
, t
);
5318 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
5320 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5321 si
= gsi_last_bb (exit_bb
);
5322 if (!gimple_omp_return_nowait_p (gsi_stmt (si
)))
5323 force_gimple_operand_gsi (&si
, build_omp_barrier (), false, NULL_TREE
,
5324 false, GSI_SAME_STMT
);
5325 gsi_remove (&si
, true);
5327 /* Connect the new blocks. */
5328 find_edge (iter_part_bb
, seq_start_bb
)->flags
= EDGE_TRUE_VALUE
;
5329 find_edge (iter_part_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
5331 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
5332 find_edge (cont_bb
, trip_update_bb
)->flags
= EDGE_FALSE_VALUE
;
5334 redirect_edge_and_branch (single_succ_edge (trip_update_bb
), iter_part_bb
);
5336 if (gimple_in_ssa_p (cfun
))
5338 gimple_stmt_iterator psi
;
5341 edge_var_map_vector
*head
;
5345 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
5346 remove arguments of the phi nodes in fin_bb. We need to create
5347 appropriate phi nodes in iter_part_bb instead. */
5348 se
= single_pred_edge (fin_bb
);
5349 re
= single_succ_edge (trip_update_bb
);
5350 head
= redirect_edge_var_map_vector (re
);
5351 ene
= single_succ_edge (entry_bb
);
5353 psi
= gsi_start_phis (fin_bb
);
5354 for (i
= 0; !gsi_end_p (psi
) && head
->iterate (i
, &vm
);
5355 gsi_next (&psi
), ++i
)
5358 source_location locus
;
5360 phi
= gsi_stmt (psi
);
5361 t
= gimple_phi_result (phi
);
5362 gcc_assert (t
== redirect_edge_var_map_result (vm
));
5363 nphi
= create_phi_node (t
, iter_part_bb
);
5365 t
= PHI_ARG_DEF_FROM_EDGE (phi
, se
);
5366 locus
= gimple_phi_arg_location_from_edge (phi
, se
);
5368 /* A special case -- fd->loop.v is not yet computed in
5369 iter_part_bb, we need to use v_extra instead. */
5370 if (t
== fd
->loop
.v
)
5372 add_phi_arg (nphi
, t
, ene
, locus
);
5373 locus
= redirect_edge_var_map_location (vm
);
5374 add_phi_arg (nphi
, redirect_edge_var_map_def (vm
), re
, locus
);
5376 gcc_assert (!gsi_end_p (psi
) && i
== head
->length ());
5377 redirect_edge_var_map_clear (re
);
5380 psi
= gsi_start_phis (fin_bb
);
5381 if (gsi_end_p (psi
))
5383 remove_phi_node (&psi
, false);
5386 /* Make phi node for trip. */
5387 phi
= create_phi_node (trip_main
, iter_part_bb
);
5388 add_phi_arg (phi
, trip_back
, single_succ_edge (trip_update_bb
),
5390 add_phi_arg (phi
, trip_init
, single_succ_edge (entry_bb
),
5394 set_immediate_dominator (CDI_DOMINATORS
, trip_update_bb
, cont_bb
);
5395 set_immediate_dominator (CDI_DOMINATORS
, iter_part_bb
,
5396 recompute_dominator (CDI_DOMINATORS
, iter_part_bb
));
5397 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
5398 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
5399 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
,
5400 recompute_dominator (CDI_DOMINATORS
, seq_start_bb
));
5401 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
5402 recompute_dominator (CDI_DOMINATORS
, body_bb
));
5404 struct loop
*trip_loop
= alloc_loop ();
5405 trip_loop
->header
= iter_part_bb
;
5406 trip_loop
->latch
= trip_update_bb
;
5407 add_loop (trip_loop
, iter_part_bb
->loop_father
);
5409 struct loop
*loop
= alloc_loop ();
5410 loop
->header
= body_bb
;
5411 loop
->latch
= cont_bb
;
5412 add_loop (loop
, trip_loop
);
/* A subroutine of expand_omp_for.  Generate code for a simd non-worksharing
   loop.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	V = N1;
	goto L1;
    L0:
	BODY;
	V += STEP;
    L1:
	if (V cond N2) goto L0; else goto L2;
    L2:

    For collapsed loops, given parameters:

      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	count3 = (adj + N32 - N31) / STEP3;
	count2 = (adj + N22 - N21) / STEP2;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	V = 0;
	V1 = N11;
	V2 = N21;
	V3 = N31;
	goto L1;
    L0:
	BODY;
	V += 1;
	V3 += STEP3;
	V2 += (V3 cond3 N32) ? 0 : STEP2;
	V3 = (V3 cond3 N32) ? V3 : N31;
	V1 += (V2 cond2 N22) ? 0 : STEP1;
	V2 = (V2 cond2 N22) ? V2 : N21;
    L1:
	if (V < count) goto L0; else goto L2;
    L2:  */
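
/* Illustrative sketch (not used by the pass): the branch-free induction
   variable update used for collapsed simd loops in the pseudocode above,
   written as plain C for "<" conditions.  Expressing the updates as
   conditional expressions rather than control flow is what keeps the
   collapsed body a single vectorizable loop.  Names are made up.  */

static inline void
omp_doc_simd_collapsed_step (long step1,
			     long n2, long step2, long limit2,
			     long n3, long step3, long limit3,
			     long *v1, long *v2, long *v3)
{
  *v3 += step3;
  *v2 += (*v3 < limit3) ? 0 : step2;
  *v3 = (*v3 < limit3) ? *v3 : n3;
  *v1 += (*v2 < limit2) ? 0 : step1;
  *v2 = (*v2 < limit2) ? *v2 : n2;
}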
5476 expand_omp_simd (struct omp_region
*region
, struct omp_for_data
*fd
)
5479 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, l2_bb
, l2_dom_bb
;
5480 gimple_stmt_iterator gsi
;
5482 bool broken_loop
= region
->cont
== NULL
;
5484 tree
*counts
= NULL
;
5486 tree safelen
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5487 OMP_CLAUSE_SAFELEN
);
5488 tree simduid
= find_omp_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5489 OMP_CLAUSE__SIMDUID_
);
5492 type
= TREE_TYPE (fd
->loop
.v
);
5493 entry_bb
= region
->entry
;
5494 cont_bb
= region
->cont
;
5495 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
5496 gcc_assert (broken_loop
5497 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
5498 l0_bb
= FALLTHRU_EDGE (entry_bb
)->dest
;
5501 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l0_bb
);
5502 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
5503 l1_bb
= split_block (cont_bb
, last_stmt (cont_bb
))->dest
;
5504 l2_bb
= BRANCH_EDGE (entry_bb
)->dest
;
5508 BRANCH_EDGE (entry_bb
)->flags
&= ~EDGE_ABNORMAL
;
5509 l1_bb
= split_edge (BRANCH_EDGE (entry_bb
));
5510 l2_bb
= single_succ (l1_bb
);
5512 exit_bb
= region
->exit
;
5515 gsi
= gsi_last_bb (entry_bb
);
5517 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
5518 /* Not needed in SSA form right now. */
5519 gcc_assert (!gimple_in_ssa_p (cfun
));
5520 if (fd
->collapse
> 1)
5522 int first_zero_iter
= -1;
5523 basic_block zero_iter_bb
= l2_bb
;
5525 counts
= XALLOCAVEC (tree
, fd
->collapse
);
5526 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
5527 zero_iter_bb
, first_zero_iter
,
5530 if (l2_dom_bb
== NULL
)
5535 /* Place holder for gimple_omp_for_combined_into_p() in
5536 the upcoming gomp-4_0-branch merge. */;
5539 expand_omp_build_assign (&gsi
, fd
->loop
.v
,
5540 fold_convert (type
, fd
->loop
.n1
));
5541 if (fd
->collapse
> 1)
5542 for (i
= 0; i
< fd
->collapse
; i
++)
5544 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
5545 if (POINTER_TYPE_P (itype
))
5546 itype
= signed_type_for (itype
);
5547 t
= fold_convert (TREE_TYPE (fd
->loops
[i
].v
), fd
->loops
[i
].n1
);
5548 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
5552 /* Remove the GIMPLE_OMP_FOR statement. */
5553 gsi_remove (&gsi
, true);
5557 /* Code to control the increment goes in the CONT_BB. */
5558 gsi
= gsi_last_bb (cont_bb
);
5559 stmt
= gsi_stmt (gsi
);
5560 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
5562 if (POINTER_TYPE_P (type
))
5563 t
= fold_build_pointer_plus (fd
->loop
.v
, fd
->loop
.step
);
5565 t
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.v
, fd
->loop
.step
);
5566 expand_omp_build_assign (&gsi
, fd
->loop
.v
, t
);
5568 if (fd
->collapse
> 1)
5570 i
= fd
->collapse
- 1;
5571 if (POINTER_TYPE_P (TREE_TYPE (fd
->loops
[i
].v
)))
5573 t
= fold_convert (sizetype
, fd
->loops
[i
].step
);
5574 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, t
);
5578 t
= fold_convert (TREE_TYPE (fd
->loops
[i
].v
),
5580 t
= fold_build2 (PLUS_EXPR
, TREE_TYPE (fd
->loops
[i
].v
),
5583 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
5585 for (i
= fd
->collapse
- 1; i
> 0; i
--)
5587 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
5588 tree itype2
= TREE_TYPE (fd
->loops
[i
- 1].v
);
5589 if (POINTER_TYPE_P (itype2
))
5590 itype2
= signed_type_for (itype2
);
5591 t
= build3 (COND_EXPR
, itype2
,
5592 build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
5594 fold_convert (itype
, fd
->loops
[i
].n2
)),
5595 build_int_cst (itype2
, 0),
5596 fold_convert (itype2
, fd
->loops
[i
- 1].step
));
5597 if (POINTER_TYPE_P (TREE_TYPE (fd
->loops
[i
- 1].v
)))
5598 t
= fold_build_pointer_plus (fd
->loops
[i
- 1].v
, t
);
5600 t
= fold_build2 (PLUS_EXPR
, itype2
, fd
->loops
[i
- 1].v
, t
);
5601 expand_omp_build_assign (&gsi
, fd
->loops
[i
- 1].v
, t
);
5603 t
= build3 (COND_EXPR
, itype
,
5604 build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
5606 fold_convert (itype
, fd
->loops
[i
].n2
)),
5608 fold_convert (itype
, fd
->loops
[i
].n1
));
5609 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
5613 /* Remove GIMPLE_OMP_CONTINUE. */
5614 gsi_remove (&gsi
, true);
5617 /* Emit the condition in L1_BB. */
5618 gsi
= gsi_start_bb (l1_bb
);
5620 t
= fold_convert (type
, n2
);
5621 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5622 false, GSI_CONTINUE_LINKING
);
5623 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, fd
->loop
.v
, t
);
5624 stmt
= gimple_build_cond_empty (t
);
5625 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
5626 if (walk_tree (gimple_cond_lhs_ptr (stmt
), expand_omp_regimplify_p
,
5628 || walk_tree (gimple_cond_rhs_ptr (stmt
), expand_omp_regimplify_p
,
5631 gsi
= gsi_for_stmt (stmt
);
5632 gimple_regimplify_operands (stmt
, &gsi
);
5635 /* Remove GIMPLE_OMP_RETURN. */
5636 gsi
= gsi_last_bb (exit_bb
);
5637 gsi_remove (&gsi
, true);
5639 /* Connect the new blocks. */
5640 remove_edge (FALLTHRU_EDGE (entry_bb
));
5644 remove_edge (BRANCH_EDGE (entry_bb
));
5645 make_edge (entry_bb
, l1_bb
, EDGE_FALLTHRU
);
5647 e
= BRANCH_EDGE (l1_bb
);
5648 ne
= FALLTHRU_EDGE (l1_bb
);
5649 e
->flags
= EDGE_TRUE_VALUE
;
5653 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
5655 ne
= single_succ_edge (l1_bb
);
5656 e
= make_edge (l1_bb
, l0_bb
, EDGE_TRUE_VALUE
);
5659 ne
->flags
= EDGE_FALSE_VALUE
;
5660 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
5661 ne
->probability
= REG_BR_PROB_BASE
/ 8;
5663 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
, entry_bb
);
5664 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
, l2_dom_bb
);
5665 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
, l1_bb
);
5669 struct loop
*loop
= alloc_loop ();
5670 loop
->header
= l1_bb
;
5671 loop
->latch
= e
->dest
;
5672 add_loop (loop
, l1_bb
->loop_father
);
5673 if (safelen
== NULL_TREE
)
5674 loop
->safelen
= INT_MAX
;
5677 safelen
= OMP_CLAUSE_SAFELEN_EXPR (safelen
);
5678 if (!host_integerp (safelen
, 1)
5679 || (unsigned HOST_WIDE_INT
) tree_low_cst (safelen
, 1)
5681 loop
->safelen
= INT_MAX
;
5683 loop
->safelen
= tree_low_cst (safelen
, 1);
5684 if (loop
->safelen
== 1)
5689 loop
->simduid
= OMP_CLAUSE__SIMDUID__DECL (simduid
);
5690 cfun
->has_simduid_loops
= true;
5692 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
5694 if ((flag_tree_loop_vectorize
5695 || (!global_options_set
.x_flag_tree_loop_vectorize
5696 && !global_options_set
.x_flag_tree_vectorize
))
5697 && loop
->safelen
> 1)
5699 loop
->force_vect
= true;
5700 cfun
->has_force_vect_loops
= true;
/* Expand the OpenMP loop defined by REGION.  */
static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  else
    /* If there isn't a continue then this is a degenerate case where
       the introduction of abnormal edges during lowering will prevent
       original loops from being detected.  Fix that up.  */
    loops_state_set (LOOPS_NEED_FIXUP);

  if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
    expand_omp_simd (region, &fd);
  else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	   && !fd.have_ordered
	   && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (gimple_omp_for_kind (fd.for_stmt)
		  == GF_OMP_FOR_KIND_FOR);
      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = ((int) BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int) BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int) BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int) BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int) BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int) BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  vec<tree> label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (region->exit != NULL);
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
    l2 = gimple_block_label (l2_bb);
  else
    {
      /* This can happen if there are reductions.  */
      len = EDGE_COUNT (l0_bb->succs);
      gcc_assert (len > 0);
      e = EDGE_SUCC (l0_bb, len - 1);
      si = gsi_last_bb (e->dest);
      l2 = NULL_TREE;
      if (gsi_end_p (si)
	  || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	l2 = gimple_block_label (e->dest);
      else
	FOR_EACH_EDGE (e, ei, l0_bb->succs)
	  {
	    si = gsi_last_bb (e->dest);
	    if (gsi_end_p (si)
		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	      {
		l2 = gimple_block_label (e->dest);
		break;
	      }
	  }
    }
  if (exit_reachable)
    default_bb = create_empty_bb (l1_bb->prev_bb);
  else
    default_bb = create_empty_bb (l0_bb);

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use vec::quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec.create (len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node, len - 1);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
  label_vec.quick_push (t);
  i = 1;

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      label_vec.quick_push (u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);
  add_bb_to_loop (default_bb, current_loops->tree_root);

  stmt = gimple_build_switch (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  label_vec.release ();

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
    }

  /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
  si = gsi_last_bb (l2_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */
static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */
static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */
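
/* Illustrative sketch (not used by the pass): for a 4-byte access, the
   expansion below produces, in effect, a call to the __atomic_load_n
   builtin with relaxed memory ordering, which is all OpenMP requires of a
   plain atomic read.  The helper name is made up for exposition.  */

static inline int
omp_doc_atomic_read_int (int *addr)
{
  /* v = *addr, performed atomically.  */
  return __atomic_load_n (addr, __ATOMIC_RELAXED);
}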
6063 expand_omp_atomic_load (basic_block load_bb
, tree addr
,
6064 tree loaded_val
, int index
)
6066 enum built_in_function tmpbase
;
6067 gimple_stmt_iterator gsi
;
6068 basic_block store_bb
;
6071 tree decl
, call
, type
, itype
;
6073 gsi
= gsi_last_bb (load_bb
);
6074 stmt
= gsi_stmt (gsi
);
6075 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
6076 loc
= gimple_location (stmt
);
6078 /* ??? If the target does not implement atomic_load_optab[mode], and mode
6079 is smaller than word size, then expand_atomic_load assumes that the load
6080 is atomic. We could avoid the builtin entirely in this case. */
6082 tmpbase
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
6083 decl
= builtin_decl_explicit (tmpbase
);
6084 if (decl
== NULL_TREE
)
6087 type
= TREE_TYPE (loaded_val
);
6088 itype
= TREE_TYPE (TREE_TYPE (decl
));
6090 call
= build_call_expr_loc (loc
, decl
, 2, addr
,
6091 build_int_cst (NULL
, MEMMODEL_RELAXED
));
6092 if (!useless_type_conversion_p (type
, itype
))
6093 call
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
6094 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
6096 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6097 gsi_remove (&gsi
, true);
6099 store_bb
= single_succ (load_bb
);
6100 gsi
= gsi_last_bb (store_bb
);
6101 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
6102 gsi_remove (&gsi
, true);
6104 if (gimple_in_ssa_p (cfun
))
6105 update_ssa (TODO_update_ssa_no_phi
);
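
/* Editor's illustration (not part of the pass): at the source level, the
   rewrite attempted above corresponds roughly to turning

       #pragma omp atomic read
       v = *p;

   into a relaxed atomic load; OpenMP atomics only guarantee atomicity,
   not ordering.  The standalone sketch below uses the user-visible GCC
   __atomic builtin family that the generated call resolves to.  */
#if 0
#include <stdint.h>

static uint64_t
omp_atomic_read_sketch (uint64_t *p)
{
  /* Roughly what "v = *p" under "#pragma omp atomic read" becomes.  */
  return __atomic_load_n (p, __ATOMIC_RELAXED);
}
#endif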
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
                         tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;
  enum machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
                              build_int_cst (NULL, MEMMODEL_RELAXED));
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
        call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
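
/* Editor's illustration (not part of the pass): the source-level shapes the
   rewrite above handles are

       #pragma omp atomic write            #pragma omp atomic capture
       *p = val;                           { v = *p; *p = val; }

   i.e. a plain relaxed store, or an exchange when the old value is read.
   Standalone sketch using the GCC __atomic builtins:  */
#if 0
#include <stdint.h>

static void
omp_atomic_write_sketch (uint64_t *p, uint64_t val)
{
  __atomic_store_n (p, val, __ATOMIC_RELAXED);
}

static uint64_t
omp_atomic_swap_sketch (uint64_t *p, uint64_t val)
{
  /* Capture form: the previous contents of *p are returned.  */
  return __atomic_exchange_n (p, val, __ATOMIC_RELAXED);
}
#endif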
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
                            tree addr, tree loaded_val,
                            tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  enum machine_mode imode;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
           && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  tmpbase = ((enum built_in_function)
             ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operation optab also implement compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true))
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
                              fold_convert_loc (loc, itype, rhs),
                              build_int_cst (NULL, MEMMODEL_RELAXED));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
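
/* Editor's illustration (not part of the pass): when the update statement
   matches one of the forms above, e.g.

       #pragma omp atomic
       *p += n;

   the whole load/update/store collapses into a single relaxed fetch-op
   builtin.  Standalone sketch using the user-visible __atomic builtins:  */
#if 0
#include <stdint.h>

static void
omp_atomic_update_sketch (uint64_t *p, uint64_t n)
{
  /* "*p += n" under "#pragma omp atomic"; the old value is not needed,
     so a fetch-add with relaxed ordering is enough.  */
  __atomic_fetch_add (p, n, __ATOMIC_RELAXED);
}

static uint64_t
omp_atomic_capture_sketch (uint64_t *p, uint64_t n)
{
  /* "v = *p; *p += n;" under "#pragma omp atomic capture" maps to the
     value-returning variant instead.  */
  return __atomic_fetch_add (p, n, __ATOMIC_RELAXED);
}
#endif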
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
        newval = rhs;    // with oldval replacing *addr in rhs
        oldval = __sync_val_compare_and_swap (addr, oldval, newval);
        if (oldval != newval)
          goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
                            tree addr, tree loaded_val, tree stored_val,
                            int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;
  enum built_in_function fncode;

  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
     order to use the RELAXED memory model effectively.  */
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
                                    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
                                                           true), NULL);
      iaddr_val
        = force_gimple_operand_gsi (&si,
                                    fold_convert (TREE_TYPE (iaddr), addr),
                                    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
        loadedi = make_ssa_name (loadedi, NULL);
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial
    = force_gimple_operand_gsi (&si,
                                build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
                                        iaddr,
                                        build_int_cst (TREE_TYPE (iaddr), 0)),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
               initial);
    }
  else
    gsi_insert_before (&si,
                       gimple_build_assign (loadedi, initial),
                       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
        {
          gimple stmt;
          x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
                                        true, GSI_SAME_STMT);
          stmt = gimple_build_assign (loaded_val, x);
          gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
        }
      else
        {
          x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
          force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
                                    true, GSI_SAME_STMT);
        }
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
                                build1 (VIEW_CONVERT_EXPR, itype,
                                        stored_val), true, NULL_TREE, true,
                                GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
                                          fold_convert (TREE_TYPE (loadedi),
                                                        new_storedi),
                                          true, NULL_TREE,
                                          true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
           (build2 (NE_EXPR, boolean_type_node,
                    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  struct loop *loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = store_bb;
  add_loop (loop, loop_header->loop_father);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
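
/* Editor's illustration (not part of the pass): for operations with no
   fetch-op builtin (e.g. "*p = *p * n"), the code built above is the usual
   compare-and-swap retry loop.  A correct standalone rendering of that
   loop, using the __sync builtin the pass calls:  */
#if 0
#include <stdint.h>

static void
omp_atomic_cas_loop_sketch (uint32_t *addr, uint32_t n)
{
  uint32_t expected, prev, newval;

  prev = *addr;
  do
    {
      expected = prev;
      newval = expected * n;   /* the statement under "#pragma omp atomic" */
      prev = __sync_val_compare_and_swap (addr, expected, newval);
    }
  while (prev != expected);    /* retry until the swap actually happened */
}
#endif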
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      GOMP_atomic_start ();
      *addr = rhs;
      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
                         tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
                              stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}
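
/* Editor's illustration (not part of the pass): the mutex fallback simply
   brackets a plain load/store pair with the libgomp entry points named
   above.  Standalone sketch; the extern declarations are assumptions about
   libgomp's interface, stated here only for the illustration:  */
#if 0
extern void GOMP_atomic_start (void);
extern void GOMP_atomic_end (void);

static void
omp_atomic_mutex_sketch (long double *addr, long double rhs)
{
  /* Works for any type/operation, but serializes all atomic regions.  */
  GOMP_atomic_start ();
  *addr = *addr + rhs;
  GOMP_atomic_end ();
}
#endif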
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
        {
          /* Atomic load.  */
          if (loaded_val == stored_val
              && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
                  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
              && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
              && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
            return;

          /* Atomic store.  */
          if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
               || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
              && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
              && store_bb == single_succ (load_bb)
              && first_stmt (store_bb) == store
              && expand_omp_atomic_store (load_bb, addr, loaded_val,
                                          stored_val, index))
            return;

          /* When possible, use specialized atomic update functions.  */
          if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
              && store_bb == single_succ (load_bb)
              && expand_omp_atomic_fetch_op (load_bb, addr,
                                             loaded_val, stored_val, index))
            return;

          /* If we don't have specialized __sync builtins, try and implement
             as a compare and swap loop.  */
          if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
                                          loaded_val, stored_val, index))
            return;
        }
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
         region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
        determine_parallel_type (region);

      if (region->inner)
        expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
        input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
        {
        case GIMPLE_OMP_PARALLEL:
        case GIMPLE_OMP_TASK:
          expand_omp_taskreg (region);
          break;

        case GIMPLE_OMP_FOR:
          expand_omp_for (region);
          break;

        case GIMPLE_OMP_SECTIONS:
          expand_omp_sections (region);
          break;

        case GIMPLE_OMP_SECTION:
          /* Individual omp sections are handled together with their
             parent GIMPLE_OMP_SECTIONS region.  */
          break;

        case GIMPLE_OMP_SINGLE:
          expand_omp_single (region);
          break;

        case GIMPLE_OMP_MASTER:
        case GIMPLE_OMP_ORDERED:
        case GIMPLE_OMP_CRITICAL:
          expand_omp_synch (region);
          break;

        case GIMPLE_OMP_ATOMIC_LOAD:
          expand_omp_atomic (region);
          break;

        default:
          gcc_unreachable ();
        }

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
                     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
        {
          /* STMT is the return point out of region PARENT.  Mark it
             as the exit point and make PARENT the immediately
             enclosing region.  */
          gcc_assert (parent);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
        {
          /* GIMPLE_OMP_ATOMIC_STORE is analogous to
             GIMPLE_OMP_RETURN, but matches with
             GIMPLE_OMP_ATOMIC_LOAD.  */
          gcc_assert (parent);
          gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_CONTINUE)
        {
          gcc_assert (parent);
          parent->cont = bb;
        }
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
        {
          /* GIMPLE_OMP_SECTIONS_SWITCH is part of
             GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
          ;
        }
      else
        {
          /* Otherwise, this directive becomes the parent for a new
             region.  */
          region = new_omp_region (bb, code, parent);
          parent = region;
        }
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}
namespace {

const pass_data pass_data_expand_omp =
{
  GIMPLE_PASS, /* type */
  "ompexp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_expand_omp : public gimple_opt_pass
{
public:
  pass_expand_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_expand_omp (); }
  unsigned int execute () { return execute_expand_omp (); }

}; // class pass_expand_omp

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp (gcc::context *ctxt)
{
  return new pass_expand_omp (ctxt);
}
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
                           &ilist, &dlist, ctx);

  new_body = gimple_omp_body (stmt);
  gimple_omp_set_body (stmt, NULL);
  tgsi = gsi_start (new_body);
  for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      lower_omp (gimple_omp_body_ptr (sec_start), sctx);
      gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
                            GSI_CONTINUE_LINKING);
      gimple_omp_set_body (sec_start, NULL);

      if (gsi_one_before_end_p (tgsi))
        {
          gimple_seq l = NULL;
          lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
                                     &l, ctx);
          gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
          gimple_omp_section_set_last (sec_start);
        }

      gsi_insert_after (&tgsi, gimple_build_omp_return (false),
                        GSI_CONTINUE_LINKING);
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, new_body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, new_stmt, true);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
                            OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

        if (GOMP_single_start ())
          BODY;
        [ GOMP_barrier (); ]    -> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
                            fold_convert_loc (loc, TREE_TYPE (lhs),
                                              boolean_true_node),
                            tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
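
/* Editor's illustration (not part of the pass): the lowered form sketched
   in the comment above, written as standalone C.  The extern declarations
   are assumptions about libgomp's interface; GOMP_single_start is assumed
   to return true in exactly one thread of the team.  */
#if 0
extern _Bool GOMP_single_start (void);
extern void GOMP_barrier (void);

static void
omp_single_sketch (void (*body) (void))
{
  /* #pragma omp single
       body ();            */
  if (GOMP_single_start ())
    body ();
  GOMP_barrier ();          /* omitted when the nowait clause is present */
}
#endif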
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

        #pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
        if ((copyout_p = GOMP_single_copy_start ()) == NULL)
          {
            BODY;
            copyout.a = a;
            copyout.b = b;
            copyout.c = c;
            GOMP_single_copy_end (&copyout);
          }
        else
          {
            a = copyout_p->a;
            b = copyout_p->b;
            c = copyout_p->c;
          }
        GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
              build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
                             &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
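
/* Editor's illustration (not part of the pass): the copyprivate expansion
   above, as standalone C for a single variable 'a'.  The libgomp
   declarations are assumptions stated for the illustration only.  */
#if 0
extern void *GOMP_single_copy_start (void);
extern void GOMP_single_copy_end (void *);
extern void GOMP_barrier (void);

struct copyout_s { int a; };

static void
omp_single_copyprivate_sketch (int *a, void (*body) (int *))
{
  /* #pragma omp single copyprivate (a)
       body (&a);                         */
  struct copyout_s copyout, *copyout_p;

  copyout_p = GOMP_single_copy_start ();
  if (copyout_p == NULL)
    {
      body (a);                    /* this thread executed the single */
      copyout.a = *a;
      GOMP_single_copy_end (&copyout);
    }
  else
    *a = copyout_p->a;             /* everyone else copies the value in */
  GOMP_barrier ();                 /* mandatory: see expand_omp_single */
}
#endif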
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  bind_body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
                           &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body_ptr (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
                            OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
                         0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}
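
/* Editor's illustration (not part of the pass): "#pragma omp ordered" just
   brackets its body with the two libgomp calls emitted above; the loop
   scheduling code ensures the brackets are entered in iteration order.
   The extern declarations are assumptions stated for the illustration.  */
#if 0
extern void GOMP_ordered_start (void);
extern void GOMP_ordered_end (void);

static void
omp_ordered_sketch (void (*body) (void))
{
  /* #pragma omp ordered
       body ();            */
  GOMP_ordered_start ();
  body ();
  GOMP_ordered_end ();
}
#endif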
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
        critical_name_mutexes
          = splay_tree_new_ggc (splay_tree_compare_pointers,
                                ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
                                ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
        {
          char *new_str;

          decl = create_tmp_var_raw (ptr_type_node, NULL);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;
          varpool_finalize_decl (decl);

          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
                             (splay_tree_value) decl);
        }
      else
        decl = (tree) n->value;

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
                                    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}
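
/* Editor's illustration (not part of the pass): an unnamed critical uses the
   global GOMP_critical_start/end pair; a named one gets its own pointer-sized
   mutex named ".gomp_critical_user_<name>", shared across translation units
   via DECL_COMMON, and passes its address to the _name_ variants.  The
   extern declarations below are assumptions stated for the illustration.  */
#if 0
extern void GOMP_critical_start (void);
extern void GOMP_critical_end (void);
extern void GOMP_critical_name_start (void **);
extern void GOMP_critical_name_end (void **);

/* Rough equivalent of the mutex the pass emits for
   "#pragma omp critical (lock1)".  */
static void *gomp_critical_user_lock1;

static void
omp_critical_sketch (void (*body) (void))
{
  /* #pragma omp critical         -- unnamed form */
  GOMP_critical_start ();
  body ();
  GOMP_critical_end ();

  /* #pragma omp critical (lock1) -- named form */
  GOMP_critical_name_start (&gomp_critical_user_lock1);
  body ();
  GOMP_critical_name_end (&gomp_critical_user_lock1);
}
#endif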
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
                           gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
          && host_integerp (fd->loop.n2, 0)
          && ! integer_zerop (fd->loop.n2))
        vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      else
        vinit = unshare_expr (vinit);

      /* Initialize the iterator variable, so that threads that don't execute
         any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
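
/* Editor's illustration (not part of the pass): for a loop
   "for (i = 0; i < n; i++)" with step 1, the predicate built above
   degenerates to an equality test, so only the thread whose chunk ended at
   the last iteration writes the shared copy back.  Standalone sketch of the
   shape of the emitted code for "lastprivate (x)"; the per-thread bounds are
   assumptions for the illustration:  */
#if 0
static void
omp_lastprivate_sketch (int *shared_x, int lb, int ub, int n)
{
  int i, x = 0;            /* i, x: this thread's private copies */

  /* i is initialized up front so a thread that runs no iterations cannot
     satisfy the predicate by accident (the gimplify_assign above).  */
  i = lb;
  for (; i < ub; i++)      /* lb/ub: this thread's chunk of [0, n) */
    x = i * i;             /* loop body */

  if (i == n)              /* cond_code folded to EQ_EXPR for step 1 */
    *shared_x = x;         /* lastprivate write-back */
}
#endif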
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no member
     of a sequence anymore as we're going to add it to a different
     one below.  */
  gsi_replace (gsi_p, new_stmt, true);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  lower_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
                                                         fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
                     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  push_cfun (child_cfun);

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        record_needs_remap = true;
        break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        srecord_needs_remap = true;
        break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
        record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
        srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree *p;

          decl = OMP_CLAUSE_DECL (c);
          p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
          if (p == NULL)
            continue;
          n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = omp_build_component_ref (src, sf);
          t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
          append_to_statement_list (t, &list);
        }

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        sf = (tree) n->value;
        if (tcctx.cb.decl_map)
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
        src = build_simple_mem_ref_loc (loc, sarg);
        src = omp_build_component_ref (src, sf);
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_FIRSTPRIVATE:
        decl = OMP_CLAUSE_DECL (c);
        if (is_variable_sized (decl))
          break;
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = omp_build_component_ref (src, sf);
            if (use_pointer_for_field (decl, NULL) || is_reference (decl))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_PRIVATE:
        if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
          break;
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = omp_build_component_ref (src, sf);
            if (use_pointer_for_field (decl, NULL))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      default:
        break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree ind, ptr, df;

          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            continue;
          n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
          if (n == NULL)
            continue;
          f = (tree) n->value;
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
          gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
          ind = DECL_VALUE_EXPR (decl);
          gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
          gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
          n = splay_tree_lookup (ctx->sfield_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = omp_build_component_ref (src, sf);
          src = build_simple_mem_ref_loc (loc, src);
          dst = build_simple_mem_ref_loc (loc, arg);
          dst = omp_build_component_ref (dst, f);
          t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
          append_to_statement_list (t, &list);
          n = splay_tree_lookup (ctx->field_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          df = (tree) n->value;
          df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
          ptr = build_simple_mem_ref_loc (loc, arg);
          ptr = omp_build_component_ref (ptr, df);
          t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
                      build_fold_addr_expr_loc (loc, dst));
          append_to_statement_list (t, &list);
        }

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);
}
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
                         ctx ? NULL : &wi, NULL)
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
                            ctx ? NULL : &wi, NULL)))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (&body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (&body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

namespace {

const pass_data pass_data_lower_omp =
{
  GIMPLE_PASS, /* type */
  "omplower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_lomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_omp : public gimple_opt_pass
{
public:
  pass_lower_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_omp, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return execute_lower_omp (); }

}; // class pass_lower_omp

} // anon namespace

gimple_opt_pass *
make_pass_lower_omp (gcc::context *ctxt)
{
  return new pass_lower_omp (ctxt);
}

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
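
/* For illustration only (example user code, not part of this file; the
   function and label names are made up), the kind of violation these
   routines diagnose is a branch that leaves or enters an OpenMP
   structured block:

     void f (int cond)
     {
     #pragma omp parallel
       {
         if (cond)
           goto out;   // jumps out of the parallel region: invalid exit
       }
     out:;
     }

   The offending jump is reported with error () and then replaced by a
   GIMPLE_NOP (see diagnose_sb_0 below), so later passes only ever see
   well-formed structured blocks.  */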

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
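
/* For illustration only (example user code, not part of this file; names
   are made up): when the branch itself is outside any OpenMP construct,
   BRANCH_CTX is NULL and the more specific "invalid entry" wording is
   used:

     void g (int cond)
     {
       if (cond)
         goto in;    // jumps into the single region: invalid entry
     #pragma omp single
       {
       in:;
       }
     }

   A branch whose source lies inside some construct gets the generic
   "invalid branch to/from an OpenMP structured block" message instead.  */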

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
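
/* For illustration only (example user code, not part of this file; label
   names are made up), given

     #pragma omp parallel
     {
     lab1:;
     }
   lab2:;

   this pass records lab1 -> the enclosing GIMPLE_OMP_PARALLEL statement
   and lab2 -> NULL (no enclosing OpenMP construct) in ALL_LABELS, so that
   pass 2 below can compare each branch's context with the context of its
   destination label.  */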

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
                           diagnose_sb_2, NULL, wi);
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
        tree lab = gimple_cond_true_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
        lab = gimple_cond_false_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
      }
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);

  gimple_set_body (current_function_decl, body);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

namespace {

const pass_data pass_data_diagnose_omp_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_omp_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_omp_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_omp_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_diagnose_omp_blocks (); }
  unsigned int execute ()
  {
    return diagnose_omp_structured_block_errors ();
  }

}; // class pass_diagnose_omp_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_omp_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_omp_blocks (ctxt);
}

#include "gt-omp-low.h"