gcc/omp-general.c

/* General types and functions that are useful for processing of OpenMP,
   OpenACC and similar directives at various stages of compilation.

   Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Find an OMP clause of type KIND within CLAUSES.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "langhooks.h"
#include "omp-general.h"
#include "stringpool.h"
#include "attribs.h"

tree
omp_find_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
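
/* Illustrative usage sketch (assumed caller context, mirroring the uses
   further down in this file):

     tree c = omp_find_clause (gimple_omp_for_clauses (for_stmt),
                               OMP_CLAUSE_SCHEDULE);

   returns the first schedule clause in the chain, or NULL_TREE if the
   construct has none.  */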

/* Return true if DECL is a reference type.  */

bool
omp_is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or
   GT_EXPR.  */

void
omp_adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2)
{
  switch (*cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
    case NE_EXPR:
      break;
    case LE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
        *n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
      else
        *n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
                               build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
        *n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
      else
        *n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
                               build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }
}
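
/* Worked example (illustrative): a loop bound written as `i <= n' arrives
   here as LE_EXPR and is rewritten to `i < n + 1' (via
   fold_build_pointer_plus_hwi_loc for pointer iterators); likewise
   `i >= n' becomes `i > n - 1'.  LT_EXPR, GT_EXPR and NE_EXPR are left
   untouched.  */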

/* Return the looping step from INCR, extracted from the step of a gimple omp
   for statement.  */

tree
omp_get_for_step_from_incr (location_t loc, tree incr)
{
  tree step;
  switch (TREE_CODE (incr))
    {
    case PLUS_EXPR:
      step = TREE_OPERAND (incr, 1);
      break;
    case POINTER_PLUS_EXPR:
      step = fold_convert (ssizetype, TREE_OPERAND (incr, 1));
      break;
    case MINUS_EXPR:
      step = TREE_OPERAND (incr, 1);
      step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
      break;
    default:
      gcc_unreachable ();
    }
  return step;
}
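
/* Worked example (illustrative): for an increment `i = i + 4' the step is
   the INTEGER_CST 4; for `i = i - 4' it is negated to -4; for a pointer
   increment (POINTER_PLUS_EXPR) the offset is first converted to ssizetype
   so that negative steps remain representable.  */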

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

void
omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
                    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
                  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->tiling = NULL_TREE;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        if (OMP_CLAUSE_ORDERED_EXPR (t))
          fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
        break;
      case OMP_CLAUSE_SCHEDULE:
        gcc_assert (!distribute && !taskloop);
        fd->sched_kind
          = (enum omp_clause_schedule_kind)
            (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
        fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
                               & ~OMP_CLAUSE_SCHEDULE_MASK);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
        break;
      case OMP_CLAUSE_DIST_SCHEDULE:
        gcc_assert (distribute);
        fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
        break;
      case OMP_CLAUSE_TILE:
        fd->tiling = OMP_CLAUSE_TILE_LIST (t);
        fd->collapse = list_length (fd->tiling);
        gcc_assert (fd->collapse);
        collapse_iter = &OMP_CLAUSE_TILE_ITERVAR (t);
        collapse_count = &OMP_CLAUSE_TILE_COUNT (t);
        break;
      default:
        break;
      }

  if (fd->collapse > 1 || fd->tiling)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert ((fd->collapse == 1 && !fd->tiling) || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0
          && fd->collapse == 1
          && !fd->tiling
          && (fd->ordered == 0 || loops == NULL))
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      gcc_assert (loop->cond_code != NE_EXPR
                  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKSIMD
                  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKFOR);
      omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2);

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      loop->step = omp_get_for_step_from_incr (loc, t);

      if (simd
          || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
              && !fd->have_ordered))
        {
          if (fd->collapse == 1 && !fd->tiling)
            iter_type = TREE_TYPE (loop->v);
          else if (i == 0
                   || TYPE_PRECISION (iter_type)
                      < TYPE_PRECISION (TREE_TYPE (loop->v)))
            iter_type
              = build_nonstandard_integer_type
                  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
        }
      else if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (i >= fd->collapse)
        continue;

      if (collapse_count && *collapse_count == NULL)
        {
          t = fold_binary (loop->cond_code, boolean_type_node,
                           fold_convert (TREE_TYPE (loop->v), loop->n1),
                           fold_convert (TREE_TYPE (loop->v), loop->n2));
          if (t && integer_zerop (t))
            count = build_zero_cst (long_long_unsigned_type_node);
          else if ((i == 0 || count != NULL_TREE)
                   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                   && TREE_CONSTANT (loop->n1)
                   && TREE_CONSTANT (loop->n2)
                   && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype = signed_type_for (itype);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc (loc, itype,
                                                                        loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR, long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else if (count && !integer_zerop (count))
            count = NULL_TREE;
        }
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1 || fd->tiling || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}
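
/* Worked example (illustrative) of the constant trip-count computation
   above: for a loop `for (i = 0; i < 10; i += 3)' the code forms

     (step + (cond_code == LT_EXPR ? -1 : 1) + n2 - n1) / step
       = (3 + -1 + 10 - 0) / 3 = 4,

   i.e. the iterations i = 0, 3, 6, 9; collapsed loops multiply their
   per-loop counts together into COUNT.  */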

/* Build a call to GOMP_barrier.  */

gimple *
omp_build_barrier (tree lhs)
{
  tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
                                           : BUILT_IN_GOMP_BARRIER);
  gcall *g = gimple_build_call (fndecl, 0);
  if (lhs)
    gimple_call_set_lhs (g, lhs);
  return g;
}
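
/* Usage sketch (illustrative, assumed caller context): passing NULL_TREE
   emits a plain GOMP_barrier call; passing an lhs temporary selects
   GOMP_barrier_cancel and stores its "was cancelled" result there, e.g.

     gimple *g = omp_build_barrier (NULL_TREE);
     gimple_seq_add_stmt (&seq, g);

   where `seq' is a gimple_seq being built by the caller.  */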

/* Return maximum possible vectorization factor for the target.  */

int
omp_max_vf (void)
{
  if (!optimize
      || optimize_debug
      || !flag_tree_loop_optimize
      || (!flag_tree_loop_vectorize
          && global_options_set.x_flag_tree_loop_vectorize))
    return 1;

  int vf = 1;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    vf = 1 << floor_log2 (vs);
  else
    {
      machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
      if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
        vf = GET_MODE_NUNITS (vqimode);
    }
  return vf;
}
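
/* Worked example (illustrative): the autovectorize_vector_sizes hook
   returns a bitmask of supported vector sizes in bytes; if a target
   reported 32 | 16 == 48, then 1 << floor_log2 (48) selects the largest
   size, 32.  If the hook returns 0, the number of QImode lanes of the
   preferred SIMD mode is used instead.  */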

/* Return maximum SIMT width if offloading may target SIMT hardware.  */

int
omp_max_simt_vf (void)
{
  if (!optimize)
    return 0;
  if (ENABLE_OFFLOADING)
    for (const char *c = getenv ("OFFLOAD_TARGET_NAMES"); c;)
      {
        if (!strncmp (c, "nvptx", strlen ("nvptx")))
          return 32;
        else if ((c = strchr (c, ',')))
          c++;
      }
  return 0;
}
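
/* Worked example (illustrative): when the driver was configured for
   offloading and OFFLOAD_TARGET_NAMES contains an entry such as
   "nvptx-none", the loop walks the comma-separated list, matches the
   "nvptx" prefix and returns a SIMT width of 32 (the warp size);
   otherwise the result is 0.  */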

/* Encode an oacc launch argument.  This matches the GOMP_LAUNCH_PACK
   macro in gomp-constants.h.  We do not check for overflow.  */

tree
oacc_launch_pack (unsigned code, tree device, unsigned op)
{
  tree res;

  res = build_int_cst (unsigned_type_node, GOMP_LAUNCH_PACK (code, 0, op));
  if (device)
    {
      device = fold_build2 (LSHIFT_EXPR, unsigned_type_node,
                            device, build_int_cst (unsigned_type_node,
                                                   GOMP_LAUNCH_DEVICE_SHIFT));
      res = fold_build2 (BIT_IOR_EXPR, unsigned_type_node, res, device);
    }
  return res;
}
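
/* Illustrative sketch: the compile-time constant part is packed with
   GOMP_LAUNCH_PACK (code, 0, op); a runtime DEVICE expression, when
   present, is shifted into its field and OR-ed in, so the result mirrors
   what GOMP_LAUNCH_PACK (code, device, op) would produce had DEVICE been
   a constant.  The field layout itself lives in gomp-constants.h.  */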

/* FIXME: What is the following comment for? */
/* Look for compute grid dimension clauses and convert to an attribute
   attached to FN.  This permits the target-side code to (a) massage
   the dimensions, (b) emit that data and (c) optimize.  Non-constant
   dimensions are pushed onto ARGS.

   The attribute value is a TREE_LIST.  A set of dimensions is
   represented as a list of INTEGER_CST.  Those that are runtime
   exprs are represented as an INTEGER_CST of zero.

   TODO: Normally the attribute will just contain a single such list.  If
   however it contains a list of lists, this will represent the use of
   device_type.  Each member of the outer list is an assoc list of
   dimensions, keyed by the device type.  The first entry will be the
   default.  Well, that's the plan.  */

/* Replace any existing oacc fn attribute with updated dimensions.  */

void
oacc_replace_fn_attrib (tree fn, tree dims)
{
  tree ident = get_identifier (OACC_FN_ATTRIB);
  tree attribs = DECL_ATTRIBUTES (fn);

  /* If we happen to be present as the first attrib, drop it.  */
  if (attribs && TREE_PURPOSE (attribs) == ident)
    attribs = TREE_CHAIN (attribs);
  DECL_ATTRIBUTES (fn) = tree_cons (ident, dims, attribs);
}

/* Scan CLAUSES for launch dimensions and attach them to the oacc
   function attribute.  Push any that are non-constant onto the ARGS
   list, along with an appropriate GOMP_LAUNCH_DIM tag.  */

void
oacc_set_fn_attrib (tree fn, tree clauses, vec<tree> *args)
{
  /* Must match GOMP_DIM ordering.  */
  static const omp_clause_code ids[]
    = { OMP_CLAUSE_NUM_GANGS, OMP_CLAUSE_NUM_WORKERS,
        OMP_CLAUSE_VECTOR_LENGTH };
  unsigned ix;
  tree dims[GOMP_DIM_MAX];

  tree attr = NULL_TREE;
  unsigned non_const = 0;

  for (ix = GOMP_DIM_MAX; ix--;)
    {
      tree clause = omp_find_clause (clauses, ids[ix]);
      tree dim = NULL_TREE;

      if (clause)
        dim = OMP_CLAUSE_EXPR (clause, ids[ix]);
      dims[ix] = dim;
      if (dim && TREE_CODE (dim) != INTEGER_CST)
        {
          dim = integer_zero_node;
          non_const |= GOMP_DIM_MASK (ix);
        }
      attr = tree_cons (NULL_TREE, dim, attr);
    }

  oacc_replace_fn_attrib (fn, attr);

  if (non_const)
    {
      /* Push a dynamic argument set.  */
      args->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM,
                                         NULL_TREE, non_const));
      for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
        if (non_const & GOMP_DIM_MASK (ix))
          args->safe_push (dims[ix]);
    }
}
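
/* Worked example (illustrative): for `#pragma acc parallel num_gangs(32)
   num_workers(n)' the attribute list becomes (32, 0, NULL_TREE) in
   gang/worker/vector order -- the runtime expression N is recorded as
   zero -- while ARGS receives a GOMP_LAUNCH_DIM launch word whose operand
   masks the worker axis, followed by the expression N itself.  */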

/* Process the routine's dimension clauses to generate an attribute
   value.  Issue diagnostics as appropriate.  We default to SEQ
   (OpenACC 2.5 clarifies this).  All dimensions have a size of zero
   (dynamic).  TREE_PURPOSE is set to indicate whether that dimension
   can have a loop partitioned on it.  Non-zero indicates
   yes, zero indicates no.  By construction once a non-zero has been
   reached, further inner dimensions must also be non-zero.  We set
   TREE_VALUE to zero for the dimensions that may be partitioned and
   1 for the other ones -- if a loop is (erroneously) spawned at
   an outer level, we don't want to try and partition it.  */

tree
oacc_build_routine_dims (tree clauses)
{
  /* Must match GOMP_DIM ordering.  */
  static const omp_clause_code ids[]
    = {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
  int ix;
  int level = -1;

  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    for (ix = GOMP_DIM_MAX + 1; ix--;)
      if (OMP_CLAUSE_CODE (clauses) == ids[ix])
        {
          if (level >= 0)
            error_at (OMP_CLAUSE_LOCATION (clauses),
                      "multiple loop axes specified for routine");
          level = ix;
          break;
        }

  /* Default to SEQ.  */
  if (level < 0)
    level = GOMP_DIM_MAX;

  tree dims = NULL_TREE;

  for (ix = GOMP_DIM_MAX; ix--;)
    dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
                      build_int_cst (integer_type_node, ix < level), dims);

  return dims;
}
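
/* Worked example (illustrative): for `#pragma acc routine worker' LEVEL is
   the worker axis, so the resulting list is

     gang:   TREE_PURPOSE 0, TREE_VALUE 1
     worker: TREE_PURPOSE 1, TREE_VALUE 0
     vector: TREE_PURPOSE 1, TREE_VALUE 0

   i.e. loops inside the routine may be partitioned across workers and
   vectors but not across gangs, matching the comment above.  */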

/* Retrieve the oacc function attrib and return it.  Non-oacc
   functions will return NULL.  */

tree
oacc_get_fn_attrib (tree fn)
{
  return lookup_attribute (OACC_FN_ATTRIB, DECL_ATTRIBUTES (fn));
}

/* Extract an oacc execution dimension from FN.  FN must be an
   offloaded function or routine that has already had its execution
   dimensions lowered to the target-specific values.  */

int
oacc_get_fn_dim_size (tree fn, int axis)
{
  tree attrs = oacc_get_fn_attrib (fn);

  gcc_assert (axis < GOMP_DIM_MAX);

  tree dims = TREE_VALUE (attrs);
  while (axis--)
    dims = TREE_CHAIN (dims);

  int size = TREE_INT_CST_LOW (TREE_VALUE (dims));

  return size;
}

/* Extract the dimension axis from an IFN_GOACC_DIM_POS or
   IFN_GOACC_DIM_SIZE call.  */

int
oacc_get_ifn_dim_arg (const gimple *stmt)
{
  gcc_checking_assert (gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_SIZE
                       || gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_POS);
  tree arg = gimple_call_arg (stmt, 0);
  HOST_WIDE_INT axis = TREE_INT_CST_LOW (arg);

  gcc_checking_assert (axis >= 0 && axis < GOMP_DIM_MAX);
  return (int) axis;
}