/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file contains data types and function declarations that are not
   part of the official OpenMP user interface.  There are declarations
   in here that are part of the GNU OpenMP ABI, in that the compiler is
   required to know about them and use them.

   The convention is that the all caps prefix "GOMP" is used to group items
   that are part of the external ABI, and the lower case prefix "gomp"
   is used to group items that are completely private to the library.  */

#ifndef LIBGOMP_H
#define LIBGOMP_H 1

#include "config.h"
#include "gstdint.h"

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif
/* If we were a C++ library, we'd get this from <std/atomic>.  */
enum memmodel
{
  MEMMODEL_RELAXED = 0,
  MEMMODEL_CONSUME = 1,
  MEMMODEL_ACQUIRE = 2,
  MEMMODEL_RELEASE = 3,
  MEMMODEL_ACQ_REL = 4,
  MEMMODEL_SEQ_CST = 5
};
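
/* These values line up with GCC's __ATOMIC_* constants, so they can be
   passed directly to the __atomic builtins.  A hedged, illustrative
   sketch (not code taken from this header):

     int flag;
     __atomic_store_n (&flag, 1, MEMMODEL_RELEASE);
     while (__atomic_load_n (&flag, MEMMODEL_ACQUIRE) == 0)
       ;   // spin until the store above becomes visible
*/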
#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "ptrlock.h"
/* This structure contains the data to control one work-sharing construct,
   either a LOOP (FOR/DO) or a SECTIONS.  */

enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO
};

struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
	 this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
	 is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  /* This is a circular queue that details which threads will be allowed
     into the ordered region and in which order.  When a thread allocates
     iterations on which it is going to work, it also registers itself at
     the end of the array.  When a thread reaches the ordered region, it
     checks to see if it is the one at the head of the queue.  If not, it
     blocks on its RELEASE semaphore.  */
  unsigned *ordered_team_ids;

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
  gomp_mutex_t lock __attribute__((aligned (64)));

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this is the iteration start point and never changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* If only a few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};
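
/* Illustration only: a simplified, hedged sketch of how the circular queue
   above gates entry into an ordered region (the real logic lives in
   ordered.c and differs in detail; the names below come from the structs
   declared in this file):

     struct gomp_work_share *ws = thr->ts.work_share;
     if (ws->ordered_team_ids[ws->ordered_cur] != thr->ts.team_id)
       gomp_sem_wait (&thr->release);        // not at the head: block
     ws->ordered_owner = thr->ts.team_id;    // acknowledged owner
*/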
/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

  /* Place-partition-var, offset and length into gomp_places_list array.  */
  unsigned place_partition_off;
  unsigned place_partition_len;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So the first time a particular loop
     is encountered this number is 0, the second time through the loop
     is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};
struct target_mem_desc;

/* These are the OpenMP 4.0 Internal Control Variables described in
   section 2.3.1.  Those described as having one copy per task are
   stored within the structure; those described as having one copy
   for the whole program are (naturally) global variables.  */

struct gomp_task_icv
{
  unsigned long nthreads_var;
  enum gomp_schedule_type run_sched_var;
  int run_sched_modifier;
  int default_device_var;
  unsigned int thread_limit_var;
  bool dyn_var;
  bool nest_var;
  char bind_var;
  /* Internal ICV.  */
  struct target_mem_desc *target_data;
};

extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern bool gomp_cancel_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len;
extern char *gomp_bind_var_list;
extern unsigned long gomp_bind_var_list_len;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
enum gomp_task_kind
{
  GOMP_TASK_IMPLICIT,
  GOMP_TASK_IFFALSE,
  GOMP_TASK_WAITING,
  GOMP_TASK_TIED
};

struct gomp_task;
struct gomp_taskgroup;
struct htab;

struct gomp_task_depend_entry
{
  void *addr;
  struct gomp_task_depend_entry *next;
  struct gomp_task_depend_entry *prev;
  struct gomp_task *task;
  bool is_in;
  bool redundant;
  bool redundant_out;
};

struct gomp_dependers_vec
{
  size_t n_elem;
  size_t allocated;
  struct gomp_task *elem[];
};

/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies.  */

struct gomp_taskwait
{
  bool in_taskwait;
  bool in_depend_wait;
  size_t n_depend;
  struct gomp_task *last_parent_depends_on;
  gomp_sem_t taskwait_sem;
};
/* This structure describes a "task" to be run by a thread.  */

struct gomp_task
{
  struct gomp_task *parent;
  struct gomp_task *children;
  struct gomp_task *next_child;
  struct gomp_task *prev_child;
  struct gomp_task *next_queue;
  struct gomp_task *prev_queue;
  struct gomp_task *next_taskgroup;
  struct gomp_task *prev_taskgroup;
  struct gomp_taskgroup *taskgroup;
  struct gomp_dependers_vec *dependers;
  struct htab *depend_hash;
  struct gomp_taskwait *taskwait;
  size_t depend_count;
  size_t num_dependees;
  struct gomp_task_icv icv;
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_tied_task;
  bool final_task;
  bool copy_ctors_done;
  bool parent_depends_on;
  struct gomp_task_depend_entry depend[];
};

struct gomp_taskgroup
{
  struct gomp_taskgroup *prev;
  struct gomp_task *children;
  bool in_taskgroup_wait;
  bool cancelled;
  gomp_sem_t taskgroup_sem;
  size_t num_children;
};
/* This structure describes a "team" of threads.  These are the threads
   that are spawned by a PARALLEL construct, as well as the work sharing
   constructs that the team encounters.  */

struct gomp_team
{
  /* This is the number of threads in the current team.  */
  unsigned nthreads;

  /* This is the number of gomp_work_share structs that have been allocated
     as a block last time.  */
  unsigned work_share_chunk;

  /* This is the saved team state that applied to a master thread before
     the current thread was created.  */
  struct gomp_team_state prev_ts;

  /* This semaphore should be used by the master thread instead of its
     "native" semaphore in the thread structure.  Required for nested
     parallels, as the master is a member of two teams.  */
  gomp_sem_t master_release;

  /* This points to an array with pointers to the release semaphore
     of the threads in the team.  */
  gomp_sem_t **ordered_release;

  /* List of work shares on which gomp_fini_work_share hasn't been
     called yet.  If the team hasn't been cancelled, this should be
     equal to each thr->ts.work_share, but otherwise it can be a possibly
     long list of workshares.  */
  struct gomp_work_share *work_shares_to_free;

  /* List of gomp_work_share structs chained through next_free fields.
     This is populated and taken off only by the first thread in the
     team encountering a new work sharing construct, in a critical
     section.  */
  struct gomp_work_share *work_share_list_alloc;

  /* List of gomp_work_share structs freed by free_work_share.  New
     entries are atomically added to the start of the list, and
     alloc_work_share can safely only move all but the first entry
     to work_share_list_alloc, as free_work_share can happen concurrently
     with alloc_work_share.  */
  struct gomp_work_share *work_share_list_free;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of simple single regions encountered by threads in this
     team.  */
  unsigned long single_count;
#else
  /* Mutex protecting addition of workshares to work_share_list_free.  */
  gomp_mutex_t work_share_list_free_lock;
#endif

  /* This barrier is used for most synchronization of the team.  */
  gomp_barrier_t barrier;

  /* Initial work shares, to avoid allocating any gomp_work_share
     structs in the common case.  */
  struct gomp_work_share work_shares[8];

  gomp_mutex_t task_lock;
  struct gomp_task *task_queue;
  /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team.  */
  unsigned int task_count;
  /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled.  */
  unsigned int task_queued_count;
  /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
     directly in gomp_barrier_handle_tasks; tasks spawned
     from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
     that is called from a task run from gomp_barrier_handle_tasks.
     task_running_count should always be <= team->nthreads,
     and if the current task isn't in_tied_task, then it will be
     even < team->nthreads.  */
  unsigned int task_running_count;
  int work_share_cancelled;
  int team_cancelled;

  /* This array contains structures for implicit tasks.  */
  struct gomp_task implicit_task[];
};
/* This structure contains all data that is private to libgomp and is
   allocated per thread.  */

struct gomp_thread
{
  /* This is the function that the thread should run upon launch.  */
  void (*fn) (void *data);
  void *data;

  /* This is the current team state for this thread.  The ts.team member
     is NULL only if the thread is idle.  */
  struct gomp_team_state ts;

  /* This is the task that the thread is currently executing.  */
  struct gomp_task *task;

  /* This semaphore is used for ordered loops.  */
  gomp_sem_t release;

  /* Place this thread is bound to plus one, or zero if not bound
     to any place.  */
  unsigned int place;

  /* User pthread thread pool.  */
  struct gomp_thread_pool *thread_pool;
};

struct gomp_thread_pool
{
  /* This array manages threads spawned from the top level, which will
     return to the idle loop once the current PARALLEL construct ends.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  struct gomp_team *last_team;
  /* Number of threads running in this contention group.  */
  unsigned long threads_busy;

  /* This barrier holds and releases threads waiting in threads.  */
  gomp_barrier_t threads_dock;
};
enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};
/* ... and here is that TLS data.  */

#if defined HAVE_TLS || defined USE_EMUTLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif
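
/* Typical use inside the library: fetch the calling thread's descriptor and
   read its team state.  A hedged example, not code from this header:

     struct gomp_thread *thr = gomp_thread ();
     struct gomp_team *team = thr->ts.team;   // NULL while the thread is idle
     unsigned tid = thr->ts.team_id;          // 0 .. nthreads-1
*/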
extern struct gomp_task_icv *gomp_new_icv (void);

/* Here's how to access the current copy of the ICVs.  */

static inline struct gomp_task_icv *gomp_icv (bool write)
{
  struct gomp_task *task = gomp_thread ()->task;
  if (task)
    return &task->icv;
  else if (write)
    return gomp_new_icv ();
  else
    return &gomp_global_icv;
}
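
/* A hedged usage sketch: pass false when only reading the ICVs (the global
   copy may be returned), true when about to modify them (a per-task copy is
   created if necessary):

     struct gomp_task_icv *icv = gomp_icv (false);  // read-only access
     unsigned long nthreads = icv->nthreads_var;

     gomp_icv (true)->dyn_var = dyn;                // writer, e.g. omp_set_dynamic
*/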
/* The attributes to be used during thread creation.  */
extern pthread_attr_t gomp_thread_attr;

/* Function prototypes.  */

/* affinity.c */

extern void gomp_init_affinity (void);
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
				    long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);

/* alloc.c */

extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);

/* Avoid conflicting prototypes of alloca() in system headers by using
   GCC's builtin alloca().  */
#define gomp_alloca(x)  __builtin_alloca(x)
/* error.c */

extern void gomp_error (const char *, ...)
	__attribute__((format (printf, 1, 2)));
extern void gomp_fatal (const char *, ...)
	__attribute__((noreturn, format (printf, 1, 2)));
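
/* Both functions take printf-style arguments; gomp_fatal additionally
   terminates the process.  Hedged examples of the calling convention
   (the messages are invented for illustration):

     gomp_error ("could not parse environment variable %s", name);
     gomp_fatal ("out of memory allocating %lu bytes", (unsigned long) size);
*/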
/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c */

extern int gomp_iter_ull_static_next (unsigned long long *,
				      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
					       unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
					      unsigned long long *);

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
					unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
				       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);

/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

/* task.c */

extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
			    struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);

static void inline
gomp_finish_task (struct gomp_task *task)
{
  if (__builtin_expect (task->depend_hash != NULL, 0))
    free (task->depend_hash);
}
/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
			     unsigned, struct gomp_team *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);

/* target.c */

extern int gomp_get_num_devices (void);

/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
static inline void
gomp_work_share_init_done (void)
{
  struct gomp_thread *thr = gomp_thread ();
  if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
    gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif
/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.  */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"

#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif
#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
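
/* For illustration, given the definition above,

     omp_lock_symver (omp_set_lock)

   expands to assembler directives equivalent to

     .symver gomp_set_lock_30, omp_set_lock@@OMP_3.0
     .symver gomp_set_lock_25, omp_set_lock@OMP_1.0

   so the *_30 entry point provides the default OMP_3.0 version of the
   symbol and the *_25 entry point the compatibility OMP_1.0 version.  */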
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
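
/* For illustration: ialias (foo) creates a hidden alias gomp_ialias_foo for
   an exported function foo, ialias_redirect (foo) makes subsequent
   references to foo resolve to that hidden alias (avoiding interposition
   and a PLT round trip), and ialias_call (foo) names the alias directly.
   A hedged sketch using a hypothetical exported function:

     int omp_example (void);
     ialias (omp_example)            // defines gomp_ialias_omp_example
     ialias_redirect (omp_example)   // later calls stay inside the library
*/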
#endif /* LIBGOMP_H */