/* Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This is a Linux specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the futex syscall.  */
29 #ifndef GOMP_BARRIER_H
30 #define GOMP_BARRIER_H 1
36 /* Make sure total/generation is in a mostly read cacheline, while
37 awaited in a separate cacheline. */
38 unsigned total
__attribute__((aligned (64)));
40 unsigned awaited
__attribute__((aligned (64)));
41 unsigned awaited_final
;
44 typedef unsigned int gomp_barrier_state_t
;
46 /* The generation field contains a counter in the high bits, with a few
47 low bits dedicated to flags. Note that TASK_PENDING and WAS_LAST can
48 share space because WAS_LAST is never stored back to generation. */
49 #define BAR_TASK_PENDING 1
50 #define BAR_WAS_LAST 1
51 #define BAR_WAITING_FOR_TASK 2
52 #define BAR_CANCELLED 4
55 static inline void gomp_barrier_init (gomp_barrier_t
*bar
, unsigned count
)
59 bar
->awaited_final
= count
;
63 static inline void gomp_barrier_reinit (gomp_barrier_t
*bar
, unsigned count
)
65 __atomic_add_fetch (&bar
->awaited
, count
- bar
->total
, MEMMODEL_ACQ_REL
);
69 static inline void gomp_barrier_destroy (gomp_barrier_t
*bar
)
73 extern void gomp_barrier_wait (gomp_barrier_t
*);
74 extern void gomp_barrier_wait_last (gomp_barrier_t
*);
75 extern void gomp_barrier_wait_end (gomp_barrier_t
*, gomp_barrier_state_t
);
76 extern void gomp_team_barrier_wait (gomp_barrier_t
*);
77 extern void gomp_team_barrier_wait_final (gomp_barrier_t
*);
78 extern void gomp_team_barrier_wait_end (gomp_barrier_t
*,
79 gomp_barrier_state_t
);
80 extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t
*);
81 extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t
*,
82 gomp_barrier_state_t
);
83 extern void gomp_team_barrier_wake (gomp_barrier_t
*, int);
85 extern void gomp_team_barrier_cancel (struct gomp_team
*);
87 static inline gomp_barrier_state_t
88 gomp_barrier_wait_start (gomp_barrier_t
*bar
)
90 unsigned int ret
= __atomic_load_n (&bar
->generation
, MEMMODEL_ACQUIRE
);
91 ret
&= -BAR_INCR
| BAR_CANCELLED
;
92 /* A memory barrier is needed before exiting from the various forms
93 of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section
94 2.8.6 flush Construct, which says there is an implicit flush during
95 a barrier region. This is a convenient place to add the barrier,
96 so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE. */
97 if (__atomic_add_fetch (&bar
->awaited
, -1, MEMMODEL_ACQ_REL
) == 0)
102 static inline gomp_barrier_state_t
103 gomp_barrier_wait_cancel_start (gomp_barrier_t
*bar
)
105 return gomp_barrier_wait_start (bar
);
108 /* This is like gomp_barrier_wait_start, except it decrements
109 bar->awaited_final rather than bar->awaited and should be used
110 for the gomp_team_end barrier only. */
111 static inline gomp_barrier_state_t
112 gomp_barrier_wait_final_start (gomp_barrier_t
*bar
)
114 unsigned int ret
= __atomic_load_n (&bar
->generation
, MEMMODEL_ACQUIRE
);
115 ret
&= -BAR_INCR
| BAR_CANCELLED
;
116 /* See above gomp_barrier_wait_start comment. */
117 if (__atomic_add_fetch (&bar
->awaited_final
, -1, MEMMODEL_ACQ_REL
) == 0)
123 gomp_barrier_last_thread (gomp_barrier_state_t state
)
125 return state
& BAR_WAS_LAST
;
128 /* All the inlines below must be called with team->task_lock
132 gomp_team_barrier_set_task_pending (gomp_barrier_t
*bar
)
134 bar
->generation
|= BAR_TASK_PENDING
;
138 gomp_team_barrier_clear_task_pending (gomp_barrier_t
*bar
)
140 bar
->generation
&= ~BAR_TASK_PENDING
;
144 gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t
*bar
)
146 bar
->generation
|= BAR_WAITING_FOR_TASK
;
150 gomp_team_barrier_waiting_for_tasks (gomp_barrier_t
*bar
)
152 return (bar
->generation
& BAR_WAITING_FOR_TASK
) != 0;
156 gomp_team_barrier_cancelled (gomp_barrier_t
*bar
)
158 return __builtin_expect ((bar
->generation
& BAR_CANCELLED
) != 0, 0);
162 gomp_team_barrier_done (gomp_barrier_t
*bar
, gomp_barrier_state_t state
)
164 bar
->generation
= (state
& -BAR_INCR
) + BAR_INCR
;
167 #endif /* GOMP_BARRIER_H */