/* Copyright (C) 2005-2021 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */

#include "libgomp.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
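
/* Illustrative only: a typical worksharing entry point elsewhere in libgomp
   (e.g. the GOMP_loop_* or GOMP_sections_* functions) uses these routines
   roughly as follows:

     if (gomp_work_share_start (0))
       {
         ... first thread initializes thr->ts.work_share ...
         gomp_work_share_init_done ();
       }
     ... all threads take their portion of the work ...
     gomp_work_share_end_nowait ();

   gomp_work_share_init_done is defined in libgomp.h.  */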

/* Allocate a new work share structure, preferably from current team's
   free gomp_work_share cache.  */

static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }

#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));
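  /* The empty asm above with a "+r" constraint makes the compiler treat WS
     as both read and written, so it cannot re-load team->work_share_list_free
     later; the test and the dereference below work on a single snapshot of
     the list head.  */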

  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
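  /* Note that the old list head WS deliberately stays on the free list:
     free_work_share only ever pushes new nodes in front of the head, so
     the nodes behind it can be detached for the alloc list without any
     synchronization.  */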
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif

  team->work_share_chunk *= 2;
  /* Allocating gomp_work_share structures aligned is just an
     optimization, don't do it when using the fallback method.  */
#ifdef GOMP_USE_ALIGNED_WORK_SHARES
  ws = gomp_aligned_alloc (__alignof (struct gomp_work_share),
                           team->work_share_chunk
                           * sizeof (struct gomp_work_share));
#else
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
#endif
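  /* Lay out the new chunk as follows: element 0 is returned to the caller,
     elements 1 .. work_share_chunk-1 are strung onto the per-team alloc
     list, and the chunk itself is linked through next_alloc behind
     team->work_shares[0] so the whole chain can be released when the team
     is destroyed.  */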
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;
  return ws;
}

/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, size_t ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
#define INLINE_ORDERED_TEAM_IDS_SIZE \
  (sizeof (struct gomp_work_share) \
   - offsetof (struct gomp_work_share, inline_ordered_team_ids))
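
      /* INLINE_ORDERED_TEAM_IDS_SIZE is the number of bytes of tail padding
         in struct gomp_work_share that inline_ordered_team_ids can occupy;
         if the ordered data fits there, no separate team_malloc is needed.  */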

      if (__builtin_expect (ordered != 1, 0))
        {
          size_t o = nthreads * sizeof (*ws->ordered_team_ids);
          o += __alignof__ (long long) - 1;
          if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
               & (__alignof__ (long long) - 1)) == 0)
            o &= ~(__alignof__ (long long) - 1);
          ordered += o - 1;
        }
      else
        ordered = nthreads * sizeof (*ws->ordered_team_ids);
      if (ordered > INLINE_ORDERED_TEAM_IDS_SIZE)
        ws->ordered_team_ids = team_malloc (ordered);
      else
        ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0', ordered);
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = ws->inline_ordered_team_ids;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}
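
/* As the code above shows, ORDERED is overloaded: zero means no ordered
   clause, one means a plain ordered clause (the team-id array alone is
   sized from NTHREADS), and any larger value is already a byte count to
   which the aligned team-id array size is added.  */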

/* Do any needed destruction of gomp_work_share fields before it
   is put back into free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    team_free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}

/* Free a work share struct, if not orphaned, put it into current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
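      /* Push WS onto the head of team->work_share_list_free.  With sync
         builtins this is a lock-free (Treiber-stack style) push that retries
         the compare-and-swap until no other thread has moved the list head
         underneath us; otherwise the list lock is taken instead.  */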
#ifdef HAVE_SYNC_BUILTINS
      do
        {
          next_ws = team->work_share_list_free;
          ws->next_free = next_ws;
        }
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                            next_ws, ws));
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}

/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  In all cases the work_share lock is locked.  Return true
   if this was the first thread to reach this point.  */

bool
gomp_work_share_start (size_t ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
#ifdef GOMP_USE_ALIGNED_WORK_SHARES
      ws = gomp_aligned_alloc (__alignof (struct gomp_work_share),
                               sizeof (*ws));
#else
      ws = gomp_malloc (sizeof (*ws));
#endif
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
  ws = gomp_ptrlock_get (&ws->next_ws);
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      struct gomp_work_share *ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}
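
/* A note on the handshake above: gomp_ptrlock_get on the previous work
   share's next_ws returns NULL only to the first thread to arrive, which
   then allocates and initializes the new work share and is expected to
   publish it later via gomp_work_share_init_done; the remaining threads
   wait in gomp_ptrlock_get until the pointer is published.  */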

/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}
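
/* In gomp_work_share_end above, only the thread that arrives last at the
   barrier recycles thr->ts.last_work_share: by that point every other
   thread has switched to the current work share, so the previous one can
   no longer be referenced.  */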

/* The current thread is done with its current work sharing construct.
   This version implies a cancellable barrier at the end of the work-share.  */

bool
gomp_work_share_end_cancel (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Cancellable work sharing constructs cannot be orphaned.  */
  bstate = gomp_barrier_wait_cancel_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }
  thr->ts.last_work_share = NULL;

  return gomp_team_barrier_wait_cancel_end (&team->barrier, bstate);
}
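
/* The value returned above comes straight from the cancellable barrier and
   appears to indicate whether the construct was cancelled while this thread
   was waiting; the GOMP_*_end_cancel entry points forward it to
   compiler-generated code.  */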

/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;
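
  /* Count how many threads in the team have finished with WS, the current
     work share.  Once every thread has, nobody can still be referencing the
     previous work share, and it is recycled below.  */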
#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif

  if (completed == team->nthreads)
    {
      team->work_shares_to_free = thr->ts.work_share;
      free_work_share (team, thr->ts.last_work_share);
    }
  thr->ts.last_work_share = NULL;
}