/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */

#include "libgomp.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
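
/* Each team keeps two singly linked lists of struct gomp_work_share:
   work_share_list_alloc, a per-team cache that alloc_work_share (always
   called inside a critical section) hands entries out of, and
   work_share_list_free, onto which free_work_share returns finished
   entries, possibly concurrently with allocation.  Backing storage is
   obtained in geometrically growing chunks chained through the
   next_alloc field of each chunk's first entry.  */
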
/* Allocate a new work share structure, preferably from current team's
   free gomp_work_share cache.  */

static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }
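
  /* Refill the allocation list from the free list.  In the lock-free
     variant below the current head of work_share_list_free is left in
     place and only its tail is detached, so the pop cannot race with
     concurrent pushes done by free_work_share; the fallback variant
     simply takes the list lock.  */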
#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));

  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
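
  /* Both lists were empty: allocate a fresh chunk, doubling the chunk
     size each time.  Entry 0 is returned to the caller, the remaining
     entries are threaded onto the allocation list, and the chunk itself
     is chained through next_alloc so it can be freed with the team.  */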
  team->work_share_chunk *= 2;
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;

  return ws;
}

/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, bool ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
#define INLINE_ORDERED_TEAM_IDS_CNT \
  ((sizeof (struct gomp_work_share) \
    - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
   / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))
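
      /* INLINE_ORDERED_TEAM_IDS_CNT is the number of team ids that fit
         in the tail padding of struct gomp_work_share itself; only
         larger teams need a separate allocation.  */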
      if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
        ws->ordered_team_ids
          = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
      else
        ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0',
              nthreads * sizeof (*ws->ordered_team_ids));
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = NULL;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}

/* Do any needed destruction of gomp_work_share fields before it
   is put back into free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}

/* Free a work share struct, if not orphaned, put it into current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
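
      /* Push WS onto the head of the team's free list; with sync
         builtins this is a lock-free compare-and-swap loop that retries
         whenever another thread pushed first.  */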
#ifdef HAVE_SYNC_BUILTINS
      do
        {
          next_ws = team->work_share_list_free;
          ws->next_free = next_ws;
        }
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                            next_ws, ws));
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}

/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  In all cases the work_share lock is locked.  Return true
   if this was the first thread to reach this point.  */

bool
gomp_work_share_start (bool ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      ws = gomp_malloc (sizeof (*ws));
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
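
  /* The work shares of a team form a chain linked through next_ws,
     which is a pointer lock: gomp_ptrlock_get returns NULL to exactly
     one thread, which must then allocate and publish the next work
     share, while any other thread blocks until the pointer is set.  */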
  ws = gomp_ptrlock_get (&ws->next_ws);
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      struct gomp_work_share *ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}
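
/* For reference, callers such as the loop routines pair the entry
   points above and below roughly as follows, where ws_init stands in
   for construct-specific setup such as gomp_loop_init:

       if (gomp_work_share_start (false))
         {
           ws_init (thr->ts.work_share, ...);
           gomp_work_share_init_done ();
         }
       ... use thr->ts.work_share ...
       gomp_work_share_end_nowait ();
*/
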
/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);
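
  /* The thread that arrives last at the barrier knows that no other
     thread can still be using the previous work share, so it frees it
     on behalf of the whole team.  */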
  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}

/* The current thread is done with its current work sharing construct.
   This version implies a cancellable barrier at the end of the
   work-share.  */

bool
gomp_work_share_end_cancel (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Cancellable work sharing constructs cannot be orphaned.  */
  bstate = gomp_barrier_wait_cancel_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }
  thr->ts.last_work_share = NULL;

  return gomp_team_barrier_wait_cancel_end (&team->barrier, bstate);
}

/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;
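
  /* Without a barrier, threads finish at different times, so count the
     threads that are done with the previous work share; the one that
     brings the count to team->nthreads frees it.  */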
#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif

  if (completed == team->nthreads)
    {
      team->work_shares_to_free = thr->ts.work_share;
      free_work_share (team, thr->ts.last_work_share);
    }
  thr->ts.last_work_share = NULL;
}