[official-gcc.git] / libgomp / critical.c
/* Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the CRITICAL construct.  */

#include "libgomp.h"
#include <stdlib.h>
static gomp_mutex_t default_lock;

void
GOMP_critical_start (void)
{
  /* There is an implicit flush on entry to a critical region.  */
  __atomic_thread_fence (MEMMODEL_RELEASE);
  gomp_mutex_lock (&default_lock);
}
void
GOMP_critical_end (void)
{
  gomp_mutex_unlock (&default_lock);
}
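/* A minimal sketch of the expected use of the two entry points above,
   assuming the usual lowering of an unnamed critical region (the variable
   names are purely illustrative):

       #pragma omp critical
       counter++;

   is emitted roughly as

       GOMP_critical_start ();
       counter++;
       GOMP_critical_end ();

   so every unnamed critical region in the program serializes on the single
   default_lock, with the release fence above providing the flush semantics
   required on entry.  */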
#ifndef HAVE_SYNC_BUILTINS
static gomp_mutex_t create_lock_lock;
#endif
void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
          gomp_mutex_init (nlock);

          plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
          if (plock != NULL)
            {
              /* Another thread installed its mutex first; discard ours
                 and use the one already published in *pptr.  */
              gomp_mutex_destroy (nlock);
              free (nlock);
            }
          else
            plock = nlock;
#else
          gomp_mutex_lock (&create_lock_lock);
          plock = *pptr;
          if (plock == NULL)
            {
              plock = gomp_malloc (sizeof (gomp_mutex_t));
              gomp_mutex_init (plock);
              __sync_synchronize ();
              *pptr = plock;
            }
          gomp_mutex_unlock (&create_lock_lock);
#endif
        }
    }

  gomp_mutex_lock (plock);
}
void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}
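/* A minimal sketch of how the named variant is expected to be used: for
   "#pragma omp critical (name)" the compiler is assumed to emit one
   zero-initialized static pointer slot per critical name (the slot name
   below is hypothetical) and pass its address to both entry points:

       static void *name_lock_slot;

       GOMP_critical_name_start (&name_lock_slot);
       ... protected statements ...
       GOMP_critical_name_end (&name_lock_slot);

   Where gomp_mutex_t is pointer-sized and zero-initialized, the slot itself
   serves as the mutex; otherwise the first thread to arrive allocates a
   mutex and publishes it through the slot, as handled in
   GOMP_critical_name_start above.  */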
/* This mutex is used when atomic operations don't exist for the target
   in the mode requested.  The result is not globally atomic, but works so
   long as all parallel references are within #pragma omp atomic directives.
   According to responses received from omp@openmp.org, this appears to be
   within spec, which makes sense, since that's how several other compilers
   handle this situation as well.  */

static gomp_mutex_t atomic_lock;
void
GOMP_atomic_start (void)
{
  gomp_mutex_lock (&atomic_lock);
}
void
GOMP_atomic_end (void)
{
  gomp_mutex_unlock (&atomic_lock);
}
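/* A minimal sketch of the fallback path described above, assuming the
   target has no native atomic operation for the requested update (the
   example update is illustrative):

       #pragma omp atomic
       x += 1.0;

   is then emitted roughly as

       GOMP_atomic_start ();
       x += 1.0;
       GOMP_atomic_end ();

   so all such updates in the program serialize on the one atomic_lock.  */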
#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
  gomp_mutex_init (&default_lock);
  gomp_mutex_init (&atomic_lock);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&create_lock_lock);
#endif
}
#endif