Add execution + assembler tests of AArch64 TRN Intrinsics.
[official-gcc.git] / libgomp / critical.c
blob: 7051441aea0549ce1ddf3510f842fc8f24bc4a95
1 /* Copyright (C) 2005-2014 Free Software Foundation, Inc.
2 Contributed by Richard Henderson <rth@redhat.com>.
4 This file is part of the GNU OpenMP Library (libgomp).
6 Libgomp is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
25 /* This file handles the CRITICAL construct. */
27 #include "libgomp.h"
28 #include <stdlib.h>
31 static gomp_mutex_t default_lock;
33 void
34 GOMP_critical_start (void)
36 /* There is an implicit flush on entry to a critical region. */
37 __atomic_thread_fence (MEMMODEL_RELEASE);
38 gomp_mutex_lock (&default_lock);
41 void
42 GOMP_critical_end (void)
44 gomp_mutex_unlock (&default_lock);
47 #ifndef HAVE_SYNC_BUILTINS
48 static gomp_mutex_t create_lock_lock;
49 #endif
51 void
52 GOMP_critical_name_start (void **pptr)
54 gomp_mutex_t *plock;
56 /* If a mutex fits within the space for a pointer, and is zero initialized,
57 then use the pointer space directly. */
58 if (GOMP_MUTEX_INIT_0
59 && sizeof (gomp_mutex_t) <= sizeof (void *)
60 && __alignof (gomp_mutex_t) <= sizeof (void *))
61 plock = (gomp_mutex_t *)pptr;
63 /* Otherwise we have to be prepared to malloc storage. */
64 else
66 plock = *pptr;
68 if (plock == NULL)
70 #ifdef HAVE_SYNC_BUILTINS
71 gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
72 gomp_mutex_init (nlock);
74 plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
75 if (plock != NULL)
77 gomp_mutex_destroy (nlock);
78 free (nlock);
80 else
81 plock = nlock;
82 #else
83 gomp_mutex_lock (&create_lock_lock);
84 plock = *pptr;
85 if (plock == NULL)
87 plock = gomp_malloc (sizeof (gomp_mutex_t));
88 gomp_mutex_init (plock);
89 __sync_synchronize ();
90 *pptr = plock;
92 gomp_mutex_unlock (&create_lock_lock);
93 #endif
97 gomp_mutex_lock (plock);
100 void
101 GOMP_critical_name_end (void **pptr)
103 gomp_mutex_t *plock;
105 /* If a mutex fits within the space for a pointer, and is zero initialized,
106 then use the pointer space directly. */
107 if (GOMP_MUTEX_INIT_0
108 && sizeof (gomp_mutex_t) <= sizeof (void *)
109 && __alignof (gomp_mutex_t) <= sizeof (void *))
110 plock = (gomp_mutex_t *)pptr;
111 else
112 plock = *pptr;
114 gomp_mutex_unlock (plock);
117 /* This mutex is used when atomic operations don't exist for the target
118 in the mode requested. The result is not globally atomic, but works so
119 long as all parallel references are within #pragma omp atomic directives.
120 According to responses received from omp@openmp.org, appears to be within
121 spec. Which makes sense, since that's how several other compilers
122 handle this situation as well. */
124 static gomp_mutex_t atomic_lock;
126 void
127 GOMP_atomic_start (void)
129 gomp_mutex_lock (&atomic_lock);
132 void
133 GOMP_atomic_end (void)
135 gomp_mutex_unlock (&atomic_lock);
138 #if !GOMP_MUTEX_INIT_0
139 static void __attribute__((constructor))
140 initialize_critical (void)
142 gomp_mutex_init (&default_lock);
143 gomp_mutex_init (&atomic_lock);
144 #ifndef HAVE_SYNC_BUILTINS
145 gomp_mutex_init (&create_lock_lock);
146 #endif
148 #endif