tr_TR locale: Base collation on iso14651_t1 [BZ #22527]
[glibc.git] / sysdeps / x86 / elide.h
blob 53de41836ef5ba98450a27c343b6687c82b107ce
/* elide.h: Generic lock elision support.
   Copyright (C) 2014-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
18 #ifndef ELIDE_H
19 #define ELIDE_H 1
21 #include <hle.h>
22 #include <elision-conf.h>
23 #include <atomic.h>
26 /* Adapt elision with ADAPT_COUNT and STATUS and decide retries. */
28 static inline bool
29 elision_adapt(signed char *adapt_count, unsigned int status)
31 if (status & _XABORT_RETRY)
32 return false;
33 if ((status & _XABORT_EXPLICIT)
34 && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
36 /* Right now we skip here. Better would be to wait a bit
37 and retry. This likely needs some spinning. Be careful
38 to avoid writing the lock.
39 Using relaxed MO and separate atomic accesses is sufficient because
40 adapt_count is just a hint. */
41 if (atomic_load_relaxed (adapt_count) != __elision_aconf.skip_lock_busy)
42 atomic_store_relaxed (adapt_count, __elision_aconf.skip_lock_busy);
44 /* Internal abort. There is no chance for retry.
45 Use the normal locking and next time use lock.
46 Be careful to avoid writing to the lock. See above for MO. */
47 else if (atomic_load_relaxed (adapt_count)
48 != __elision_aconf.skip_lock_internal_abort)
49 atomic_store_relaxed (adapt_count,
50 __elision_aconf.skip_lock_internal_abort);
51 return true;
/* is_lock_free must be executed inside the transaction */

/* Returns true if the lock defined by IS_LOCK_FREE was elided.
   ADAPT_COUNT is a per-lock state variable; it must be accessed
   atomically to avoid data races, but it is just a hint, so relaxed MO
   and separate atomic loads and stores (rather than read-modify-write
   operations) are sufficient.  */

#define ELIDE_LOCK(adapt_count, is_lock_free)			\
  ({								\
    int __elide_ret = 0;					\
    if (atomic_load_relaxed (&(adapt_count)) <= 0)		\
      {								\
	/* Attempt the transaction a bounded number of times.  */ \
	int __elide_tries = __elision_aconf.retry_try_xbegin;	\
	while (__elide_tries-- > 0)				\
	  {							\
	    unsigned int __elide_status = _xbegin ();		\
	    if (__elide_status == _XBEGIN_STARTED)		\
	      {							\
		if (is_lock_free)				\
		  {						\
		    /* Lock elided; we are inside the transaction.  */ \
		    __elide_ret = 1;				\
		    break;					\
		  }						\
		/* Lock already held: abort with our marker code so	\
		   elision_adapt can tell this apart from internal	\
		   aborts.  */					\
		_xabort (_ABORT_LOCK_BUSY);			\
	      }							\
	    if (!elision_adapt (&(adapt_count), __elide_status)) \
	      break;						\
	  }							\
      }								\
    else							\
      /* Still skipping elision; count down the hint without an RMW.  */ \
      atomic_store_relaxed (&(adapt_count),			\
			    atomic_load_relaxed (&(adapt_count)) - 1); \
    __elide_ret;						\
  })
/* Returns true if the lock defined by IS_LOCK_FREE was try-elided.
   ADAPT_COUNT is a per-lock state variable.  When WRITE is set, a
   nested trylock inside a transaction aborts with
   _ABORT_NESTED_TRYLOCK instead of eliding.  */

#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({	\
  int __elide_ret = 0;						\
  if (__elision_aconf.retry_try_xbegin > 0)			\
    {								\
      if (write)						\
	_xabort (_ABORT_NESTED_TRYLOCK);			\
      __elide_ret = ELIDE_LOCK (adapt_count, is_lock_free);	\
    }								\
  __elide_ret;							\
  })
/* Returns true if the lock defined by IS_LOCK_FREE was elided (and
   the transaction committed).  The call to _xend crashes if the
   application incorrectly tries to unlock a lock which has not been
   locked.  */

#define ELIDE_UNLOCK(is_lock_free)		\
  ({						\
    int __elide_ret = 0;			\
    if (is_lock_free)				\
      {						\
	/* Commit the elided critical section.  */ \
	_xend ();				\
	__elide_ret = 1;			\
      }						\
    __elide_ret;				\
  })
119 #endif