/* elide.h: Generic lock elision support.
   Copyright (C) 2014-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <hle.h>
#include <elision-conf.h>
#include <atomic.h>
26 /* Adapt elision with ADAPT_COUNT and STATUS and decide retries. */
29 elision_adapt(signed char *adapt_count
, unsigned int status
)
31 if (status
& _XABORT_RETRY
)
33 if ((status
& _XABORT_EXPLICIT
)
34 && _XABORT_CODE (status
) == _ABORT_LOCK_BUSY
)
36 /* Right now we skip here. Better would be to wait a bit
37 and retry. This likely needs some spinning. Be careful
38 to avoid writing the lock.
39 Using relaxed MO and separate atomic accesses is sufficient because
40 adapt_count is just a hint. */
41 if (atomic_load_relaxed (adapt_count
) != __elision_aconf
.skip_lock_busy
)
42 atomic_store_relaxed (adapt_count
, __elision_aconf
.skip_lock_busy
);
44 /* Internal abort. There is no chance for retry.
45 Use the normal locking and next time use lock.
46 Be careful to avoid writing to the lock. See above for MO. */
47 else if (atomic_load_relaxed (adapt_count
)
48 != __elision_aconf
.skip_lock_internal_abort
)
49 atomic_store_relaxed (adapt_count
,
50 __elision_aconf
.skip_lock_internal_abort
);
/* is_lock_free must be executed inside the transaction */

/* Returns true if lock defined by IS_LOCK_FREE was elided.
   ADAPT_COUNT is a per-lock state variable; it must be accessed atomically
   to avoid data races but is just a hint, so using relaxed MO and separate
   atomic loads and stores instead of atomic read-modify-write operations is
   sufficient.  */

#define ELIDE_LOCK(adapt_count, is_lock_free)			\
  ({								\
    int ret = 0;						\
								\
    /* A positive adapt_count means elision is temporarily	\
       disabled; count this acquisition down instead.  */	\
    if (atomic_load_relaxed (&(adapt_count)) <= 0)		\
      {								\
	for (int i = __elision_aconf.retry_try_xbegin; i > 0; i--) \
	  {							\
	    unsigned int status;				\
	    if ((status = _xbegin ()) == _XBEGIN_STARTED)	\
	      {							\
		/* Evaluated inside the transaction: if the	\
		   lock is free the elision succeeds.  */	\
		if (is_lock_free)				\
		  {						\
		    ret = 1;					\
		    break;					\
		  }						\
		/* Lock busy: abort with a code that		\
		   elision_adapt recognizes.  */		\
		_xabort (_ABORT_LOCK_BUSY);			\
	      }							\
	    if (!elision_adapt (&(adapt_count), status))	\
	      break;						\
	  }							\
      }								\
    else							\
      atomic_store_relaxed (&(adapt_count),			\
			    atomic_load_relaxed (&(adapt_count)) - 1); \
    ret;							\
  })
/* Returns true if lock defined by IS_LOCK_FREE was try-elided.
   ADAPT_COUNT is a per-lock state variable.  WRITE is nonzero for an
   exclusive (write) trylock; a trylock-for-write nested inside a
   transaction cannot be elided, so the enclosing transaction is
   aborted.  */

#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({	\
    int ret = 0;						\
    if (__elision_aconf.retry_try_xbegin > 0)			\
      {								\
	if (write)						\
	  _xabort (_ABORT_NESTED_TRYLOCK);			\
	ret = ELIDE_LOCK (adapt_count, is_lock_free);		\
      }								\
    ret;							\
    })
/* Returns true if lock defined by IS_LOCK_FREE was elided.  The call
   to _xend crashes if the application incorrectly tries to unlock a
   lock which has not been locked.  */
108 #define ELIDE_UNLOCK(is_lock_free) \