1 // <shared_mutex> -*- C++ -*-
3 // Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file include/shared_mutex
26 * This is a Standard C++ Library header.
29 #ifndef _GLIBCXX_SHARED_MUTEX
30 #define _GLIBCXX_SHARED_MUTEX 1
32 #pragma GCC system_header
34 #if __cplusplus <= 201103L
35 # include <bits/c++14_warning.h>
38 #include <bits/c++config.h>
40 #include <condition_variable>
41 #include <bits/functexcept.h>
43 namespace std _GLIBCXX_VISIBILITY(default)
45 _GLIBCXX_BEGIN_NAMESPACE_VERSION
52 #ifdef _GLIBCXX_USE_C99_STDINT_TR1
53 #ifdef _GLIBCXX_HAS_GTHREADS
55 #define __cpp_lib_shared_timed_mutex 201402
57 /// shared_timed_mutex
58 class shared_timed_mutex
// Implementation selection: use the native POSIX rwlock when both the
// rwlock type and timed mutex locking are available; otherwise fall back
// to the condition-variable based implementation further below.
60 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
// pthread_rwlock_timed{rd,wr}lock take absolute timeouts measured against
// the system clock, so that is the clock all relative waits convert to.
61 typedef chrono::system_clock __clock_t;
63 #ifdef PTHREAD_RWLOCK_INITIALIZER
// A static initializer exists: the rwlock needs no explicit
// pthread_rwlock_init()/pthread_rwlock_destroy() calls, so the
// constructor and destructor can be defaulted.
64 pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
67 shared_timed_mutex() = default;
68 ~shared_timed_mutex() = default;
// No static initializer available: the rwlock is created/destroyed
// manually by the constructor/destructor bodies that follow.
70 pthread_rwlock_t _M_rwlock;
// Constructor body (non-static-initializer path): create the rwlock and
// translate POSIX error codes into system_error exceptions.
// NOTE(review): the ENOMEM branch (original lines 76-77) is not visible
// in this chunk.
75 int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
78 else if (__ret == EAGAIN)
79 __throw_system_error(int(errc::resource_unavailable_try_again));
80 else if (__ret == EPERM)
81 __throw_system_error(int(errc::operation_not_permitted));
82 // Errors not handled: EBUSY, EINVAL
83 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// Destructor body: destroy the rwlock.  Destruction while the lock is
// still held (EBUSY) is a precondition violation, so it is only checked
// via the debug-mode assertion, not handled.
88 int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
89 // Errors not handled: EBUSY, EINVAL
90 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// A mutex is not copyable: copying would duplicate ownership state.
94 shared_timed_mutex(const shared_timed_mutex&) = delete;
95 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
97 // Exclusive ownership
// lock(): block until the write lock is acquired.  A self-deadlock
// (EDEADLK) is reported by throwing resource_deadlock_would_occur,
// as the standard requires for lock().
102 int __ret = pthread_rwlock_wrlock(&_M_rwlock);
103 if (__ret == EDEADLK)
104 __throw_system_error(int(errc::resource_deadlock_would_occur));
105 // Errors not handled: EINVAL
106 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock(): non-blocking attempt at the write lock.  EBUSY simply
// means the lock is held by someone else, so report failure rather
// than throwing.
112 int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
113 if (__ret == EBUSY) return false;
114 // Errors not handled: EINVAL
115 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_for(): convert the relative timeout to an absolute time on
// __clock_t (system_clock) and delegate to try_lock_until.
119 template<typename _Rep, typename _Period>
121 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
123 return try_lock_until(__clock_t::now() + __rel_time);
// try_lock_until() for the native clock: split the absolute time point
// into whole seconds plus leftover nanoseconds, as required by the
// timespec-style __gthread_time_t that the pthread call consumes.
126 template<typename _Duration>
128 try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
130 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
131 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
133 __gthread_time_t __ts =
135 static_cast<std::time_t>(__s.time_since_epoch().count()),
136 static_cast<long>(__ns.count())
139 int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
140 // On self-deadlock, we just fail to acquire the lock.  Technically,
141 // the program violated the precondition.
142 if (__ret == ETIMEDOUT || __ret == EDEADLK)
144 // Errors not handled: EINVAL
145 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_until() for an arbitrary clock: translate the deadline into
// an equivalent deadline on __clock_t by measuring the offset between
// "now" on both clocks, then delegate to the native-clock overload.
// The conversion is only as accurate as the two now() calls are close.
149 template<typename _Clock, typename _Duration>
151 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
153 // DR 887 - Sync unknown clock to known clock.
154 const typename _Clock::time_point __c_entry = _Clock::now();
155 const __clock_t::time_point __s_entry = __clock_t::now();
156 const auto __delta = __abs_time - __c_entry;
157 const auto __s_atime = __s_entry + __delta;
158 return try_lock_until(__s_atime);
// unlock(): release the write (or read) lock.  Unlocking a lock this
// thread does not hold is a precondition violation; only the
// debug-mode assertion checks for it.
164 int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
165 // Errors not handled: EPERM, EBUSY, EINVAL
166 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// lock_shared(): acquire a read lock, retrying on EAGAIN.
175 // We retry if we exceeded the maximum number of read locks supported by
176 // the POSIX implementation; this can result in busy-waiting, but this
177 // is okay based on the current specification of forward progress
178 // guarantees by the standard.
180 __ret = pthread_rwlock_rdlock(&_M_rwlock);
181 while (__ret == EAGAIN);
182 if (__ret == EDEADLK)
183 __throw_system_error(int(errc::resource_deadlock_would_occur));
184 // Errors not handled: EINVAL
185 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_shared(): non-blocking read-lock attempt.  Both "writer
// holds the lock" (EBUSY) and "reader count at implementation maximum"
// (EAGAIN) are reported as a plain failure, never an exception.
191 int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
192 // If the maximum number of read locks has been exceeded, we just fail
193 // to acquire the lock.  Unlike for lock(), we are not allowed to throw
195 if (__ret == EBUSY || __ret == EAGAIN) return false;
196 // Errors not handled: EINVAL
197 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_shared_for(): relative timeout -> absolute __clock_t
// deadline, then delegate.
201 template<typename _Rep, typename _Period>
203 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
205 return try_lock_shared_until(__clock_t::now() + __rel_time);
// try_lock_shared_until() for the native clock: convert the deadline to
// a seconds/nanoseconds timespec and call the timed pthread read lock.
208 template<typename _Duration>
210 try_lock_shared_until(const chrono::time_point<__clock_t,
213 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
214 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
216 __gthread_time_t __ts =
218 static_cast<std::time_t>(__s.time_since_epoch().count()),
219 static_cast<long>(__ns.count())
223 // Unlike for lock(), we are not allowed to throw an exception so if
224 // the maximum number of read locks has been exceeded, or we would
225 // deadlock, we just try to acquire the lock again (and will time out
227 // In cases where we would exceed the maximum number of read locks
228 // throughout the whole time until the timeout, we will fail to
229 // acquire the lock even if it would be logically free; however, this
230 // is allowed by the standard, and we made a "strong effort"
231 // (see C++14 30.4.1.4p26).
232 // For cases where the implementation detects a deadlock we
233 // intentionally block and timeout so that an early return isn't
234 // mistaken for a spurious failure, which might help users realise
235 // there is a deadlock.
237 __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
238 while (__ret == EAGAIN || __ret == EDEADLK);
239 if (__ret == ETIMEDOUT)
241 // Errors not handled: EINVAL
242 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_shared_until() for an arbitrary clock: same DR 887 clock
// synchronisation as the exclusive overload above — measure both clocks
// at entry, re-express the deadline on __clock_t, and delegate.
246 template<typename _Clock, typename _Duration>
248 try_lock_shared_until(const chrono::time_point<_Clock,
249 _Duration>& __abs_time)
251 // DR 887 - Sync unknown clock to known clock.
252 const typename _Clock::time_point __c_entry = _Clock::now();
253 const __clock_t::time_point __s_entry = __clock_t::now();
254 const auto __delta = __abs_time - __c_entry;
255 const auto __s_atime = __s_entry + __delta;
256 return try_lock_shared_until(__s_atime);
265 #else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
267 // Must use the same clock as condition_variable
268 typedef chrono::system_clock __clock_t;
270 // Based on Howard Hinnant's reference implementation from N2406.
272 // The high bit of _M_state is the write-entered flag which is set to
273 // indicate a writer has taken the lock or is queuing to take the lock.
274 // The remaining bits are the count of reader locks.
276 // To take a reader lock, block on gate1 while the write-entered flag is
277 // set or the maximum number of reader locks is held, then increment the
278 // reader lock count.
279 // To release, decrement the count, then if the write-entered flag is set
280 // and the count is zero then signal gate2 to wake a queued writer,
281 // otherwise if the maximum number of reader locks was held signal gate1
284 // To take a writer lock, block on gate1 while the write-entered flag is
285 // set, then set the write-entered flag to start queueing, then block on
286 // gate2 while the number of reader locks is non-zero.
287 // To release, unset the write-entered flag and signal gate1 to wake all
288 // blocked readers and writers.
290 // This means that when no reader locks are held readers and writers get
291 // equal priority. When one or more reader locks is held a writer gets
292 // priority and no more reader locks can be taken while the writer is
295 // Only locked when accessing _M_state or waiting on condition variables.
// NOTE(review): the declarations of the protecting mutex (_M_mut,
// original line 296) and of the state word (_M_state, original line
// 302) are not visible in this chunk but are referenced throughout.
297 // Used to block while write-entered is set or reader count at maximum.
298 condition_variable _M_gate1;
299 // Used to block queued writers while reader count is non-zero.
300 condition_variable _M_gate2;
301 // The write-entered flag and reader count.
// _S_write_entered isolates the top bit of the unsigned state word;
// every remaining bit pattern is a valid reader count, so the maximum
// reader count is the complement of the flag.
304 static constexpr unsigned _S_write_entered
305 = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
306 static constexpr unsigned _S_max_readers = ~_S_write_entered;
308 // Test whether the write-entered flag is set. _M_mut must be locked.
309 bool _M_write_entered() const { return _M_state & _S_write_entered; }
311 // The number of reader locks currently held. _M_mut must be locked.
312 unsigned _M_readers() const { return _M_state & _S_max_readers; }
// Start with no readers and the write-entered flag clear.
315 shared_timed_mutex() : _M_state(0) {}
317 ~shared_timed_mutex()
// Destroying a mutex that is still locked (state non-zero) is a
// precondition violation; checked only in debug mode.
319 _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
322 shared_timed_mutex(const shared_timed_mutex&) = delete;
323 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
325 // Exclusive ownership
// lock(): two-phase acquisition.  First wait on gate1 until no other
// writer has entered, claim the write-entered flag (which stops new
// readers), then wait on gate2 until all existing readers have left.
330 unique_lock<mutex> __lk(_M_mut);
331 // Wait until we can set the write-entered flag.
332 _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
333 _M_state |= _S_write_entered;
334 // Then wait until there are no more readers.
335 _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
// try_lock(): succeeds only when the state mutex is immediately
// available AND the lock is completely free (no readers, no writer).
341 unique_lock<mutex> __lk(_M_mut, try_to_lock);
342 if (__lk.owns_lock() && _M_state == 0)
344 _M_state = _S_write_entered;
// try_lock_for(): relative timeout -> absolute deadline on __clock_t
// (the clock condition_variable uses), then delegate.
350 template<typename _Rep, typename _Period>
352 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
354 return try_lock_until(__clock_t::now() + __rel_time);
// try_lock_until(): timed version of the two-phase write lock.  If the
// second phase (waiting for readers to drain) times out, the
// write-entered flag must be rolled back and gate1 notified, otherwise
// blocked readers/writers would wait forever.
357 template<typename _Clock, typename _Duration>
359 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
361 unique_lock<mutex> __lk(_M_mut);
362 if (!_M_gate1.wait_until(__lk, __abs_time,
363 [=]{ return !_M_write_entered(); }))
367 _M_state |= _S_write_entered;
368 if (!_M_gate2.wait_until(__lk, __abs_time,
369 [=]{ return _M_readers() == 0; }))
// Timed out while queued: XOR clears the flag we set above.
371 _M_state ^= _S_write_entered;
372 // Wake all threads blocked while the write-entered flag was set.
373 _M_gate1.notify_all();
// unlock(): clear the write-entered flag and wake everything queued on
// gate1 (both readers and writers race for the lock again).
382 lock_guard<mutex> __lk(_M_mut);
383 _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
385 // call notify_all() while mutex is held so that another thread can't
386 // lock and unlock the mutex then destroy *this before we make the call.
387 _M_gate1.notify_all();
// lock_shared(): wait on gate1 until the write-entered flag is clear
// AND the reader count is below the maximum — both conditions are
// captured by the single comparison _M_state < _S_max_readers.
395 unique_lock<mutex> __lk(_M_mut);
396 _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
// try_lock_shared(): fail immediately if the state mutex is contended;
// otherwise succeed when no writer is queued and the reader count has
// room (the same combined _M_state < _S_max_readers test).
403 unique_lock<mutex> __lk(_M_mut, try_to_lock);
404 if (!__lk.owns_lock())
406 if (_M_state < _S_max_readers)
// try_lock_shared_for(): relative timeout -> absolute deadline, then
// delegate to the timed overload.
414 template<typename _Rep, typename _Period>
416 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
418 return try_lock_shared_until(__clock_t::now() + __rel_time);
// try_lock_shared_until(): timed wait on gate1 for the same
// writer-clear-and-room-for-a-reader condition.
421 template <typename _Clock, typename _Duration>
423 try_lock_shared_until(const chrono::time_point<_Clock,
424 _Duration>& __abs_time)
426 unique_lock<mutex> __lk(_M_mut);
427 if (!_M_gate1.wait_until(__lk, __abs_time,
428 [=]{ return _M_state < _S_max_readers; }))
// unlock_shared(): drop one reader.  The pre-decrement value __prev is
// needed to detect the reader-overflow case (count was at maximum).
439 lock_guard<mutex> __lk(_M_mut);
440 _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
441 auto __prev = _M_state--;
442 if (_M_write_entered())
444 // Wake the queued writer if there are no more readers.
445 if (_M_readers() == 0)
446 _M_gate2.notify_one();
447 // No need to notify gate1 because we give priority to the queued
448 // writer, and that writer will eventually notify gate1 after it
449 // clears the write-entered flag.
// No writer queued: only threads blocked because the reader count hit
// the maximum could be waiting on gate1; one slot just freed up.
453 // Wake any thread that was blocked on reader overflow.
454 if (__prev == _S_max_readers)
455 _M_gate1.notify_one();
458 #endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
460 #endif // _GLIBCXX_HAS_GTHREADS
// shared_lock: RAII wrapper that manages SHARED (reader) ownership of a
// mutex, mirroring unique_lock's interface but calling the
// *_shared member functions of the wrapped mutex type.
463 template<typename _Mutex>
467 typedef _Mutex mutex_type;
// Default: no mutex, no ownership.
471 shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
// Blocking constructor: acquires a shared lock immediately.
474 shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
475 { __m.lock_shared(); }
// defer_lock: associate without locking.
477 shared_lock(mutex_type& __m, defer_lock_t) noexcept
478 : _M_pm(&__m), _M_owns(false) { }
// try_to_lock: non-blocking attempt; ownership reflects the result.
480 shared_lock(mutex_type& __m, try_to_lock_t)
481 : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }
// adopt_lock: caller already holds a shared lock on __m.
483 shared_lock(mutex_type& __m, adopt_lock_t)
484 : _M_pm(&__m), _M_owns(true) { }
// Timed constructors: attempt until an absolute deadline / for a
// relative duration; ownership reflects success.
486 template<typename _Clock, typename _Duration>
487 shared_lock(mutex_type& __m,
488 const chrono::time_point<_Clock, _Duration>& __abs_time)
489 : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }
491 template<typename _Rep, typename _Period>
492 shared_lock(mutex_type& __m,
493 const chrono::duration<_Rep, _Period>& __rel_time)
494 : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }
// Destructor body: release the shared lock if this object owns it.
499 _M_pm->unlock_shared();
502 shared_lock(shared_lock const&) = delete;
503 shared_lock& operator=(shared_lock const&) = delete;
// Move operations: delegate to default-construct + swap so the source
// is left disassociated and the target releases any lock it held.
505 shared_lock(shared_lock&& __sl) noexcept : shared_lock()
509 operator=(shared_lock&& __sl) noexcept
511 shared_lock(std::move(__sl)).swap(*this);
// lock(): blocking shared acquisition (after the _M_lockable()
// precondition check defined below).
519 _M_pm->lock_shared();
// try_lock()/try_lock_for()/try_lock_until(): forward to the mutex's
// shared attempt and record the outcome in _M_owns in one step.
527 return _M_owns = _M_pm->try_lock_shared();
530 template<typename _Rep, typename _Period>
532 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
535 return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
538 template<typename _Clock, typename _Duration>
540 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
543 return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
// unlock(): unlocking without ownership is reported via
// resource_deadlock_would_occur, matching unique_lock's behaviour.
550 __throw_system_error(int(errc::resource_deadlock_would_occur));
551 _M_pm->unlock_shared();
// swap(): exchange both the mutex pointer and the ownership flag.
558 swap(shared_lock& __u) noexcept
560 std::swap(_M_pm, __u._M_pm);
561 std::swap(_M_owns, __u._M_owns);
// release(): give up the association (but NOT the lock itself) and
// return the mutex pointer; std::exchange resets _M_pm to null.
568 return std::exchange(_M_pm, nullptr);
// Observers.
573 bool owns_lock() const noexcept { return _M_owns; }
575 explicit operator bool() const noexcept { return _M_owns; }
577 mutex_type* mutex() const noexcept { return _M_pm; }
// _M_lockable(): shared precondition check for the lock functions —
// no associated mutex is operation_not_permitted; locking twice via
// the same shared_lock is resource_deadlock_would_occur.
583 if (_M_pm == nullptr)
584 __throw_system_error(int(errc::operation_not_permitted));
586 __throw_system_error(int(errc::resource_deadlock_would_occur));
593 /// Swap specialization for shared_lock
// Non-member swap so that ADL and std::swap find the noexcept
// member-wise exchange defined by shared_lock::swap.
594 template<typename _Mutex>
596 swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
599 #endif // _GLIBCXX_USE_C99_STDINT_TR1
602 _GLIBCXX_END_NAMESPACE_VERSION
607 #endif // _GLIBCXX_SHARED_MUTEX