lockfree: add lockfree spsc ringbuffer
boost_lockfree.git: boost/lockfree/ringbuffer.hpp
// lock-free single-producer/single-consumer ringbuffer
// this algorithm is implemented in various projects (jack, portaudio, supercollider)
//
// implementation for c++
//
// Copyright (C) 2009 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Disclaimer: Not a Boost library.

#ifndef BOOST_LOCKFREE_RINGBUFFER_HPP_INCLUDED
#define BOOST_LOCKFREE_RINGBUFFER_HPP_INCLUDED

#include <boost/atomic.hpp>
#include <boost/array.hpp>
#include <boost/noncopyable.hpp>
#include <boost/smart_ptr/scoped_array.hpp>

#include "detail/branch_hints.hpp"
#include "detail/prefix.hpp"
namespace boost
{
namespace lockfree
{

namespace detail
{

template <typename T>
class ringbuffer_internal:
    boost::noncopyable
{
    /* write_index_ is the slot written last by the producer, read_index_ the slot
     * consumed last by the consumer; the buffer is empty when both are equal, so
     * one slot always stays unused. */
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t);
    atomic<size_t> write_index_;
    char padding1[padding_size]; /* force read_index and write_index to different cache lines */
    atomic<size_t> read_index_;

protected:
    ringbuffer_internal(void):
        write_index_(0), read_index_(0)
    {}

    static size_t next_index(size_t arg, size_t max_size)
    {
        size_t ret = arg + 1;
        while (unlikely(ret >= max_size))
            ret -= max_size;
        return ret;
    }

    bool enqueue(T const & t, T * buffer, size_t max_size)
    {
        size_t next = next_index(write_index_.load(memory_order_acquire), max_size);

        if (next == read_index_.load(memory_order_acquire))
            return false; /* ringbuffer is full */

        buffer[next] = t;

        write_index_.store(next, memory_order_release);

        return true;
    }

    bool dequeue(T * ret, T * buffer, size_t max_size)
    {
        if (empty())
            return false;

        size_t next = next_index(read_index_.load(memory_order_acquire), max_size);
        *ret = buffer[next];
        read_index_.store(next, memory_order_release);
        return true;
    }

public:
    void reset(void)
    {
        write_index_.store(0, memory_order_relaxed);
        read_index_.store(0, memory_order_release);
    }

    bool empty(void)
    {
        return write_index_.load(memory_order_relaxed) == read_index_.load(memory_order_relaxed);
    }
};

} /* namespace detail */
template <typename T, size_t max_size>
class ringbuffer:
    public detail::ringbuffer_internal<T>
{
    boost::array<T, max_size> array_;

public:
    bool enqueue(T const & t)
    {
        return detail::ringbuffer_internal<T>::enqueue(t, array_.c_array(), max_size);
    }

    bool dequeue(T * ret)
    {
        return detail::ringbuffer_internal<T>::dequeue(ret, array_.c_array(), max_size);
    }
};

/* specialization with a buffer size that is set at run time */
template <typename T>
class ringbuffer<T, 0>:
    public detail::ringbuffer_internal<T>
{
    size_t max_size_;
    scoped_array<T> array_;

public:
    ringbuffer(size_t max_size):
        max_size_(max_size), array_(new T[max_size])
    {}

    bool enqueue(T const & t)
    {
        return detail::ringbuffer_internal<T>::enqueue(t, array_.get(), max_size_);
    }

    bool dequeue(T * ret)
    {
        return detail::ringbuffer_internal<T>::dequeue(ret, array_.get(), max_size_);
    }
};
} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_RINGBUFFER_HPP_INCLUDED */
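
Usage sketch (not part of the header): a minimal producer/consumer pair built on the interface above. It assumes the header is reachable as <boost/lockfree/ringbuffer.hpp> and that Boost.Thread is available; the queue name, element type, capacity and iteration count are placeholders, not taken from the library.

// minimal spsc example: one producer thread, one consumer thread
#include <boost/lockfree/ringbuffer.hpp>
#include <boost/thread.hpp>
#include <iostream>

namespace {

boost::lockfree::ringbuffer<int, 1024> queue;   /* holds at most 1023 elements,
                                                   one slot stays unused */

void producer(void)
{
    for (int i = 0; i != 100000; ++i)
        while (!queue.enqueue(i))
            boost::this_thread::yield();        /* ringbuffer full, retry */
}

void consumer(void)
{
    long sum = 0;
    for (int i = 0; i != 100000; ++i) {
        int value;
        while (!queue.dequeue(&value))
            boost::this_thread::yield();        /* ringbuffer empty, retry */
        sum += value;
    }
    std::cout << "consumed sum: " << sum << std::endl;
}

} /* anonymous namespace */

int main(void)
{
    boost::thread writer(producer);
    boost::thread reader(consumer);
    writer.join();
    reader.join();
    return 0;
}

The run-time sized specialization is used the same way, except that the capacity is passed to the constructor, e.g. boost::lockfree::ringbuffer<int, 0> q(1024);.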