boost.atomic: ppc fix
[boost_lockfree.git] / boost / lockfree / detail / freelist.hpp
blob 8094f205dac1e76c9772be23622f0a33b2cf1575
// lock-free freelist
//
// Copyright (C) 2008, 2009, 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Disclaimer: Not a Boost library.
#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED

#include <boost/lockfree/detail/tagged_ptr.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/noncopyable.hpp>

#include <boost/mpl/map.hpp>
#include <boost/mpl/apply.hpp>
#include <boost/mpl/at.hpp>
#include <boost/type_traits/is_pod.hpp>

#include <algorithm>            /* for std::min */
#include <memory>               /* for std::allocator */
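// Note: detail/atomic.hpp is expected to pull atomic<>, memory_order_consume
// and memory_order_relaxed into this namespace (from std:: or boost::,
// depending on what the platform provides), which is why they appear
// unqualified below.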
namespace boost
{
namespace lockfree
{
namespace detail
{

struct freelist_node
{
    tagged_ptr<freelist_node> next;
};
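// The freelist is an intrusive stack of freelist_node entries, linked through
// tagged_ptr values. The tag is a modification counter advanced on every pop,
// so that compare_exchange can detect the ABA problem (a node being popped,
// recycled and pushed again between the head load and the CAS).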
template <typename T,
          bool allocate_may_allocate,
          typename Alloc = std::allocator<T>
         >
class freelist_stack:
    Alloc
{
    typedef tagged_ptr<freelist_node> tagged_node_ptr;

public:
    freelist_stack (std::size_t n = 0):
        pool_(tagged_node_ptr(NULL))
    {
        reserve_unsafe(n);
    }
    void reserve (std::size_t count)
    {
        for (std::size_t i = 0; i != count; ++i) {
            T * node = Alloc::allocate(1);
            deallocate(node);
        }
    }

    void reserve_unsafe (std::size_t count)
    {
        for (std::size_t i = 0; i != count; ++i) {
            T * node = Alloc::allocate(1);
            deallocate_unsafe(node);
        }
    }
    T * construct (void)
    {
        T * node = allocate();
        if (node)
            new(node) T();
        return node;
    }

    template <typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        T * node = allocate();
        if (node)
            new(node) T(arg);
        return node;
    }

    T * construct_unsafe (void)
    {
        T * node = allocate_unsafe();
        if (node)
            new(node) T();
        return node;
    }

    template <typename ArgumentType>
    T * construct_unsafe (ArgumentType const & arg)
    {
        T * node = allocate_unsafe();
        if (node)
            new(node) T(arg);
        return node;
    }
    void destruct (T * n)
    {
        n->~T();
        deallocate(n);
    }

    void destruct_unsafe (T * n)
    {
        n->~T();
        deallocate_unsafe(n);
    }
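    // allocate() pops a node from the stack in a compare_exchange loop; when
    // the pool is empty it either falls back to the allocator or returns 0,
    // depending on allocate_may_allocate. The new head's tag is incremented
    // so that a concurrent pop/push recycling the same node cannot let the
    // CAS succeed spuriously. The *_unsafe variants do the same pointer
    // surgery with plain relaxed loads and stores and must not be used while
    // other threads access the freelist.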
    T * allocate (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);

        for(;;) {
            if (!old_pool.get_ptr()) {
                if (allocate_may_allocate)
                    return Alloc::allocate(1);
                else
                    return 0;
            }

            freelist_node * new_pool_ptr = old_pool->next.get_ptr();
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag() + 1);

            if (pool_.compare_exchange_weak(old_pool, new_pool)) {
                void * ptr = old_pool.get_ptr();
                return reinterpret_cast<T*>(ptr);
            }
        }
    }
    T * allocate_unsafe (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);

        if (!old_pool.get_ptr()) {
            if (allocate_may_allocate)
                return Alloc::allocate(1);
            else
                return 0;
        }

        freelist_node * new_pool_ptr = old_pool->next.get_ptr();
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag() + 1);

        pool_.store(new_pool, memory_order_relaxed);
        void * ptr = old_pool.get_ptr();
        return reinterpret_cast<T*>(ptr);
    }
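    // deallocate() pushes a node back: the memory is reinterpreted as a
    // freelist_node, linked to the current head and installed as the new
    // head by compare_exchange. The tag is carried over unchanged; only the
    // pop side needs to advance it.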
    void deallocate (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        for(;;) {
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
            new_pool->next.set_ptr(old_pool.get_ptr());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }
    void deallocate_unsafe (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
        new_pool->next.set_ptr(old_pool.get_ptr());

        pool_.store(new_pool, memory_order_relaxed);
    }
    ~freelist_stack(void)
    {
        tagged_node_ptr current (pool_);

        while (current) {
            freelist_node * current_ptr = current.get_ptr();
            if (current_ptr)
                current = current_ptr->next;
            Alloc::deallocate((T*)current_ptr, 1);
        }
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

private:
    atomic<tagged_node_ptr> pool_;
};
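// Illustrative usage (a sketch, not part of the original header; my_node is a
// hypothetical payload type no smaller than freelist_node, since freed nodes
// are reused in place as freelist links):
//
//   boost::lockfree::detail::freelist_stack<my_node, true> pool(64);
//   my_node * n = pool.construct();   // pop a free node, or heap-allocate one
//   if (n)
//       pool.destruct(n);             // run ~my_node() and push the node back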
} /* namespace detail */
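// Tag types, presumably consumed by the container front-ends to select a
// freelist flavour: a caching freelist may grow through the allocator, while
// a static freelist stays bounded (cf. allocate_may_allocate above).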
struct caching_freelist_t {};
struct static_freelist_t {};

} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */