// lock-free freelist
//
// Copyright (C) 2008-2016 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED

#include <cstring>
#include <limits>
#include <memory>

#include <boost/array.hpp>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>

#include <boost/align/align_up.hpp>
#include <boost/align/aligned_allocator_adaptor.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4100) // unreferenced formal parameter
#pragma warning(disable: 4127) // conditional expression is constant
#endif

namespace boost    {
namespace lockfree {
namespace detail   {

// Node-based freelist: free nodes form an intrusive stack linked by tagged
// pointers. Unless used in bounded mode, it falls back to Alloc when empty.
template <typename T,
          typename Alloc = std::allocator<T>
         >
class freelist_stack:
    Alloc
{
    struct freelist_node
    {
        tagged_ptr<freelist_node> next;
    };

    typedef tagged_ptr<freelist_node> tagged_node_ptr;

public:
    typedef T *           index_t;
    typedef tagged_ptr<T> tagged_node_handle;

    template <typename Allocator>
    freelist_stack (Allocator const & alloc, std::size_t n = 0):
        Alloc(alloc),
        pool_(tagged_node_ptr(NULL))
    {
        for (std::size_t i = 0; i != n; ++i) {
            T * node = Alloc::allocate(1);
            std::memset(node, 0, sizeof(T));
#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(node);
#else
            deallocate<false>(node);
#endif
        }
    }

    template <bool ThreadSafe>
    void reserve (std::size_t count)
    {
        for (std::size_t i = 0; i != count; ++i) {
            T * node = Alloc::allocate(1);
            std::memset(node, 0, sizeof(T));
            deallocate<ThreadSafe>(node);
        }
    }

    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T();
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg);
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg1, arg2);
        return node;
    }

    template <bool ThreadSafe>
    void destruct (tagged_node_handle const & tagged_ptr)
    {
        T * n = tagged_ptr.get_ptr();
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    ~freelist_stack(void)
    {
        tagged_node_ptr current = pool_.load();

        while (current) {
            freelist_node * current_ptr = current.get_ptr();
            if (current_ptr)
                current = current_ptr->next;
            Alloc::deallocate((T*)current_ptr, 1);
        }
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    T * get_handle(T * pointer) const
    {
        return pointer;
    }

    T * get_handle(tagged_node_handle const & handle) const
    {
        return get_pointer(handle);
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return tptr.get_ptr();
    }

    T * get_pointer(T * pointer) const
    {
        return pointer;
    }

    T * null_handle(void) const
    {
        return NULL;
    }

protected: // allow use from subclasses
    template <bool ThreadSafe, bool Bounded>
    T * allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl<Bounded>();
        else
            return allocate_impl_unsafe<Bounded>();
    }

private:
    template <bool Bounded>
    T * allocate_impl (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);

        for(;;) {
            if (!old_pool.get_ptr()) {
                if (!Bounded) {
                    T *ptr = Alloc::allocate(1);
                    std::memset(ptr, 0, sizeof(T));
                    return ptr;
                } else
                    return 0;
            }

            freelist_node * new_pool_ptr = old_pool->next.get_ptr();
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool)) {
                void * ptr = old_pool.get_ptr();
                return reinterpret_cast<T*>(ptr);
            }
        }
    }

    template <bool Bounded>
    T * allocate_impl_unsafe (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);

        if (!old_pool.get_ptr()) {
            if (!Bounded) {
                T *ptr = Alloc::allocate(1);
                std::memset(ptr, 0, sizeof(T));
                return ptr;
            } else
                return 0;
        }

        freelist_node * new_pool_ptr = old_pool->next.get_ptr();
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        void * ptr = old_pool.get_ptr();
        return reinterpret_cast<T*>(ptr);
    }
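
    // The deallocation path below is the inverse of the pop in allocate_impl:
    // the returned node is reinterpreted as a freelist_node, its next pointer
    // is set to the current head, and compare_exchange_weak publishes it as
    // the new head. Pushes reuse the current tag; only successful pops advance
    // it (get_next_tag()), which is what defuses the ABA problem when a node
    // is popped, freed and pushed back while another pop is in flight.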

protected:
    template <bool ThreadSafe>
    void deallocate (T * n)
    {
        if (ThreadSafe)
            deallocate_impl(n);
        else
            deallocate_impl_unsafe(n);
    }

private:
    void deallocate_impl (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        for(;;) {
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
            new_pool->next.set_ptr(old_pool.get_ptr());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    void deallocate_impl_unsafe (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
        new_pool->next.set_ptr(old_pool.get_ptr());

        pool_.store(new_pool, memory_order_relaxed);
    }

    atomic<tagged_node_ptr> pool_;
};

class
BOOST_ALIGNMENT( 4 ) // workaround for bugs in MSVC
tagged_index
{
public:
    typedef boost::uint16_t tag_t;
    typedef boost::uint16_t index_t;

    /** uninitialized constructor */
    tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_index(tagged_index const & rhs):
        index(rhs.index), tag(rhs.tag)
    {}
#else
    tagged_index(tagged_index const & rhs) = default;
#endif

    explicit tagged_index(index_t i, tag_t t = 0):
        index(i), tag(t)
    {}

    /** index access */
    /* @{ */
    index_t get_index() const
    {
        return index;
    }

    void set_index(index_t i)
    {
        index = i;
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const
    {
        return tag;
    }

    tag_t get_next_tag() const
    {
        tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();
        return next;
    }

    void set_tag(tag_t t)
    {
        tag = t;
    }
    /* @} */

    bool operator==(tagged_index const & rhs) const
    {
        return (index == rhs.index) && (tag == rhs.tag);
    }

    bool operator!=(tagged_index const & rhs) const
    {
        return !operator==(rhs);
    }

protected:
    index_t index;
    tag_t tag;
};
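
// tagged_index packs a 16-bit node index and a 16-bit modification tag into a
// single 32-bit value, so atomic<tagged_index> is expected to be lock-free on
// common platforms. The storage policies below provide the node array those
// indices refer to: compiletime_sized_freelist_storage embeds a char buffer
// sized at compile time, while runtime_sized_freelist_storage allocates a
// cacheline-aligned array at construction time. A minimal sketch of the
// storage interface (element type int and the count are illustrative only;
// this is an internal detail, not public API):
//
//     boost::lockfree::detail::runtime_sized_freelist_storage<int>
//         storage(std::allocator<int>(), 128); // zero-filled raw storage for 128 nodes
//     int * base = storage.nodes();            // indices are offsets from this pointer
//     std::size_t n = storage.node_count();    // == 128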

template <typename T,
          std::size_t size>
struct compiletime_sized_freelist_storage
{
    // array-based freelists only support a 16bit address space.
    BOOST_STATIC_ASSERT(size < 65536);

    boost::array<char, size * sizeof(T) + 64> data;

    // unused ... only for API purposes
    template <typename Allocator>
    compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */)
    {
        data.fill(0);
    }

    T * nodes(void) const
    {
        char * data_pointer = const_cast<char*>(data.data());
        return reinterpret_cast<T*>( boost::alignment::align_up( data_pointer, BOOST_LOCKFREE_CACHELINE_BYTES ) );
    }

    std::size_t node_count(void) const
    {
        return size;
    }
};

template <typename T,
          typename Alloc = std::allocator<T> >
struct runtime_sized_freelist_storage:
    boost::alignment::aligned_allocator_adaptor<Alloc, BOOST_LOCKFREE_CACHELINE_BYTES >
{
    typedef boost::alignment::aligned_allocator_adaptor<Alloc, BOOST_LOCKFREE_CACHELINE_BYTES > allocator_type;
    T * nodes_;
    std::size_t node_count_;

    template <typename Allocator>
    runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):
        allocator_type(alloc), node_count_(count)
    {
        if (count > 65535)
            boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects"));
        nodes_ = allocator_type::allocate(count);
        std::memset(nodes_, 0, sizeof(T) * count);
    }

    ~runtime_sized_freelist_storage(void)
    {
        allocator_type::deallocate(nodes_, node_count_);
    }

    T * nodes(void) const
    {
        return nodes_;
    }

    std::size_t node_count(void) const
    {
        return node_count_;
    }
};


// Array-based freelist: nodes live in NodeStorage and free slots are addressed
// by 16-bit indices, with the head kept in an atomic<tagged_index>.
template <typename T,
          typename NodeStorage = runtime_sized_freelist_storage<T>
         >
class fixed_size_freelist:
    NodeStorage
{
    struct freelist_node
    {
        tagged_index next;
    };

    void initialize(void)
    {
        T * nodes = NodeStorage::nodes();
        for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {
            tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);
            next_index->set_index(null_handle());

#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(nodes + i);
#else
            deallocate<false>(static_cast<index_t>(i));
#endif
        }
    }

public:
    typedef tagged_index tagged_node_handle;
    typedef tagged_index::index_t index_t;

    template <typename Allocator>
    fixed_size_freelist (Allocator const & alloc, std::size_t count):
        NodeStorage(alloc, count),
        pool_(tagged_index(static_cast<index_t>(count), 0))
    {
        initialize();
    }

    fixed_size_freelist (void):
        pool_(tagged_index(NodeStorage::node_count(), 0))
    {
        initialize();
    }

    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T();
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg);
        return node;
    }

    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg1, arg2);
        return node;
    }

    template <bool ThreadSafe>
    void destruct (tagged_node_handle tagged_index)
    {
        index_t index = tagged_index.get_index();
        T * n = NodeStorage::nodes() + index;
        (void)n; // silence msvc warning
        n->~T();
        deallocate<ThreadSafe>(index);
    }

    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(static_cast<index_t>(n - NodeStorage::nodes()));
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    index_t null_handle(void) const
    {
        return static_cast<index_t>(NodeStorage::node_count());
    }

    index_t get_handle(T * pointer) const
    {
        if (pointer == NULL)
            return null_handle();
        else
            return static_cast<index_t>(pointer - NodeStorage::nodes());
    }

    index_t get_handle(tagged_node_handle const & handle) const
    {
        return handle.get_index();
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return get_pointer(tptr.get_index());
    }

    T * get_pointer(index_t index) const
    {
        if (index == null_handle())
            return 0;
        else
            return NodeStorage::nodes() + index;
    }

    T * get_pointer(T * ptr) const
    {
        return ptr;
    }

protected: // allow use from subclasses
    template <bool ThreadSafe>
    index_t allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl();
        else
            return allocate_impl_unsafe();
    }
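
    // allocate_impl pops an index from the embedded stack: load the head,
    // read the successor index out of the free node's storage (the first
    // bytes of a free node hold a tagged_index), and install it as the new
    // head via compare_exchange_weak with an incremented tag. node_count()
    // doubles as the null index, so exhaustion is reported to the caller
    // instead of growing the pool.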

private:
    index_t allocate_impl (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            index_t index = old_pool.get_index();
            if (index == null_handle())
                return index;

            T * old_node = NodeStorage::nodes() + index;
            tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

            tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return old_pool.get_index();
        }
    }

    index_t allocate_impl_unsafe (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        index_t index = old_pool.get_index();
        if (index == null_handle())
            return index;

        T * old_node = NodeStorage::nodes() + index;
        tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

        tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        return old_pool.get_index();
    }

    template <bool ThreadSafe>
    void deallocate (index_t index)
    {
        if (ThreadSafe)
            deallocate_impl(index);
        else
            deallocate_impl_unsafe(index);
    }

    void deallocate_impl (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            tagged_index new_pool (index, old_pool.get_tag());
            new_pool_node->next.set_index(old_pool.get_index());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    void deallocate_impl_unsafe (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        tagged_index new_pool (index, old_pool.get_tag());
        new_pool_node->next.set_index(old_pool.get_index());
        pool_.store(new_pool);
    }

    atomic<tagged_index> pool_;
};

// Select the freelist and storage policy matching the container configuration
// (compile-time capacity, fixed size, or node-based).
template <typename T,
          typename Alloc,
          bool IsCompileTimeSized,
          bool IsFixedSize,
          std::size_t Capacity
          >
struct select_freelist
{
    typedef typename mpl::if_c<IsCompileTimeSized,
                               compiletime_sized_freelist_storage<T, Capacity>,
                               runtime_sized_freelist_storage<T, Alloc>
                              >::type fixed_sized_storage_type;

    typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,
                               fixed_size_freelist<T, fixed_sized_storage_type>,
                               freelist_stack<T, Alloc>
                              >::type type;
};

template <typename T, bool IsNodeBased>
struct select_tagged_handle
{
    typedef typename mpl::if_c<IsNodeBased,
                               tagged_ptr<T>,
                               tagged_index
                              >::type tagged_handle_type;

    typedef typename mpl::if_c<IsNodeBased,
                               T*,
                               typename tagged_index::index_t
                              >::type handle_type;
};

} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */

#if defined(_MSC_VER)
#pragma warning(pop)
#endif

#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */
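
// ---------------------------------------------------------------------------
// Usage sketch (comment only, not part of the header). The freelists above are
// internal building blocks of boost::lockfree::queue / stack; the snippet
// below merely illustrates the construct/destruct interface visible in this
// file. The element type int, the pool name and the preallocation count are
// illustrative assumptions, not library requirements:
//
//     #include <boost/lockfree/detail/freelist.hpp>
//
//     int main()
//     {
//         // node-based freelist of int with 16 preallocated nodes
//         boost::lockfree::detail::freelist_stack<int> pool(std::allocator<int>(), 16);
//
//         // ThreadSafe = true, Bounded = false:
//         // pop a node, or fall back to the allocator when the list is empty
//         int * p = pool.construct<true, false>(42);
//
//         if (p)
//             pool.destruct<true>(p); // run the destructor, push the node back
//     }
// ---------------------------------------------------------------------------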