// boost/atomic/detail/lock_pool.hpp
  1. /*
  2. * Distributed under the Boost Software License, Version 1.0.
  3. * (See accompanying file LICENSE_1_0.txt or copy at
  4. * http://www.boost.org/LICENSE_1_0.txt)
  5. *
  6. * Copyright (c) 2011 Helge Bahmann
  7. * Copyright (c) 2013-2014, 2020 Andrey Semashev
  8. */
  9. /*!
  10. * \file atomic/detail/lock_pool.hpp
  11. *
  12. * This header contains declaration of the lock pool used to emulate atomic ops.
  13. */
  14. #ifndef BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
  15. #define BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
  16. #include <cstddef>
  17. #include <boost/atomic/detail/config.hpp>
  18. #include <boost/atomic/detail/link.hpp>
  19. #include <boost/atomic/detail/intptr.hpp>
  20. #if defined(BOOST_WINDOWS)
  21. #include <boost/winapi/thread.hpp>
  22. #elif defined(BOOST_HAS_NANOSLEEP)
  23. #include <time.h>
  24. #else
  25. #include <unistd.h>
  26. #endif
  27. #include <boost/atomic/detail/header.hpp>
  28. #ifdef BOOST_HAS_PRAGMA_ONCE
  29. #pragma once
  30. #endif
  31. namespace boost {
  32. namespace atomics {
  33. namespace detail {
  34. BOOST_FORCEINLINE void wait_some() BOOST_NOEXCEPT
  35. {
  36. #if defined(BOOST_WINDOWS)
  37. boost::winapi::SwitchToThread();
  38. #elif defined(BOOST_HAS_NANOSLEEP)
  39. // Do not use sched_yield or pthread_yield as at least on Linux it doesn't block the thread if there are no other
  40. // pending threads on the current CPU. Proper sleeping is guaranteed to block the thread, which allows other threads
  41. // to potentially migrate to this CPU and complete the tasks we're waiting for.
  42. struct ::timespec ts = {};
  43. ts.tv_sec = 0;
  44. ts.tv_nsec = 1000;
  45. ::nanosleep(&ts, NULL);
  46. #else
  47. ::usleep(1);
  48. #endif
  49. }
namespace lock_pool {

// Acquires the pool lock selected by hash value h and returns an opaque lock state
// pointer, to be passed to unlock() and the wait/notify functions below.
// NOTE(review): the short/long split presumably reflects the expected critical
// section duration -- confirm against the library implementation.
BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
// Releases a lock acquired via short_lock()/long_lock(); ls is the pointer they returned.
BOOST_ATOMIC_DECL void unlock(void* ls) BOOST_NOEXCEPT;

// Allocates/frees a wait state for the object at addr; ls is the lock state of the
// pool entry guarding that object (must be held by the caller, see scoped_wait_state).
BOOST_ATOMIC_DECL void* allocate_wait_state(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void free_wait_state(void* ls, void* ws) BOOST_NOEXCEPT;
// Blocks the current thread on wait state ws guarded by lock state ls.
BOOST_ATOMIC_DECL void wait(void* ls, void* ws) BOOST_NOEXCEPT;
// Wakes one/all threads waiting on the object at addr.
BOOST_ATOMIC_DECL void notify_one(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_all(void* ls, const volatile void* addr) BOOST_NOEXCEPT;

// Lock-pool based fences, used by the lock-based emulation of atomic operations.
BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
  61. template< std::size_t Alignment >
  62. BOOST_FORCEINLINE atomics::detail::uintptr_t hash_ptr(const volatile void* addr) BOOST_NOEXCEPT
  63. {
  64. atomics::detail::uintptr_t ptr = (atomics::detail::uintptr_t)addr;
  65. atomics::detail::uintptr_t h = ptr / Alignment;
  66. // Since many malloc/new implementations return pointers with higher alignment
  67. // than indicated by Alignment, it makes sense to mix higher bits
  68. // into the lower ones. On 64-bit platforms, malloc typically aligns to 16 bytes,
  69. // on 32-bit - to 8 bytes.
  70. BOOST_CONSTEXPR_OR_CONST std::size_t malloc_alignment = sizeof(void*) >= 8u ? 16u : 8u;
  71. BOOST_IF_CONSTEXPR (Alignment != malloc_alignment)
  72. h ^= ptr / malloc_alignment;
  73. return h;
  74. }
  75. template< std::size_t Alignment, bool LongLock = false >
  76. class scoped_lock
  77. {
  78. private:
  79. void* m_lock;
  80. public:
  81. explicit scoped_lock(const volatile void* addr) BOOST_NOEXCEPT
  82. {
  83. atomics::detail::uintptr_t h = lock_pool::hash_ptr< Alignment >(addr);
  84. BOOST_IF_CONSTEXPR (!LongLock)
  85. m_lock = lock_pool::short_lock(h);
  86. else
  87. m_lock = lock_pool::long_lock(h);
  88. }
  89. ~scoped_lock() BOOST_NOEXCEPT
  90. {
  91. lock_pool::unlock(m_lock);
  92. }
  93. void* get_lock_state() const BOOST_NOEXCEPT
  94. {
  95. return m_lock;
  96. }
  97. BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
  98. BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
  99. };
  100. template< std::size_t Alignment >
  101. class scoped_wait_state :
  102. public scoped_lock< Alignment, true >
  103. {
  104. private:
  105. void* m_wait_state;
  106. public:
  107. explicit scoped_wait_state(const volatile void* addr) BOOST_NOEXCEPT :
  108. scoped_lock< Alignment, true >(addr)
  109. {
  110. m_wait_state = lock_pool::allocate_wait_state(this->get_lock_state(), addr);
  111. }
  112. ~scoped_wait_state() BOOST_NOEXCEPT
  113. {
  114. lock_pool::free_wait_state(this->get_lock_state(), m_wait_state);
  115. }
  116. void wait() BOOST_NOEXCEPT
  117. {
  118. lock_pool::wait(this->get_lock_state(), m_wait_state);
  119. }
  120. BOOST_DELETED_FUNCTION(scoped_wait_state(scoped_wait_state const&))
  121. BOOST_DELETED_FUNCTION(scoped_wait_state& operator=(scoped_wait_state const&))
  122. };
  123. } // namespace lock_pool
  124. } // namespace detail
  125. } // namespace atomics
  126. } // namespace boost
  127. #include <boost/atomic/detail/footer.hpp>
  128. #endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_