// spinlock_ttas.hpp
// Copyright Oliver Kowalke 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_FIBERS_SPINLOCK_TTAS_H
#define BOOST_FIBERS_SPINLOCK_TTAS_H

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/spinlock_status.hpp>

// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors

namespace boost {
namespace fibers {
namespace detail {
// Test-and-test-and-set (TTAS) spinlock: spin on a relaxed read until the
// lock looks free, then attempt an atomic exchange; on contention back off
// with a randomized 'Binary Exponential Backoff' delay.
class spinlock_ttas {
private:
    template< typename FBSplk >
    friend class spinlock_rtm;

    // shared lock word; starts out unlocked
    std::atomic< spinlock_status >  state_{ spinlock_status::unlocked };

public:
    spinlock_ttas() = default;

    // non-copyable: the lock word must not be duplicated
    spinlock_ttas( spinlock_ttas const&) = delete;
    spinlock_ttas & operator=( spinlock_ttas const&) = delete;

    // Acquire the lock; spins until successful (does not throw, does not block
    // in the OS sense except for the optional sleep_for(0)/yield hints below).
    void lock() noexcept {
        // per-thread LCG used to jitter the backoff delay; seeded once per thread
        static thread_local std::minstd_rand generator{ std::random_device{}() };
        std::size_t collisions = 0 ;
        for (;;) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'state_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            std::size_t retries = 0;
            // test shared variable 'state_' (read-only spin):
            // first access to 'state_' -> cache miss
            // successive access to 'state_' -> cache hit
            // if 'state_' was released by other fiber
            // cached 'state_' is invalidated -> cache miss
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                if ( BOOST_FIBERS_SPIN_BEFORE_SLEEP0 > retries) {
                    ++retries;
                    // give CPU a hint that this thread is in a "spin-wait" loop
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                } else if ( BOOST_FIBERS_SPIN_BEFORE_YIELD > retries) {
                    ++retries;
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring3 to ring 0 transition costing about 1000 cycles
                    // std::this_thread::sleep_for( 0us) lets this_thread give up the remaining part of its time slice
                    // if and only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                } else {
                    // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                    // but only to another thread on the same processor
                    // instead of constant checking, a thread only checks if no other useful work is pending
                    std::this_thread::yield();
                }
#else
                // single-core build: spinning cannot make progress, yield immediately
                std::this_thread::yield();
#endif
            }
            // test-and-set shared variable 'state_':
            // every time 'state_' is signaled over the bus, even if the test fails
            if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
                // backoff window doubles per collision, capped at BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD
                std::uniform_int_distribution< std::size_t > distribution{
                    0, static_cast< std::size_t >( 1) << (std::min)(collisions, static_cast< std::size_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
                const std::size_t z = distribution( generator);
                ++collisions;
                for ( std::size_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, thread has acquired the lock
                break;
            }
        }
    }

    // Single acquisition attempt; returns true iff the exchange observed
    // 'unlocked', i.e. this call took the lock (acquire ordering on success).
    bool try_lock() noexcept {
        return spinlock_status::unlocked == state_.exchange( spinlock_status::locked, std::memory_order_acquire);
    }

    // Release the lock; release ordering publishes all writes made while the
    // lock was held to the next thread that acquires it.
    void unlock() noexcept {
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }
};
}}}

#endif // BOOST_FIBERS_SPINLOCK_TTAS_H