// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
//           locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker
//    ordering is only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch_add returns the previous value. The implementation
//    therefore adds the increment a second time to the fetched value (an
//    extra addition the compiler should be able to detect and optimize away).
//
// A short, illustrative usage sketch follows this comment block.
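// Illustrative usage sketch (not part of this header's API; the names
// |g_payload|, |g_ready|, Producer() and Consumer() are hypothetical): a
// minimal release/acquire handoff through these helpers might look like the
// following. Both locations are only ever accessed through the atomic
// entry points, per the requirement above.
//
//   Atomic32 g_payload = 0;
//   Atomic32 g_ready = 0;
//
//   void Producer() {
//     NoBarrier_Store(&g_payload, 42);  // Relaxed store; no ordering.
//     Release_Store(&g_ready, 1);       // Publishes the payload.
//   }
//
//   void Consumer() {
//     if (Acquire_Load(&g_ready) == 1) {
//       // The acquire load synchronizes with the release store, so this
//       // relaxed load is guaranteed to observe 42.
//       Atomic32 value = NoBarrier_Load(&g_payload);
//     }
//   }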
#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "build/build_config.h"
namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
//           modified, and all call sites updated.

typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}
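// Illustrative sketch (hypothetical names, not part of this header): because
// the *_AtomicIncrement functions return the post-incremented value rather
// than fetch_add's previous value, a reference-count drop can test the result
// directly:
//
//   if (Barrier_AtomicIncrement(&ref_count_, -1) == 0)
//     delete this;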
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}
#if defined(ARCH_CPU_64_BITS)

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif  // defined(ARCH_CPU_64_BITS)
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_