// atomicops_internals_x86_msvc.h
  1. // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. // This file is an internal atomic implementation, use base/atomicops.h instead.
  5. #ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
  6. #define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
  7. #include "base/win/windows_types.h"
  8. #include <intrin.h>
  9. #include <atomic>
  10. #include "base/macros.h"
  11. #include "build/build_config.h"
  12. namespace base {
  13. namespace subtle {
  14. inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
  15. Atomic32 old_value,
  16. Atomic32 new_value) {
  17. LONG result = _InterlockedCompareExchange(
  18. reinterpret_cast<volatile LONG*>(ptr),
  19. static_cast<LONG>(new_value),
  20. static_cast<LONG>(old_value));
  21. return static_cast<Atomic32>(result);
  22. }
  23. inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
  24. Atomic32 new_value) {
  25. LONG result = _InterlockedExchange(
  26. reinterpret_cast<volatile LONG*>(ptr),
  27. static_cast<LONG>(new_value));
  28. return static_cast<Atomic32>(result);
  29. }
  30. inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
  31. Atomic32 increment) {
  32. return _InterlockedExchangeAdd(
  33. reinterpret_cast<volatile LONG*>(ptr),
  34. static_cast<LONG>(increment)) + increment;
  35. }
  36. inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
  37. Atomic32 increment) {
  38. return Barrier_AtomicIncrement(ptr, increment);
  39. }
  40. inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
  41. Atomic32 old_value,
  42. Atomic32 new_value) {
  43. return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  44. }
  45. inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
  46. Atomic32 old_value,
  47. Atomic32 new_value) {
  48. return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  49. }
// Plain volatile store of value into *ptr; no ordering guarantee for callers.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
// Store with acquire semantics.  Implemented via an atomic exchange, whose
// interlocked intrinsic acts as a barrier in this implementation; the
// exchange's return value is intentionally discarded.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}
// Store with release semantics.  A plain volatile store suffices on this
// platform (per the longstanding rationale below).
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}
// Plain volatile load of *ptr; no ordering guarantee for callers.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
  64. inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  65. Atomic32 value = *ptr;
  66. return value;
  67. }
// Load preceded by a full (sequentially consistent) fence, then a plain
// volatile read.  The fence must come before the load; do not reorder.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return *ptr;
}
  72. #if defined(_WIN64)
  73. // 64-bit low-level operations on 64-bit platform.
  74. static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
  75. inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
  76. Atomic64 old_value,
  77. Atomic64 new_value) {
  78. PVOID result = _InterlockedCompareExchangePointer(
  79. reinterpret_cast<volatile PVOID*>(ptr),
  80. reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  81. return reinterpret_cast<Atomic64>(result);
  82. }
  83. inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
  84. Atomic64 new_value) {
  85. PVOID result =
  86. _InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
  87. reinterpret_cast<PVOID>(new_value));
  88. return reinterpret_cast<Atomic64>(result);
  89. }
  90. inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
  91. Atomic64 increment) {
  92. return _InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
  93. static_cast<LONGLONG>(increment)) +
  94. increment;
  95. }
  96. inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
  97. Atomic64 increment) {
  98. return Barrier_AtomicIncrement(ptr, increment);
  99. }
// Plain volatile 64-bit store; no ordering guarantee for callers.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}
// 64-bit store with acquire semantics, implemented via an atomic exchange
// whose interlocked intrinsic acts as a barrier in this implementation.
// The exchange's return value is intentionally discarded.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}
// 64-bit store with release semantics.  A plain volatile store suffices on
// the Intel memory model; rationale preserved from the original authors:
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
// Plain volatile 64-bit load; no ordering guarantee for callers.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}
  119. inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  120. Atomic64 value = *ptr;
  121. return value;
  122. }
  123. inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  124. std::atomic_thread_fence(std::memory_order_seq_cst);
  125. return *ptr;
  126. }
  127. inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
  128. Atomic64 old_value,
  129. Atomic64 new_value) {
  130. return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  131. }
  132. inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
  133. Atomic64 old_value,
  134. Atomic64 new_value) {
  135. return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  136. }
  137. #endif // defined(_WIN64)
  138. } // namespace subtle
  139. } // namespace base
  140. #endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_