atomic_flag_set.h 4.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140
  1. // Copyright 2019 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. #ifndef BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
  5. #define BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
  6. #include <atomic>
  7. #include <memory>
  8. #include "base/base_export.h"
  9. #include "base/callback.h"
  10. #include "base/task/sequence_manager/associated_thread_id.h"
  11. namespace base {
  12. namespace sequence_manager {
  13. namespace internal {
// This class maintains a set of AtomicFlags which can be activated or
// deactivated at any time by any thread. When a flag is created a callback is
// specified and the RunActiveCallbacks method can be invoked to fire callbacks
// for all active flags. Creating, releasing or destroying an AtomicFlag must
// be done on the associated thread, as must calling RunActiveCallbacks. This
// class is thread-affine.
class BASE_EXPORT AtomicFlagSet {
 protected:
  struct Group;

 public:
  explicit AtomicFlagSet(scoped_refptr<AssociatedThreadId> associated_thread);
  AtomicFlagSet(const AtomicFlagSet&) = delete;
  AtomicFlagSet& operator=(const AtomicFlagSet&) = delete;

  // AtomicFlags need to be released (or deleted) before this can be deleted.
  ~AtomicFlagSet();

  // A single flag within the set. This class is thread-affine; in addition
  // SetActive can be called concurrently from any thread.
  class BASE_EXPORT AtomicFlag {
   public:
    AtomicFlag();

    // Automatically releases the AtomicFlag.
    ~AtomicFlag();

    AtomicFlag(const AtomicFlag&) = delete;
    AtomicFlag(AtomicFlag&& other);

    // Can be called on any thread. Marks whether the flag is active or not,
    // which controls whether RunActiveCallbacks() will fire the associated
    // callback or not. In the absence of external synchronization, the value
    // set by this call might not immediately be visible to a thread calling
    // RunActiveCallbacks(); the only guarantee is that a value set by this
    // will eventually be visible to other threads due to cache coherency.
    // Release / acquire semantics are used on the underlying atomic
    // operations so if RunActiveCallbacks sees the value set by a call to
    // SetActive(), it will also see the memory changes that happened prior to
    // that SetActive() call.
    void SetActive(bool active);

    // Releases the flag. Must be called on the associated thread. SetActive
    // can't be called after this.
    void ReleaseAtomicFlag();

   private:
    friend AtomicFlagSet;

    // Only AtomicFlagSet (via AddFlag) constructs valid flags.
    AtomicFlag(AtomicFlagSet* outer, Group* element, size_t flag_bit);

    AtomicFlagSet* outer_ = nullptr;
    Group* group_ = nullptr;  // Null when the AtomicFlag is invalid.
    size_t flag_bit_ = 0;  // This is 1 << index of this flag within the group.
  };

  // Adds a new flag to the set. The |callback| will be fired by
  // RunActiveCallbacks if the flag is active. Must be called on the
  // associated thread.
  AtomicFlag AddFlag(RepeatingClosure callback);

  // Runs the registered callback for all flags marked as active and
  // atomically resets all flags to inactive. Must be called on the associated
  // thread.
  void RunActiveCallbacks() const;

 protected:
  // Test-only accessors for the internal Group lists.
  Group* GetAllocListForTesting() const { return alloc_list_head_.get(); }

  Group* GetPartiallyFreeListForTesting() const {
    return partially_free_list_head_;
  }

  // Wraps a single std::atomic<size_t> which is shared by a number of
  // AtomicFlags with one bit per flag.
  struct BASE_EXPORT Group {
    Group();
    Group(const Group&) = delete;
    Group& operator=(const Group&) = delete;
    ~Group();

    // One flag per bit of the size_t bitmask below.
    static constexpr int kNumFlags = sizeof(size_t) * 8;

    // Bitmask of active flags; the only member touched cross-thread.
    std::atomic<size_t> flags = {0};
    // Bitmask of slots handed out via AddFlag; associated thread only.
    size_t allocated_flags = 0;
    RepeatingClosure flag_callbacks[kNumFlags];
    // Intrusive doubly-linked alloc list; |next| owns the successor Group.
    Group* prev = nullptr;
    std::unique_ptr<Group> next;
    // Intrusive (non-owning) list of Groups with at least one free slot.
    Group* partially_free_list_prev = nullptr;
    Group* partially_free_list_next = nullptr;

    bool IsFull() const;

    bool IsEmpty() const;

    // Returns the index of the first unallocated flag. Must not be called
    // when all flags are set.
    int FindFirstUnallocatedFlag() const;

    // Computes the index of the |flag_callbacks| based on the number of
    // leading zero bits in |flag|.
    // NOTE(review): since |flag_bit_| is 1 << index, this presumably maps the
    // lowest set bit to its index — confirm against the .cc implementation.
    static int IndexOfFirstFlagSet(size_t flag);
  };

 private:
  void AddToAllocList(std::unique_ptr<Group> element);

  // This deletes |element|.
  void RemoveFromAllocList(Group* element);

  void AddToPartiallyFreeList(Group* element);

  // This does not delete |element|.
  void RemoveFromPartiallyFreeList(Group* element);

  scoped_refptr<AssociatedThreadId> associated_thread_;
  // Head of the owning list of all Groups.
  std::unique_ptr<Group> alloc_list_head_;
  // Head of the non-owning list of Groups with free slots; null when none.
  Group* partially_free_list_head_ = nullptr;
};
  105. } // namespace internal
  106. } // namespace sequence_manager
  107. } // namespace base
  108. #endif // BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_