// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_

#include <algorithm>
#include <cmath>
#include <memory>
#include <utility>
#include <vector>

#include "base/check_op.h"
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
#include "base/time/time.h"

namespace base {
namespace sequence_manager {
namespace internal {

// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns. The queue generally grows while tasks are added, is then drained
// until empty, and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// other deques is memory management. For performance (memory allocation isn't
// free) we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB this queue isn't thread-safe by itself.
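//
// Illustrative usage sketch (Task, MakeTask() and RunTask() are hypothetical
// stand-ins, not part of this header):
//
//   LazilyDeallocatedDeque<Task> queue;
//   queue.push_back(MakeTask());  // Backing storage grows as needed.
//   while (!queue.empty()) {
//     RunTask(queue.front());
//     queue.pop_front();  // Capacity is deliberately retained.
//   }
//   queue.MaybeShrinkQueue();  // Rate-limited reclamation; ideally called
//                              // when the queue is empty.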
template <typename T, TimeTicks (*now_source)() = TimeTicks::Now>
class LazilyDeallocatedDeque {
 public:
  enum {
    // Minimum allocation for a ring. Note a ring of size 4 will only hold up
    // to 3 elements.
    kMinimumRingSize = 4,

    // Maximum "wasted" capacity allowed when considering if we should resize
    // the backing store.
    kReclaimThreshold = 16,

    // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
    // queue.
    kMinimumShrinkIntervalInSeconds = 5
  };

  LazilyDeallocatedDeque() = default;
  LazilyDeallocatedDeque(const LazilyDeallocatedDeque&) = delete;
  LazilyDeallocatedDeque& operator=(const LazilyDeallocatedDeque&) = delete;
  ~LazilyDeallocatedDeque() { clear(); }

  bool empty() const { return size_ == 0; }
  size_t max_size() const { return max_size_; }
  size_t size() const { return size_; }

  size_t capacity() const {
    size_t capacity = 0;
    for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
      capacity += iter->capacity();
    }
    return capacity;
  }

  void clear() {
    while (head_) {
      head_ = std::move(head_->next_);
    }
    tail_ = nullptr;
    size_ = 0;
  }

  // Assumed to be an uncommon operation.
  void push_front(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed, by the minimum amount.
    if (!head_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      std::unique_ptr<Ring> new_ring =
          std::make_unique<Ring>(kMinimumRingSize);
      new_ring->next_ = std::move(head_);
      head_ = std::move(new_ring);
    }

    head_->push_front(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  // Assumed to be a common operation.
  void push_back(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed.
    if (!tail_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      // Doubling the size is a common strategy, but one which can be wasteful
      // so we use a (somewhat) slower growth curve.
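      // With kMinimumRingSize = 4, successive ring capacities run 4, 8, 14,
      // 23, 36, ..., i.e. roughly a 1.5x growth factor per new ring.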
      tail_->next_ = std::make_unique<Ring>(2 + tail_->capacity() +
                                            (tail_->capacity() / 2));
      tail_ = tail_->next_.get();
    }

    tail_->push_back(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  T& front() {
    DCHECK(head_);
    return head_->front();
  }

  const T& front() const {
    DCHECK(head_);
    return head_->front();
  }

  T& back() {
    DCHECK(tail_);
    return tail_->back();
  }

  const T& back() const {
    DCHECK(tail_);
    return tail_->back();
  }

  void pop_front() {
    DCHECK(head_);
    DCHECK(!head_->empty());
    DCHECK(tail_);
    DCHECK_GT(size_, 0u);
    head_->pop_front();

    // If the ring has become empty and we have several rings, then remove the
    // head one (which we expect to have lower capacity than the remaining
    // ones).
    if (head_->empty() && head_->next_) {
      head_ = std::move(head_->next_);
    }

    --size_;
  }

  void swap(LazilyDeallocatedDeque& other) {
    std::swap(head_, other.head_);
    std::swap(tail_, other.tail_);
    std::swap(size_, other.size_);
    std::swap(max_size_, other.max_size_);
    std::swap(next_resize_time_, other.next_resize_time_);
  }

  void MaybeShrinkQueue() {
    if (!tail_)
      return;

    DCHECK_GE(max_size_, size_);

    // Rate limit how often we shrink the queue because it's somewhat
    // expensive.
    TimeTicks current_time = now_source();
    if (current_time < next_resize_time_)
      return;

    // Due to the way the Ring works we need 1 more slot than is used.
    size_t new_capacity = max_size_ + 1;
    if (new_capacity < kMinimumRingSize)
      new_capacity = kMinimumRingSize;

    // Reset |max_size_| so that, unless usage spikes up again, we will
    // consider reclaiming the excess capacity next time.
    max_size_ = size_;

    // Only realloc if the current capacity is sufficiently greater than the
    // observed maximum size for the previous period.
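    // For example, if |new_capacity| is 4: a 36-slot backing store is
    // reclaimed (4 + 16 < 36), but an 18-slot one is kept (4 + 16 >= 18).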
    if (new_capacity + kReclaimThreshold >= capacity())
      return;

    SetCapacity(new_capacity);
    next_resize_time_ =
        current_time + TimeDelta::FromSeconds(kMinimumShrinkIntervalInSeconds);
  }
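
  // Moves all elements, in order, into a single new ring of |new_capacity|
  // slots and replaces the existing chain of rings with it.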
  void SetCapacity(size_t new_capacity) {
    std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);
    DCHECK_GE(new_capacity, size_ + 1);

    // Preserve |size_|, which the calls to pop_front() below count down to
    // zero.
    size_t real_size = size_;

    while (!empty()) {
      DCHECK(new_ring->CanPush());
      new_ring->push_back(std::move(head_->front()));
      pop_front();
    }

    size_ = real_size;
    DCHECK_EQ(head_.get(), tail_);

    head_ = std::move(new_ring);
    tail_ = head_.get();
  }

 private:
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);
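
  // A fixed-capacity circular buffer. As the code below implies,
  // |front_index_| points at the slot *before* the first element while
  // |back_index_| points at the last element, so an empty ring has
  // front_index_ == back_index_, one slot always stays unused, and a ring of
  // capacity N holds at most N - 1 elements.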
  struct Ring {
    explicit Ring(size_t capacity)
        : capacity_(capacity),
          front_index_(0),
          back_index_(0),
          data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
          next_(nullptr) {
      DCHECK_GE(capacity_, kMinimumRingSize);
    }
    Ring(const Ring&) = delete;
    Ring& operator=(const Ring&) = delete;

    ~Ring() {
      while (!empty()) {
        pop_front();
      }
      delete[] reinterpret_cast<char*>(data_);
    }

    bool empty() const { return back_index_ == front_index_; }
    size_t capacity() const { return capacity_; }
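
    // Pushing is possible as long as advancing |back_index_| would not
    // collide with |front_index_| (the permanently-unused slot).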
    bool CanPush() const {
      return front_index_ != CircularIncrement(back_index_);
    }

    void push_front(T&& t) {
      // Mustn't appear to become empty.
      DCHECK_NE(CircularDecrement(front_index_), back_index_);
      new (&data_[front_index_]) T(std::move(t));
      front_index_ = CircularDecrement(front_index_);
    }

    void push_back(T&& t) {
      back_index_ = CircularIncrement(back_index_);
      DCHECK(!empty());  // Mustn't appear to become empty.
      new (&data_[back_index_]) T(std::move(t));
    }

    bool CanPop() const { return front_index_ != back_index_; }

    void pop_front() {
      DCHECK(!empty());
      front_index_ = CircularIncrement(front_index_);
      data_[front_index_].~T();
    }

    T& front() {
      DCHECK(!empty());
      return data_[CircularIncrement(front_index_)];
    }

    const T& front() const {
      DCHECK(!empty());
      return data_[CircularIncrement(front_index_)];
    }

    T& back() {
      DCHECK(!empty());
      return data_[back_index_];
    }

    const T& back() const {
      DCHECK(!empty());
      return data_[back_index_];
    }

    size_t CircularDecrement(size_t index) const {
      if (index == 0)
        return capacity_ - 1;
      return index - 1;
    }

    size_t CircularIncrement(size_t index) const {
      DCHECK_LT(index, capacity_);
      ++index;
      if (index == capacity_)
        return 0;
      return index;
    }

    size_t capacity_;
    size_t front_index_;
    size_t back_index_;
    T* data_;
    std::unique_ptr<Ring> next_;
  };

 public:
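  // A minimal const iterator over the chain of rings. There is no
  // operator==, so loops test the iterator itself for validity:
  //   for (auto it = deque.begin(); it; ++it) { ... }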
  class Iterator {
   public:
    using value_type = T;
    using pointer = const T*;
    using reference = const T&;

    const T* operator->() const { return &ring_->data_[index_]; }
    const T& operator*() const { return ring_->data_[index_]; }

    Iterator& operator++() {
      if (index_ == ring_->back_index_) {
        ring_ = ring_->next_.get();
        index_ = ring_ ? ring_->CircularIncrement(ring_->front_index_) : 0;
      } else {
        index_ = ring_->CircularIncrement(index_);
      }
      return *this;
    }

    operator bool() const { return !!ring_; }

   private:
    explicit Iterator(const Ring* ring) {
      if (!ring || ring->empty()) {
        ring_ = nullptr;
        index_ = 0;
        return;
      }
      ring_ = ring;
      index_ = ring_->CircularIncrement(ring->front_index_);
    }

    const Ring* ring_;
    size_t index_;

    friend class LazilyDeallocatedDeque;
  };

  Iterator begin() const { return Iterator(head_.get()); }
  Iterator end() const { return Iterator(nullptr); }

 private:
  // We maintain a list of Ring buffers, to enable us to grow without copying,
  // but most of the time we aim to have only one active Ring.
  std::unique_ptr<Ring> head_;
  Ring* tail_ = nullptr;

  size_t size_ = 0;
  size_t max_size_ = 0;
  TimeTicks next_resize_time_;
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_