task_queue_impl.h

// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_

#include <stddef.h>

#include <memory>
#include <queue>
#include <set>
#include <utility>

#include "base/callback.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "base/pending_task.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/common/operations_controller.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/atomic_flag_set.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/lazily_deallocated_deque.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue.h"
#include "base/threading/thread_checker.h"
#include "base/time/time_override.h"
#include "base/trace_event/base_tracing.h"
#include "base/values.h"

namespace base {
namespace sequence_manager {

class LazyNow;
class TimeDomain;

namespace internal {

class SequenceManagerImpl;
class WorkQueue;
class WorkQueueSets;

// TaskQueueImpl has four main queues:
//
// Immediate (non-delayed) tasks:
//    |immediate_incoming_queue| - PostTask enqueues tasks here.
//    |immediate_work_queue| - SequenceManager takes immediate tasks here.
//
// Delayed tasks:
//    |delayed_incoming_queue| - PostDelayedTask enqueues tasks here.
//    |delayed_work_queue| - SequenceManager takes delayed tasks here.
//
// The |immediate_incoming_queue| can be accessed from any thread, the other
// queues are main-thread only. To reduce the overhead of locking,
// |immediate_work_queue| is swapped with |immediate_incoming_queue| when
// |immediate_work_queue| becomes empty.
//
// Delayed tasks are initially posted to |delayed_incoming_queue| and a wake-up
// is scheduled with the TimeDomain. When the delay has elapsed, the TimeDomain
// calls MoveReadyDelayedTasksToWorkQueue and ready delayed tasks are moved
// into the |delayed_work_queue|. Note the EnqueueOrder (used for ordering) for
// a delayed task is not set until it's moved into the |delayed_work_queue|.
//
// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
// prioritization. Task selection is done by the TaskQueueSelector and when a
// queue is selected, it round-robins between the |immediate_work_queue| and
// |delayed_work_queue|. The reason for this is we want to make sure delayed
// tasks (normally the most common type) don't starve out immediate work.
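//
// Illustrative sketch (not part of this header): client code never touches
// these queues directly; it posts through a task runner obtained from the
// owning TaskQueue, e.g.
//
//   task_runner->PostTask(FROM_HERE, BindOnce(&DoImmediateWork));
//   task_runner->PostDelayedTask(FROM_HERE, BindOnce(&DoDelayedWork),
//                                TimeDelta::FromSeconds(1));
//
// The first call lands in |immediate_incoming_queue|; the second lands in
// |delayed_incoming_queue| until its delay expires. DoImmediateWork and
// DoDelayedWork are hypothetical closures used only for illustration.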
class BASE_EXPORT TaskQueueImpl {
 public:
  TaskQueueImpl(SequenceManagerImpl* sequence_manager,
                TimeDomain* time_domain,
                const TaskQueue::Spec& spec);
  TaskQueueImpl(const TaskQueueImpl&) = delete;
  TaskQueueImpl& operator=(const TaskQueueImpl&) = delete;
  ~TaskQueueImpl();

  // Types of queues TaskQueueImpl is maintaining internally.
  enum class WorkQueueType { kImmediate, kDelayed };

  // Some methods have fast paths when on the main thread.
  enum class CurrentThread { kMainThread, kNotMainThread };

  // Non-nestable tasks may get deferred, but that deferral queue is maintained
  // on the SequenceManager side, so we need to keep the information required
  // to requeue such a task.
  struct DeferredNonNestableTask {
    Task task;
    internal::TaskQueueImpl* task_queue;
    WorkQueueType work_queue_type;
  };

  using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
  using OnTaskStartedHandler =
      RepeatingCallback<void(const Task&, const TaskQueue::TaskTiming&)>;
  using OnTaskCompletedHandler =
      RepeatingCallback<void(const Task&, TaskQueue::TaskTiming*, LazyNow*)>;
  using OnTaskPostedHandler = RepeatingCallback<void(const Task&)>;

  // May be called from any thread.
  scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
      TaskType task_type) const;
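  //
  // Example (illustrative sketch only): a caller that wants a runner for
  // default-typed tasks might do
  //
  //   scoped_refptr<SingleThreadTaskRunner> runner =
  //       queue->CreateTaskRunner(kTaskTypeNone);
  //   runner->PostTask(FROM_HERE, BindOnce(&DoWork));
  //
  // where |queue|, DoWork and the use of kTaskTypeNone are assumptions made
  // for illustration rather than requirements of this API.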
  // TaskQueue implementation.
  const char* GetName() const;
  bool IsQueueEnabled() const;
  void SetQueueEnabled(bool enabled);
  void SetShouldReportPostedTasksWhenDisabled(bool should_report);
  bool IsEmpty() const;
  size_t GetNumberOfPendingTasks() const;
  bool HasTaskToRunImmediately() const;
  Optional<TimeTicks> GetNextScheduledWakeUp();
  Optional<DelayedWakeUp> GetNextScheduledWakeUpImpl();
  void SetQueuePriority(TaskQueue::QueuePriority priority);
  TaskQueue::QueuePriority GetQueuePriority() const;
  void AddTaskObserver(TaskObserver* task_observer);
  void RemoveTaskObserver(TaskObserver* task_observer);
  void SetTimeDomain(TimeDomain* time_domain);
  TimeDomain* GetTimeDomain() const;
  void SetBlameContext(trace_event::BlameContext* blame_context);
  void InsertFence(TaskQueue::InsertFencePosition position);
  void InsertFenceAt(TimeTicks time);
  void RemoveFence();
  bool HasActiveFence();
  bool BlockedByFence() const;

  // Implementation of TaskQueue::SetObserver.
  void SetObserver(TaskQueue::Observer* observer);

  void UnregisterTaskQueue();

  // Returns true if a (potentially hypothetical) task with the specified
  // |enqueue_order| could run on the queue. Must be called from the main
  // thread.
  bool CouldTaskRun(EnqueueOrder enqueue_order) const;

  // Returns true if a task with |enqueue_order| obtained from this queue was
  // ever in the queue while it was disabled, blocked by a fence, or less
  // important than kNormalPriority.
  bool WasBlockedOrLowPriority(EnqueueOrder enqueue_order) const;

  // Must only be called from the thread this task queue was created on.
  void ReloadEmptyImmediateWorkQueue();

  Value AsValue(TimeTicks now, bool force_verbose) const;

  bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
  bool GetShouldNotifyObservers() const { return should_notify_observers_; }

  void NotifyWillProcessTask(const Task& task,
                             bool was_blocked_or_low_priority);
  void NotifyDidProcessTask(const Task& task);

  // Check for available tasks in immediate work queues.
  // Used to check if we need to generate notifications about delayed work.
  bool HasPendingImmediateWork();
  bool HasPendingImmediateWorkLocked()
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);

  bool has_pending_high_resolution_tasks() const {
    return main_thread_only()
        .delayed_incoming_queue.has_pending_high_resolution_tasks();
  }

  WorkQueue* delayed_work_queue() {
    return main_thread_only().delayed_work_queue.get();
  }

  const WorkQueue* delayed_work_queue() const {
    return main_thread_only().delayed_work_queue.get();
  }

  WorkQueue* immediate_work_queue() {
    return main_thread_only().immediate_work_queue.get();
  }

  const WorkQueue* immediate_work_queue() const {
    return main_thread_only().immediate_work_queue.get();
  }

  // Enqueues any delayed tasks which should be run now on the
  // |delayed_work_queue|. Must be called from the main thread.
  void MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now);

  base::internal::HeapHandle heap_handle() const {
    return main_thread_only().heap_handle;
  }

  void set_heap_handle(base::internal::HeapHandle heap_handle) {
    main_thread_only().heap_handle = heap_handle;
  }

  // Pushes |task| onto the front of the specified work queue. Caution must be
  // taken with this API because you could easily starve out other work.
  // TODO(kraynov): Simplify non-nestable task logic https://crbug.com/845437.
  void RequeueDeferredNonNestableTask(DeferredNonNestableTask task);

  void PushImmediateIncomingTaskForTest(Task&& task);

  // Iterates over |delayed_incoming_queue| removing canceled tasks. In
  // addition MaybeShrinkQueue is called on all internal queues.
  void ReclaimMemory(TimeTicks now);

  // Allows wrapping TaskQueue to set a handler to subscribe for notifications
  // about started and completed tasks.
  void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
  void OnTaskStarted(const Task& task,
                     const TaskQueue::TaskTiming& task_timing);

  // |task_timing| may be passed in Running state and may not have the end
  // time, so that the handler can run an additional task that is counted as a
  // part of the main task.
  // The handler can call TaskTiming::RecordTaskEnd, which is optional, to
  // finalize the task, and use the resulting timing.
  void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
  void OnTaskCompleted(const Task& task,
                       TaskQueue::TaskTiming* task_timing,
                       LazyNow* lazy_now);
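  //
  // Illustrative sketch (an assumption, not a prescribed pattern): a
  // completed-task handler that finalizes the timing itself might look like
  //
  //   void OnCompleted(const Task& task,
  //                    TaskQueue::TaskTiming* timing,
  //                    LazyNow* lazy_now) {
  //     timing->RecordTaskEnd(lazy_now);  // Signature assumed for the sketch.
  //     ReportDuration(*timing);          // ReportDuration is hypothetical.
  //   }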
  bool RequiresTaskTiming() const;

  // Sets a callback for adding custom functionality for processing posted
  // tasks. The callback will be dispatched while holding a scheduler lock. As
  // a result, the callback should not call scheduler APIs directly, as this
  // can lead to deadlocks. For example, PostTask should not be called directly
  // and ScopedDeferTaskPosting::PostOrDefer should be used instead.
  void SetOnTaskPostedHandler(OnTaskPostedHandler handler);

  WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();

  SequenceManagerImpl* sequence_manager() const { return sequence_manager_; }

  // Returns true if this queue is unregistered or the sequence manager has
  // been deleted, in which case this queue can be safely deleted on any
  // thread.
  bool IsUnregistered() const;

  // Delete all tasks within this TaskQueue.
  void DeletePendingTasks();

  // Whether this task queue owns any tasks. The queue being disabled doesn't
  // affect this.
  bool HasTasks() const;

 protected:
  void SetDelayedWakeUpForTesting(Optional<DelayedWakeUp> wake_up);

 private:
  friend class WorkQueue;
  friend class WorkQueueTest;

  // A TaskQueueImpl instance can be destroyed or unregistered before all its
  // associated TaskRunner instances are (they are refcounted). Thus we need a
  // way to prevent TaskRunner instances from posting further tasks. This class
  // guards PostTask calls using an OperationsController.
  // This class is ref-counted as both the TaskQueueImpl instance and all
  // associated TaskRunner instances share the same GuardedTaskPoster instance.
  // When TaskQueueImpl shuts down it calls ShutdownAndWaitForZeroOperations(),
  // preventing further PostTask calls being made to the underlying
  // TaskQueueImpl.
  class GuardedTaskPoster : public RefCountedThreadSafe<GuardedTaskPoster> {
   public:
    explicit GuardedTaskPoster(TaskQueueImpl* outer);

    bool PostTask(PostedTask task);

    void StartAcceptingOperations() {
      operations_controller_.StartAcceptingOperations();
    }

    void ShutdownAndWaitForZeroOperations() {
      operations_controller_.ShutdownAndWaitForZeroOperations();
    }

   private:
    friend class RefCountedThreadSafe<GuardedTaskPoster>;

    ~GuardedTaskPoster();

    base::internal::OperationsController operations_controller_;
    // Pointer might be stale, access guarded by |operations_controller_|.
    TaskQueueImpl* const outer_;
  };
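  //
  // A minimal sketch of the guard pattern described above (assumption: the
  // exact OperationsController usage lives in the .cc file and may differ):
  //
  //   bool GuardedTaskPoster::PostTask(PostedTask task) {
  //     auto operation_token = operations_controller_.TryBeginOperation();
  //     if (!operation_token)
  //       return false;  // Shutdown has started; drop the task.
  //     outer_->PostTask(std::move(task));
  //     return true;
  //   }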
  class TaskRunner : public SingleThreadTaskRunner {
   public:
    explicit TaskRunner(scoped_refptr<GuardedTaskPoster> task_poster,
                        scoped_refptr<AssociatedThreadId> associated_thread,
                        TaskType task_type);

    bool PostDelayedTask(const Location& location,
                         OnceClosure callback,
                         TimeDelta delay) final;
    bool PostNonNestableDelayedTask(const Location& location,
                                    OnceClosure callback,
                                    TimeDelta delay) final;
    bool RunsTasksInCurrentSequence() const final;

   private:
    ~TaskRunner() final;

    bool PostTask(PostedTask task) const;

    const scoped_refptr<GuardedTaskPoster> task_poster_;
    const scoped_refptr<AssociatedThreadId> associated_thread_;
    const TaskType task_type_;
  };

  // A queue for holding delayed tasks before their delay has expired.
  struct DelayedIncomingQueue {
   public:
    DelayedIncomingQueue();
    DelayedIncomingQueue(const DelayedIncomingQueue&) = delete;
    DelayedIncomingQueue& operator=(const DelayedIncomingQueue&) = delete;
    ~DelayedIncomingQueue();

    void push(Task&& task);
    void pop();
    bool empty() const { return queue_.empty(); }
    size_t size() const { return queue_.size(); }
    const Task& top() const { return queue_.top(); }
    void swap(DelayedIncomingQueue* other);

    bool has_pending_high_resolution_tasks() const {
      return pending_high_res_tasks_;
    }

    void SweepCancelledTasks();
    std::priority_queue<Task> TakeTasks() { return std::move(queue_); }

    Value AsValue(TimeTicks now) const;

   private:
    struct PQueue : public std::priority_queue<Task> {
      // Expose the container and comparator.
      using std::priority_queue<Task>::c;
      using std::priority_queue<Task>::comp;
    };
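    //
    // Exposing |c| and |comp| lets SweepCancelledTasks() filter the underlying
    // container in place and then restore the heap invariant. A minimal sketch
    // of that idea (the real implementation may differ):
    //
    //   base::EraseIf(queue_.c, [](const Task& t) { return IsCanceled(t); });
    //   std::make_heap(queue_.c.begin(), queue_.c.end(), queue_.comp);
    //
    // where IsCanceled stands in for however cancellation is detected.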
    PQueue queue_;

    // Number of pending tasks in the queue that need high resolution timing.
    int pending_high_res_tasks_ = 0;
  };

  struct MainThreadOnly {
    MainThreadOnly(TaskQueueImpl* task_queue, TimeDomain* time_domain);
    ~MainThreadOnly();

    // Another copy of TimeDomain for lock-free access from the main thread.
    // See description inside struct AnyThread for details.
    TimeDomain* time_domain;

    TaskQueue::Observer* task_queue_observer = nullptr;

    std::unique_ptr<WorkQueue> delayed_work_queue;
    std::unique_ptr<WorkQueue> immediate_work_queue;

    DelayedIncomingQueue delayed_incoming_queue;

    ObserverList<TaskObserver>::Unchecked task_observers;
    base::internal::HeapHandle heap_handle;
    bool is_enabled = true;
    trace_event::BlameContext* blame_context = nullptr;  // Not owned.
    EnqueueOrder current_fence;
    Optional<TimeTicks> delayed_fence;

    // Snapshots the next sequence number when the queue is unblocked,
    // otherwise it contains EnqueueOrder::none(). If the EnqueueOrder of a
    // task just popped from this queue is greater than this, it means that the
    // queue was never disabled or blocked by a fence while the task was
    // queued.
    EnqueueOrder enqueue_order_at_which_we_became_unblocked;

    // If the EnqueueOrder of a task just popped from this queue is greater
    // than this, it means that the queue was never disabled, blocked by a
    // fence or less important than kNormalPriority while the task was queued.
    //
    // Implementation details:
    // 1) When the queue is made less important than kNormalPriority, this is
    //    set to EnqueueOrder::max(). The EnqueueOrder of any task will compare
    //    less than this.
    // 2) When the queue is made at least as important as kNormalPriority, this
    //    snapshots the next sequence number. If the queue is blocked, the
    //    value is irrelevant because no task should be popped. If the queue is
    //    not blocked, the EnqueueOrder of any already queued task will compare
    //    less than this.
    // 3) When the queue is unblocked while at least as important as
    //    kNormalPriority, this snapshots the next sequence number. The
    //    EnqueueOrder of any already queued task will compare less than this.
    EnqueueOrder
        enqueue_order_at_which_we_became_unblocked_with_normal_priority;
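    //
    // Worked example (illustrative numbers only): suppose tasks are queued
    // with EnqueueOrders 10 and 11 while the queue is disabled at normal
    // priority, and the queue is re-enabled when the next sequence number is
    // 12. Both fields above then snapshot 12, so 10 and 11 compare less than
    // 12 and those tasks are reported as having been blocked, while a task
    // posted afterwards with EnqueueOrder 13 is not.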
    OnTaskStartedHandler on_task_started_handler;
    OnTaskCompletedHandler on_task_completed_handler;

    // Last reported wake up, used only in UpdateDelayedWakeUp to avoid
    // excessive calls.
    Optional<DelayedWakeUp> scheduled_wake_up;

    // If false, queue will be disabled. Used only for tests.
    bool is_enabled_for_test = true;

    // The time at which the task queue was disabled, if it is currently
    // disabled.
    Optional<TimeTicks> disabled_time;

    // Whether or not the task queue should emit tracing events for tasks
    // posted to this queue when it is disabled.
    bool should_report_posted_tasks_when_disabled = false;
  };

  void PostTask(PostedTask task);

  void PostImmediateTaskImpl(PostedTask task, CurrentThread current_thread);
  void PostDelayedTaskImpl(PostedTask task, CurrentThread current_thread);

  // Push the task onto the |delayed_incoming_queue|. Lock-free main thread
  // only fast path.
  void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
                                                  TimeTicks now,
                                                  bool notify_task_annotator);

  // Push the task onto the |delayed_incoming_queue|. Slow path from other
  // threads.
  void PushOntoDelayedIncomingQueue(Task pending_task);

  void ScheduleDelayedWorkTask(Task pending_task);

  void MoveReadyImmediateTasksToImmediateWorkQueueLocked()
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);

  // LazilyDeallocatedDeque uses TimeTicks to figure out when to resize. We
  // should always use real time here.
  using TaskDeque =
      LazilyDeallocatedDeque<Task, subtle::TimeTicksNowIgnoringOverride>;

  // Extracts all the tasks from the immediate incoming queue and swaps it with
  // |queue| which must be empty.
  // Can be called from any thread.
  void TakeImmediateIncomingQueueTasks(TaskDeque* queue);

  void TraceQueueSize() const;
  static Value QueueAsValue(const TaskDeque& queue, TimeTicks now);
  static Value TaskAsValue(const Task& task, TimeTicks now);

  // Schedules delayed work on time domain and calls the observer.
  void UpdateDelayedWakeUp(LazyNow* lazy_now);
  void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
                               Optional<DelayedWakeUp> wake_up);

  // Activate a delayed fence if the time has come.
  void ActivateDelayedFenceIfNeeded(TimeTicks now);

  // Updates state protected by any_thread_lock_.
  void UpdateCrossThreadQueueStateLocked()
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);

  void MaybeLogPostTask(PostedTask* task);
  void MaybeAdjustTaskDelay(PostedTask* task, CurrentThread current_thread);

  // Reports the task if it was due to IPC and was posted to a disabled queue.
  // This should be called after WillQueueTask has been called for the task.
  void MaybeReportIpcTaskQueuedFromMainThread(Task* pending_task,
                                              const char* task_queue_name);
  bool ShouldReportIpcTaskQueuedFromAnyThreadLocked(
      base::TimeDelta* time_since_disabled)
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
  void MaybeReportIpcTaskQueuedFromAnyThreadLocked(Task* pending_task,
                                                   const char* task_queue_name)
      EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
  void MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(
      Task* pending_task,
      const char* task_queue_name);
  void ReportIpcTaskQueued(Task* pending_task,
                           const char* task_queue_name,
                           const base::TimeDelta& time_since_disabled);

  // Invoked when the queue becomes enabled and not blocked by a fence.
  void OnQueueUnblocked();

  const char* name_;
  SequenceManagerImpl* const sequence_manager_;

  scoped_refptr<AssociatedThreadId> associated_thread_;

  const scoped_refptr<GuardedTaskPoster> task_poster_;

  mutable base::internal::CheckedLock any_thread_lock_;

  struct AnyThread {
    // Mirrored from MainThreadOnly. These are only used for tracing.
    struct TracingOnly {
      TracingOnly();
      ~TracingOnly();

      bool is_enabled = true;
      Optional<TimeTicks> disabled_time;
      bool should_report_posted_tasks_when_disabled = false;
    };

    explicit AnyThread(TimeDomain* time_domain);
    ~AnyThread();

    // TimeDomain is maintained in two copies: inside AnyThread and inside
    // MainThreadOnly. It can be changed only from the main thread, so
    // |any_thread_lock_| must be held when accessing it from other threads.
    TimeDomain* time_domain;

    TaskQueue::Observer* task_queue_observer = nullptr;

    TaskDeque immediate_incoming_queue;

    // True if main_thread_only().immediate_work_queue is empty.
    bool immediate_work_queue_empty = true;

    bool post_immediate_task_should_schedule_work = true;

    bool unregistered = false;

    OnTaskPostedHandler on_task_posted_handler;

#if DCHECK_IS_ON()
    // A cache of |immediate_work_queue->work_queue_set_index()| which is used
    // to index into
    // SequenceManager::Settings::per_priority_cross_thread_task_delay to apply
    // a priority specific delay for debugging purposes.
    int queue_set_index = 0;
#endif

    TracingOnly tracing_only;
  };

  AnyThread any_thread_ GUARDED_BY(any_thread_lock_);

  MainThreadOnly main_thread_only_;
  MainThreadOnly& main_thread_only() {
    DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
    return main_thread_only_;
  }
  const MainThreadOnly& main_thread_only() const {
    DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
    return main_thread_only_;
  }

  // Handle to our entry within the SequenceManager's |empty_queues_to_reload_|
  // atomic flag set. Used to signal that this queue needs to be reloaded.
  // If you call SetActive(false) you should do so while holding
  // |any_thread_lock_|, because there is a danger a cross-thread PostTask
  // might reset it before we make |immediate_work_queue| non-empty.
  AtomicFlagSet::AtomicFlag empty_queues_to_reload_handle_;

  const bool should_monitor_quiescence_;
  const bool should_notify_observers_;
  const bool delayed_fence_allowed_;
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_