// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_TRACE_LOG_H_
#define BASE_TRACE_EVENT_TRACE_LOG_H_

#include <stddef.h>
#include <stdint.h>

#include <atomic>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "base/atomicops.h"
#include "base/containers/stack.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time_override.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_impl.h"
#include "build/build_config.h"
namespace base {

class RefCountedString;

template <typename T>
class NoDestructor;

namespace trace_event {

struct TraceCategory;
class TraceBuffer;
class TraceBufferChunk;
class TraceEvent;
class TraceEventFilter;
class TraceEventMemoryOverhead;

struct BASE_EXPORT TraceLogStatus {
  TraceLogStatus();
  ~TraceLogStatus();
  uint32_t event_capacity;
  uint32_t event_count;
};
class BASE_EXPORT TraceLog : public MemoryDumpProvider {
 public:
  // Argument passed to TraceLog::SetEnabled.
  enum Mode : uint8_t {
    // Enables normal tracing (recording trace events in the trace buffer).
    RECORDING_MODE = 1 << 0,
    // Trace events are enabled just for filtering but not for recording. Only
    // the event filters config of the |trace_config| argument is used.
    FILTERING_MODE = 1 << 1
  };

  static TraceLog* GetInstance();
  // Retrieves a copy (for thread-safety) of the current TraceConfig.
  TraceConfig GetCurrentTraceConfig() const;

  // Initializes the thread-local event buffer, if not already initialized and
  // if the current thread supports that (has a message loop).
  void InitializeThreadLocalEventBufferIfSupported();

  // See TraceConfig comments for details on how to control which categories
  // will be traced. SetDisabled must be called distinctly for each mode that
  // is enabled. If tracing has already been enabled for recording, the
  // category filter (enabled and disabled categories) will be merged into the
  // current category filter. Enabling RECORDING_MODE does not enable filters.
  // Trace event filters will be used only if FILTERING_MODE is set on
  // |modes_to_enable|. Unlike RECORDING_MODE, FILTERING_MODE does not support
  // upgrading, i.e. filters can only be enabled if not previously enabled.
  void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);

  // TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
  // Mode as argument.

  // Disables tracing for all categories, for the specified |modes_to_disable|
  // only. The no-argument overload disables RECORDING_MODE only.
  void SetDisabled();
  void SetDisabled(uint8_t modes_to_disable);
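
  // Example (illustrative sketch, not part of this header): enabling recording
  // for two category groups and disabling it again later. Assumes the
  // TraceConfig(category_filter, options) constructor from trace_config.h;
  // "cat1,cat2" is a made-up category filter.
  //
  //   TraceLog* log = TraceLog::GetInstance();
  //   log->SetEnabled(TraceConfig("cat1,cat2", ""), TraceLog::RECORDING_MODE);
  //   ...
  //   log->SetDisabled(TraceLog::RECORDING_MODE);
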
  // Returns true if TraceLog is enabled in recording mode.
  // Note: Returns false even if FILTERING_MODE is enabled.
  bool IsEnabled() {
    AutoLock lock(lock_);
    return enabled_modes_ & RECORDING_MODE;
  }

  // Returns a bitmap of enabled modes from TraceLog::Mode.
  uint8_t enabled_modes() { return enabled_modes_; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();
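
  // Example (sketch): passively detecting that a new trace has begun, which is
  // the pattern TRACE_EVENT_IS_NEW_TRACE() builds on. |last_trace_seen| is a
  // hypothetical caller-side variable.
  //
  //   int current = TraceLog::GetInstance()->GetNumTracesRecorded();
  //   if (current != -1 && current != last_trace_seen) {
  //     last_trace_seen = current;
  //     // A new trace has started since the last check.
  //   }
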
#if defined(OS_ANDROID)
  void StartATrace();
  void StopATrace();
  void AddClockSyncMetadataEvent();
#endif
  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on demand.
  class BASE_EXPORT EnabledStateObserver {
   public:
    virtual ~EnabledStateObserver() = default;

    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };

  // Adds an observer. Cannot be called from within the observer callback.
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  // Removes an observer. Cannot be called from within the observer callback.
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  // Adds an observer that is owned by TraceLog. This is useful for agents
  // that implement a tracing feature that needs to stay alive as long as
  // TraceLog does.
  void AddOwnedEnabledStateObserver(
      std::unique_ptr<EnabledStateObserver> listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
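
  // Example (sketch): a minimal observer that mirrors the tracing state into
  // another tracing system. MyTracingBridge and its start/stop calls are
  // hypothetical.
  //
  //   class MyTracingBridge : public TraceLog::EnabledStateObserver {
  //    public:
  //     void OnTraceLogEnabled() override { StartOtherTracer(); }
  //     void OnTraceLogDisabled() override { StopOtherTracer(); }
  //   };
  //
  //   // Given an instance |bridge| that outlives tracing, register it
  //   // (must not be done from within the observer callbacks themselves):
  //   TraceLog::GetInstance()->AddEnabledStateObserver(&bridge);
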
  // Asynchronous enabled state listeners. When tracing is enabled or disabled,
  // for each observer, a task for invoking its appropriate callback is posted
  // to the thread from which AddAsyncEnabledStateObserver() was called. This
  // allows the observer to be safely destroyed, provided that it happens on
  // the same thread that invoked AddAsyncEnabledStateObserver().
  class BASE_EXPORT AsyncEnabledStateObserver {
   public:
    virtual ~AsyncEnabledStateObserver() = default;

    // Posted just after the tracing system becomes enabled, outside |lock_|.
    // TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Posted just after the tracing system becomes disabled, outside |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };

  // TODO(oysteine): This API originally needed to use WeakPtrs as the observer
  // list was copied under the global trace lock, but iterated over outside of
  // that lock so that observers could add tracing. The list is now protected
  // by its own lock, so this can be changed to a raw ptr.
  void AddAsyncEnabledStateObserver(
      WeakPtr<AsyncEnabledStateObserver> listener);
  void RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener);
  bool HasAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener) const;
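
  // Example (sketch): registering an asynchronous observer through a WeakPtr,
  // assuming the observer owns a base::WeakPtrFactory<MyAsyncObserver> member
  // named |weak_factory_| (both names are hypothetical).
  //
  //   TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
  //       weak_factory_.GetWeakPtr());
  //   ...
  //   TraceLog::GetInstance()->RemoveAsyncEnabledStateObserver(this);
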
  TraceLogStatus GetStatus() const;
  bool BufferIsFull() const;

  // Computes an estimate of the size of the TraceLog including all the
  // retained objects.
  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);

  void SetArgumentFilterPredicate(
      const ArgumentFilterPredicate& argument_filter_predicate);
  ArgumentFilterPredicate GetArgumentFilterPredicate() const;

  void SetMetadataFilterPredicate(
      const MetadataFilterPredicate& metadata_filter_predicate);
  MetadataFilterPredicate GetMetadataFilterPredicate() const;
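
  // Example (sketch): installing an argument filter. The predicate type is the
  // ArgumentFilterPredicate alias pulled in via trace_event_impl.h;
  // MyArgumentFilter is a hypothetical function matching that signature.
  //
  //   TraceLog::GetInstance()->SetArgumentFilterPredicate(
  //       base::BindRepeating(&MyArgumentFilter));
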
  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush. Flush does the serialization
  // on the same thread if the caller doesn't set use_worker_thread explicitly.
  using OutputCallback =
      base::RepeatingCallback<void(const scoped_refptr<base::RefCountedString>&,
                                   bool has_more_events)>;
  void Flush(const OutputCallback& cb, bool use_worker_thread = false);

  // Cancels tracing and discards collected data.
  void CancelTracing(const OutputCallback& cb);
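
  // Example (sketch): collecting the flushed chunks into JSON with
  // TraceResultBuffer (declared alongside TraceBuffer in this directory).
  // CollectChunk is a hypothetical helper bound as the output callback, and
  // |result_buffer| is a TraceResultBuffer that outlives the flush.
  //
  //   void CollectChunk(TraceResultBuffer* buffer,
  //                     const scoped_refptr<base::RefCountedString>& chunk,
  //                     bool has_more_events) {
  //     buffer->AddFragment(chunk->data());
  //     if (!has_more_events)
  //       buffer->Finish();
  //   }
  //
  //   TraceLog::GetInstance()->SetDisabled();
  //   TraceLog::GetInstance()->Flush(base::BindRepeating(
  //       &CollectChunk, base::Unretained(&result_buffer)));
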
  using AddTraceEventOverrideFunction = void (*)(TraceEvent*,
                                                 bool thread_will_flush,
                                                 TraceEventHandle* handle);
  using OnFlushFunction = void (*)();
  using UpdateDurationFunction =
      void (*)(const unsigned char* category_group_enabled,
               const char* name,
               TraceEventHandle handle,
               int thread_id,
               bool explicit_timestamps,
               const TimeTicks& now,
               const ThreadTicks& thread_now,
               ThreadInstructionCount thread_instruction_now);

  // The callbacks will be called up until the point where the flush is
  // finished, i.e. must be callable until OutputCallback is called with
  // has_more_events==false.
  void SetAddTraceEventOverrides(
      const AddTraceEventOverrideFunction& add_event_override,
      const OnFlushFunction& on_flush_callback,
      const UpdateDurationFunction& update_duration_callback);
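
  // Example (sketch): routing events to an external backend by installing
  // overrides. MyAddEvent, MyOnFlush and MyUpdateDuration are hypothetical
  // free functions matching the typedefs above.
  //
  //   TraceLog::GetInstance()->SetAddTraceEventOverrides(
  //       &MyAddEvent, &MyOnFlush, &MyUpdateDuration);
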
  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);
  static constexpr const unsigned char* GetBuiltinCategoryEnabled(
      const char* name) {
    TraceCategory* builtin_category =
        CategoryRegistry::GetBuiltinCategoryByName(name);
    if (builtin_category)
      return builtin_category->state_ptr();
    return nullptr;
  }
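
  // Example (sketch): roughly what the TRACE_EVENT* macros do with the state
  // byte before emitting an event (the real macros cache this pointer in a
  // static and go through the trace_event.h helpers).
  //
  //   const unsigned char* enabled =
  //       TraceLog::GetCategoryGroupEnabled("renderer,webkit");
  //   if (*enabled) {
  //     // The category group is enabled for recording and/or filtering, so
  //     // build the event and call one of the AddTraceEvent* methods below.
  //   }
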
  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  bool ShouldAddAfterUpdatingState(char phase,
                                   const unsigned char* category_group_enabled,
                                   const char* name,
                                   unsigned long long id,
                                   int thread_id,
                                   TraceArguments* args);
  TraceEventHandle AddTraceEvent(char phase,
                                 const unsigned char* category_group_enabled,
                                 const char* name,
                                 const char* scope,
                                 unsigned long long id,
                                 TraceArguments* args,
                                 unsigned int flags);
  TraceEventHandle AddTraceEventWithBindId(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      unsigned long long bind_id,
      TraceArguments* args,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithProcessId(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      int process_id,
      TraceArguments* args,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      int thread_id,
      const TimeTicks& timestamp,
      TraceArguments* args,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      unsigned long long bind_id,
      int thread_id,
      const TimeTicks& timestamp,
      TraceArguments* args,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamps(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      unsigned long long bind_id,
      int thread_id,
      const TimeTicks& timestamp,
      const ThreadTicks& thread_timestamp,
      TraceArguments* args,
      unsigned int flags);

  // Adds a metadata event that will be written when the trace log is flushed.
  void AddMetadataEvent(const unsigned char* category_group_enabled,
                        const char* name,
                        TraceArguments* args,
                        unsigned int flags);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);
  void UpdateTraceEventDurationExplicit(
      const unsigned char* category_group_enabled,
      const char* name,
      TraceEventHandle handle,
      int thread_id,
      bool explicit_timestamps,
      const TimeTicks& now,
      const ThreadTicks& thread_now,
      ThreadInstructionCount thread_instruction_now);

  void EndFilteredEvent(const unsigned char* category_group_enabled,
                        const char* name,
                        TraceEventHandle handle);
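
  // Example (sketch): a complete event added directly through this API rather
  // than via the TRACE_EVENT* macros. |enabled| is the pointer returned by
  // GetCategoryGroupEnabled() above; the phase and flag constants are assumed
  // to come from the trace event macro headers.
  //
  //   TraceEventHandle handle = TraceLog::GetInstance()->AddTraceEvent(
  //       TRACE_EVENT_PHASE_COMPLETE, enabled, "MyEvent", /*scope=*/nullptr,
  //       /*id=*/0, /*args=*/nullptr, TRACE_EVENT_FLAG_NONE);
  //   ...  // do the work being measured
  //   TraceLog::GetInstance()->UpdateTraceEventDuration(enabled, "MyEvent",
  //                                                     handle);
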
  int process_id() const { return process_id_; }
  const std::string& process_name() const { return process_name_; }

  uint64_t MangleEventId(uint64_t id);

  // Exposed for unittesting:

  // Testing factory for TraceEventFilter.
  typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
      const std::string& /* predicate_name */);
  void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
    filter_factory_for_testing_ = factory;
  }
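
  // Example (sketch): a test-only filter factory. MyTestFilter is a
  // hypothetical TraceEventFilter subclass defined by the test.
  //
  //   std::unique_ptr<TraceEventFilter> MakeTestFilter(
  //       const std::string& predicate_name) {
  //     return std::make_unique<MyTestFilter>(predicate_name);
  //   }
  //   ...
  //   TraceLog::GetInstance()->SetFilterFactoryForTesting(&MakeTestFilter);
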
  // Allows clearing up our singleton instance.
  static void ResetForTesting();

  // Allow tests to inspect TraceEvents.
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void set_process_name(const std::string& process_name) {
    AutoLock lock(lock_);
    process_name_ = process_name;
  }

  bool IsProcessNameEmpty() const { return process_name_.empty(); }

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first
  // on their sort index, ascending, then by their name, and then tid.
  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);

  // Allow setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);
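
  // Example (sketch): describing the current process to the trace viewer and
  // compensating for a known clock skew |skew| (a TimeDelta) relative to
  // another process. The names and label below are made up.
  //
  //   TraceLog* log = TraceLog::GetInstance();
  //   log->SetProcessID(static_cast<int>(base::GetCurrentProcId()));
  //   log->set_process_name("Renderer");
  //   log->UpdateProcessLabel(/*label_id=*/1, "example.com - Page Title");
  //   log->SetProcessSortIndex(2);
  //   log->SetTimeOffset(skew);
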
  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time, causing loss of unflushed
  // events.
  void SetCurrentThreadBlocksMessageLoop();

#if defined(OS_WIN)
  // This function is called by the ETW exporting module whenever the ETW
  // keyword (flags) changes. This keyword indicates which categories should be
  // exported, so whenever it changes, we adjust accordingly.
  void UpdateETWCategoryGroupEnabledFlags();
#endif

  // Replaces |logged_events_| with a new TraceBuffer for testing.
  void SetTraceBufferForTesting(std::unique_ptr<TraceBuffer> trace_buffer);
 private:
  typedef unsigned int InternalTraceOptions;

  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceConfigToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, ConfigTraceBufferLimit);

  friend class base::NoDestructor<TraceLog>;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  // Enable/disable each category group based on the current mode_,
  // category_filter_ and event_filters_enabled_.
  // Enable the category group in the recording mode if category_filter_ is
  // not null and matches the category group. Enable the category for
  // filtering if any filter in event_filters_enabled_ enables it.
  void UpdateCategoryRegistry();
  void UpdateCategoryState(TraceCategory* category);

  void CreateFiltersForTraceConfig();

  InternalTraceOptions GetInternalOptionsFromTraceConfig(
      const TraceConfig& config);
  class ThreadLocalEventBuffer;
  class OptionalAutoLock;
  struct RegisteredAsyncObserver;

  TraceLog();
  ~TraceLog() override;

  void AddMetadataEventsWhileLocked();
  template <typename T>
  void AddMetadataEventWhileLocked(int thread_id,
                                   const char* metadata_name,
                                   const char* arg_name,
                                   const T& value);

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked(uint8_t modes);

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  void FlushInternal(const OutputCallback& cb,
                     bool use_worker_thread,
                     bool discard_events);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation, bool discard_events);
  // Usually it runs on a different thread.
  static void ConvertTraceEventsToTraceFormat(
      std::unique_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback,
      const ArgumentFilterPredicate& argument_filter_predicate);
  void FinishFlush(int generation, bool discard_events);
  void OnFlushTimeout(int generation, bool discard_events);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const {
    // This should be TRACE_TIME_TICKS_NOW but include order makes that hard.
    return OffsetTimestamp(base::subtle::TimeTicksNowIgnoringOverride());
  }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently
  // used trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
  static const InternalTraceOptions kInternalEnableArgumentFilter;
  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  uint8_t enabled_modes_;  // See TraceLog::Mode.
  int num_traces_recorded_;
  std::unique_ptr<TraceBuffer> logged_events_;
  std::vector<std::unique_ptr<TraceEvent>> metadata_events_;

  // This lock protects access to the observer lists.
  mutable Lock observers_lock_;
  bool dispatching_to_observers_ = false;
  std::vector<EnabledStateObserver*> enabled_state_observers_;
  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
      async_observers_;
  // Manages ownership of the owned observers. The owned observers will also be
  // added to |enabled_state_observers_|.
  std::vector<std::unique_ptr<EnabledStateObserver>>
      owned_enabled_state_observer_copy_;

  std::string process_name_;
  std::unordered_map<int, std::string> process_labels_;
  int process_sort_index_;
  std::unordered_map<int, int> thread_sort_indices_;
  std::unordered_map<int, std::string> thread_names_;
  base::Time process_creation_time_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  std::unordered_map<int, base::stack<TimeTicks>> thread_event_start_times_;
  std::unordered_map<std::string, int> thread_colors_;

  TimeTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  TimeDelta time_offset_;

  subtle::AtomicWord /* Options */ trace_options_;

  TraceConfig trace_config_;
  TraceConfig::EventFilters enabled_event_filters_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains task runners for the threads that have had at least one event
  // added into the local event buffer.
  std::unordered_map<int, scoped_refptr<SingleThreadTaskRunner>>
      thread_task_runners_;

  // For events which can't be added into the thread local buffer, e.g. events
  // from threads without a message loop.
  std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<SequencedTaskRunner> flush_task_runner_;
  ArgumentFilterPredicate argument_filter_predicate_;
  MetadataFilterPredicate metadata_filter_predicate_;
  subtle::AtomicWord generation_;
  bool use_worker_thread_;
  std::atomic<AddTraceEventOverrideFunction> add_trace_event_override_{nullptr};
  std::atomic<OnFlushFunction> on_flush_override_{nullptr};
  std::atomic<UpdateDurationFunction> update_duration_override_{nullptr};

  FilterFactoryForTesting filter_factory_for_testing_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

}  // namespace trace_event
}  // namespace base

#endif  // BASE_TRACE_EVENT_TRACE_LOG_H_