// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <new>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/strings/string_piece.h"
#include "build/build_config.h"

namespace base {

class HistogramBase;
class MemoryMappedFile;

// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
//   struct MyPersistentObjectType {
//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
//     // Expected size for 32/64-bit check. Update this if structure changes!
//     static constexpr size_t kExpectedInstanceSize = 20;
//
//     ...
//   };
//
// kPersistentTypeId: This value is an arbitrary identifier that allows the
// identification of these objects in the allocator, including the ability
// to find them via iteration. The number is arbitrary but using the first
// four bytes of the SHA1 hash of the type name means that there shouldn't
// be any conflicts with other types that may also be stored in the memory.
// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
// be used to generate the hash if the type name seems common. Use a command
// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
// If the structure layout changes, ALWAYS increment this number so that
// newer versions of the code don't try to interpret persistent data written
// by older versions with a different layout.
//
// kExpectedInstanceSize: This value is the hard-coded number that matches
// what sizeof(T) would return. By providing it explicitly, the allocator can
// verify that the structure is compatible between both 32-bit and 64-bit
// versions of the code.
//
// Using New manages the memory and then calls the default constructor for the
// object. Given that objects are persistent, no destructor is ever called
// automatically, though a caller can explicitly call Delete to destruct it and
// change the type to something indicating it is no longer in use.
//
// Though persistent memory segments are transferable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing data
// as corrupt and refuse to access any of it.
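//
// A minimal usage sketch (illustrative only; the buffer, id, and name are
// hypothetical, and MyPersistentObjectType is the example type from above):
//
//   char buffer[1 << 16] = {};  // Fresh segments must start zeroed.
//   PersistentMemoryAllocator allocator(buffer, sizeof(buffer),
//                                       0 /* page_size */, 0x1234 /* id */,
//                                       "example", false /* readonly */);
//   MyPersistentObjectType* obj = allocator.New<MyPersistentObjectType>();
//   if (obj)
//     allocator.MakeIterable(obj);  // Let other processes find it.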
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // These states are used to indicate the overall condition of the memory
  // segment irrespective of what is stored within it. Because the data is
  // often persistent and thus needs to be readable by different versions of
  // a program, these values are fixed and can never change.
  enum MemoryState : uint8_t {
    // Persistent memory starts all zeros and so shows "uninitialized".
    MEMORY_UNINITIALIZED = 0,

    // The header has been written and the memory is ready for use.
    MEMORY_INITIALIZED = 1,

    // The data should be considered deleted. This would be set when the
    // allocator is being cleaned up. If file-backed, the file is likely
    // to be deleted but since deletion can fail for a variety of reasons,
    // having this extra status means a future reader can realize what
    // should have happened.
    MEMORY_DELETED = 2,

    // Outside code can create states starting with this number; these too
    // must never change between code versions.
    MEMORY_USER_DEFINED = 100,
  };

  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it is always such so the only possible difference between successive
  // iterations is for more to be added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
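  //
  // A sketch of typical iteration (illustrative; |allocator| is assumed to
  // point at a live PersistentMemoryAllocator):
  //
  //   PersistentMemoryAllocator::Iterator iter(allocator);
  //   uint32_t type;
  //   for (Reference ref = iter.GetNext(&type); ref;
  //        ref = iter.GetNext(&type)) {
  //     if (type == MyPersistentObjectType::kPersistentTypeId) {
  //       const MyPersistentObjectType* obj =
  //           iter.GetAsObject<MyPersistentObjectType>(ref);
  //       if (obj) { /* ... */ }
  //     }
  //   }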
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given |allocator|, starting at the beginning.
    // The allocator must live beyond the lifetime of the iterator. This class
    // has read-only access to the allocator (hence "const") but the returned
    // references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the |starting_after| reference. The first call
    // to GetNext() will return the next object found after that reference. The
    // reference must be to an "iterable" object; references to non-iterable
    // objects (those that never had MakeIterable() called for them) will cause
    // a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the |starting_after| reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If the iterator was constructed or reset with a |starting_after|
    // reference, this will return that value.
    Reference GetLast();

    // Gets the next iterable, storing that type in |type_return|. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return);

    // Similar to above but gets the next iterable of a specific |type_match|.
    // This should not be mixed with calls to GetNext() because any allocations
    // skipped here due to a type mismatch will never be returned by later
    // calls to GetNext(), meaning it's possible to completely miss entries.
    Reference GetNextOfType(uint32_t type_match);

    // As above but works using object type.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but works using objects and returns null if not found.
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Convert a generic pointer back into a reference. A null reference will
    // be returned if |memory| is not inside the persistent segment or does not
    // point to an object of the specified |type_id|.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but convert an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    const PersistentMemoryAllocator* allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;

    DISALLOW_COPY_AND_ASSIGN(Iterator);
  };

  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,
  };

  enum : uint32_t {
    // A value that will match any type when doing lookups.
    kTypeIdAny = 0x00000000,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kTypeIdTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];

  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's |base| address, the total |size| of the block, and any internal
  // |page| size (zero if not paged) across which allocations should not span.
  // The |id| is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The |name|, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this value;
  // other processes can learn it from the shared state. If the underlying
  // memory is |readonly| then no changes will be made to it. The resulting
  // object should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);
  virtual ~PersistentMemoryAllocator();

  // Check if memory segment is acceptable for creation of an Allocator. This
  // doesn't do any analysis of the data and so doesn't guarantee that the
  // contents are valid, just that the parameters won't cause the program to
  // abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
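  //
  // For segments whose definition can vary at run-time, a sketch of the
  // recommended guard (names here are illustrative):
  //
  //   if (PersistentMemoryAllocator::IsMemoryAcceptable(base, size,
  //                                                     page_size, readonly)) {
  //     PersistentMemoryAllocator allocator(base, size, page_size, id,
  //                                         "name", readonly);
  //     ...
  //   }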
  static bool IsMemoryAcceptable(const void* data, size_t size,
                                 size_t page_size, bool readonly);

  // Get the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Get the internal name of this allocator (possibly an empty string).
  const char* Name() const;

  // Is this segment open only for read?
  bool IsReadonly() const { return readonly_; }

  // Manage the saved state of the memory.
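  //
  // For example, a sketch of retiring a segment during cleanup (assumes a
  // writable allocator):
  //
  //   allocator->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
  //   allocator->Flush(false);  // Best-effort push to any backing store.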
  void SetMemoryState(uint8_t memory_state);
  uint8_t GetMemoryState() const;

  // Create internal histograms for tracking memory use and allocation sizes
  // for allocator of |name| (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
  // with the following histograms:
  //    UMA.PersistentAllocator.name.Errors
  //    UMA.PersistentAllocator.name.UsedPct
  void CreateTrackingHistograms(base::StringPiece name);

  // Flushes the persistent memory to any backing store. This typically does
  // nothing but is used by the FilePersistentMemoryAllocator to inform the
  // OS that all the data should be sent to the disk immediately. This is
  // useful in the rare case where something has just been stored that needs
  // to survive a hard shutdown of the machine like from a power failure.
  // The |sync| parameter indicates if this call should block until the flush
  // is complete but is only advisory and may or may not have an effect
  // depending on the capabilities of the OS. Synchronous flushes are allowed
  // only from threads that are allowed to do I/O but since |sync| is only
  // advisory, all flushes should be done on IO-capable threads.
  void Flush(bool sync);

  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t used() const;

  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
  // code and size-of(|T|) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A |type_id| of
  // kTypeIdAny (zero) will match any though the size is still checked. NULL is
  // returned if any problem is detected, such as corrupted storage or incorrect
  // parameters. Callers MUST check that the returned value is non-null EVERY
  // TIME before accessing it or risk crashing! Once dereferenced, the pointer
  // is safe to reuse forever.
  //
  // It is essential that the object be of a fixed size. All fields must be of
  // a defined type that does not change based on the compiler or the CPU
  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
  // Unacceptable are int, bool, and wchar_t, which are implementation-defined
  // with regard to their size.
  //
  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
  // differently between 32 and 64 bit architectures. Either put the bigger
  // elements first, group smaller elements into blocks the size of larger
  // elements, or manually insert padding fields as appropriate for the
  // largest architecture, including at the end.
  //
  // To protect against mistakes, all objects must have the attribute
  // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
  // instance size is not fixed, at least one build will fail.
  //
  // If the size of a structure changes, the type-ID used to recognize it
  // should also change so later versions of the code don't try to read
  // incompatible structures from earlier versions.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. The caller can add it back, if
  // necessary, based on knowledge of how the allocator is being used.
  template <typename T>
  T* GetAsObject(Reference ref) {
    static_assert(std::is_standard_layout<T>::value, "only standard objects");
    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }
  template <typename T>
  const T* GetAsObject(Reference ref) const {
    static_assert(std::is_standard_layout<T>::value, "only standard objects");
    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }

  // Like GetAsObject but get an array of simple, fixed-size types.
  //
  // Use a |count| of the required number of array elements, or kSizeAny.
  // GetAllocSize() can be used to calculate the upper bound but isn't reliable
  // because padding can make space for extra elements that were not written.
  //
  // Remember that an array of char is a string but may not be NUL terminated.
  //
  // There are no compile-time or run-time checks to ensure 32/64-bit size
  // compatibility when using these accessors. Only use fixed-size types such
  // as char, float, double, or (u)intXX_t.
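  //
  // A sketch of storing a (possibly non-terminated) string; the type-id
  // 0x12345678 is hypothetical:
  //
  //   Reference ref = allocator.Allocate(length, 0x12345678);
  //   char* str = allocator.GetAsArray<char>(ref, 0x12345678, length);
  //   if (str)
  //     memcpy(str, source, length);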
  template <typename T>
  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }
  template <typename T>
  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }

  // Get the corresponding reference for an object held in persistent memory.
  // If the |memory| is not valid or the type does not match, a kReferenceNull
  // result will be returned.
  Reference GetAsReference(const void* memory, uint32_t type_id) const;

  // Get the number of bytes allocated to a block. This is useful when storing
  // arrays in order to validate the ending boundary. The returned value will
  // include any padding added to achieve the required alignment and so could
  // be larger than given in the original Allocate() request.
  size_t GetAllocSize(Reference ref) const;

  // Access the internal "type" of an object. This generally isn't necessary
  // but can be used to "clear" the type and so effectively mark it as deleted
  // even though the memory stays valid and allocated. Changing the type is
  // an atomic compare/exchange and so requires knowing the existing value.
  // It will return false if the existing type is not what is expected.
  //
  // Changing the type doesn't mean the data is compatible with the new type.
  // Passing true for |clear| will zero the memory after the type has been
  // changed away from |from_type_id| but before it becomes |to_type_id|,
  // meaning that it is done in a manner that is thread-safe. Memory is
  // guaranteed to be zeroed atomically by machine-word in a monotonically
  // increasing order.
  //
  // It will likely be necessary to reconstruct the type before it can be used.
  // Changing the type WILL NOT invalidate existing pointers to the data, either
  // in this process or others, so changing the data structure could have
  // unpredictable results. USE WITH CARE!
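  //
  // For example, a sketch of retiring an object while keeping its memory
  // allocated (kTypeIdMyObject and kTypeIdFree are hypothetical):
  //
  //   if (allocator.ChangeType(ref, kTypeIdFree, kTypeIdMyObject, true)) {
  //     // Memory was zeroed and retyped; pointers held elsewhere still
  //     // reference the block, so readers must tolerate the change.
  //   }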
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref,
                  uint32_t to_type_id,
                  uint32_t from_type_id,
                  bool clear);

  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);

  // Get the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;

  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  void SetCorrupt() const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during regular
  // operation, such as how much memory is currently used. This should be
  // called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();

  // While the above works much like malloc & free, these next methods provide
  // an "object" interface similar to new and delete.

  // Reserve space in the memory segment of the desired |size| and |type_id|.
  // A return value of zero indicates the allocation failed, otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
  // larger and will always be a multiple of 8 bytes (64 bits).
  Reference Allocate(size_t size, uint32_t type_id);

  // Allocate and construct an object in persistent memory. The type must have
  // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
  // static constexpr fields that are used to ensure compatibility between
  // software versions. An optional size parameter can be specified to force
  // the allocation to be bigger than the size of the object; this is useful
  // when the last field is actually variable length.
  template <typename T>
  T* New(size_t size) {
    if (size < sizeof(T))
      size = sizeof(T);
    Reference ref = Allocate(size, T::kPersistentTypeId);
    void* mem =
        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    if (!mem)
      return nullptr;
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    return new (mem) T();
  }
  template <typename T>
  T* New() {
    return New<T>(sizeof(T));
  }

  // Similar to New, above, but construct the object out of an existing memory
  // block and of an expected type. If |clear| is true, memory will be zeroed
  // before construction. Though this is not standard object behavior, it
  // is present to match with new allocations that always come from zeroed
  // memory. Anything previously present simply ceases to exist; no destructor
  // is called for it so explicitly Delete() the old object first if need be.
  // Calling this will not invalidate existing pointers to the object, either
  // in this process or others, so changing the object could have unpredictable
  // results. USE WITH CARE!
  template <typename T>
  T* New(Reference ref, uint32_t from_type_id, bool clear) {
    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of having
    // to change the type back.
    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
    if (!mem)
      return nullptr;
    // Ensure the allocator's internal alignment is sufficient for this object.
    // This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    // Change the type, clearing the memory if so desired. The new type is
    // "transitioning" so that there is no race condition with the construction
    // of the object should another thread be simultaneously iterating over
    // data. This will "acquire" the memory so no changes get reordered before
    // it.
    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
      return nullptr;
    // Construct an object of the desired type on this memory, just as if
    // New() had been called to create it.
    T* obj = new (mem) T();
    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
    DCHECK(success);
    return obj;
  }

  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0).
  template <typename T>
  void Delete(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if |ref| is invalid.
    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
      return;
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
    DCHECK(success);
  }
  template <typename T>
  void Delete(T* obj) {
    Delete<T>(obj, 0);
  }

  // As above but works with objects allocated from persistent memory.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }

  // As above but works with an object allocated from persistent memory.
  template <typename T>
  void MakeIterable(const T* obj) {
    MakeIterable(GetAsReference<T>(obj));
  }

 protected:
  enum MemoryType {
    MEM_EXTERNAL,
    MEM_MALLOC,
    MEM_VIRTUAL,
    MEM_SHARED,
    MEM_FILE,
  };

  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    void* base;
    MemoryType type;
  };

  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);

  // Implementation of Flush that accepts how much to flush.
  virtual void FlushPartial(size_t length, bool sync);

  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;      // Type of memory allocation.
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
  const size_t vm_page_size_;      // The page size used by the OS.

 private:
  struct SharedMetadata;
  struct BlockHeader;
  static const uint32_t kAllocAlignment;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference.
  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
                                       uint32_t size, bool queue_ok,
                                       bool free_ok) const;
  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
                                 bool queue_ok, bool free_ok) {
    return const_cast<volatile BlockHeader*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
            ref, type_id, size, queue_ok, free_ok));
  }

  // Get the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
                                    uint32_t size) const;
  volatile void* GetBlockData(Reference ref, uint32_t type_id,
                              uint32_t size) {
    return const_cast<volatile void*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
            ref, type_id, size));
  }

  // Record an error in the internal histogram.
  void RecordError(int error) const;

  const bool readonly_;                // Indicates access to read-only memory.
  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.

  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
  HistogramBase* used_histogram_;    // Histogram recording used space.
  HistogramBase* errors_histogram_;  // Histogram recording errors.

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
};

// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
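//
// A sketch of typical use (size, id, and name are illustrative):
//
//   LocalPersistentMemoryAllocator allocator(1 << 20, 0x5678, "local");
//   MyPersistentObjectType* obj = allocator.New<MyPersistentObjectType>();
//   // During shutdown, allocator.data() and allocator.used() could be
//   // written to stable storage for later analysis.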
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);
  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|.
  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);

  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};

// This allocator takes a writable shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT WritableSharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  WritableSharedPersistentMemoryAllocator(
      base::WritableSharedMemoryMapping memory,
      uint64_t id,
      base::StringPiece name);
  ~WritableSharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsSharedMemoryAcceptable(
      const base::WritableSharedMemoryMapping& memory);

 private:
  base::WritableSharedMemoryMapping shared_memory_;

  DISALLOW_COPY_AND_ASSIGN(WritableSharedPersistentMemoryAllocator);
};

// This allocator takes a read-only shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  ReadOnlySharedPersistentMemoryAllocator(
      base::ReadOnlySharedMemoryMapping memory,
      uint64_t id,
      base::StringPiece name);
  ~ReadOnlySharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsSharedMemoryAcceptable(
      const base::ReadOnlySharedMemoryMapping& memory);

 private:
  base::ReadOnlySharedMemoryMapping shared_memory_;

  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedPersistentMemoryAllocator);
};

#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);
  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
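  //
  // A sketch of opening a file-backed segment (the path handling and access
  // mode are illustrative, not prescriptive):
  //
  //   auto mmfile = std::make_unique<MemoryMappedFile>();
  //   mmfile->Initialize(file_path, MemoryMappedFile::READ_WRITE);
  //   if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false)) {
  //     FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, id,
  //                                             "name", false);
  //     ...
  //   }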
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

  // Load all or a portion of the file into memory for fast access. This can
  // be used to force the disk access to be done on a background thread and
  // then have the data available to be read on the main thread with a greatly
  // reduced risk of blocking due to I/O. The risk isn't eliminated completely
  // because the system could always release the memory when under pressure
  // but this can happen to any block of memory (i.e. swapped out).
  void Cache();

 protected:
  // PersistentMemoryAllocator:
  void FlushPartial(size_t length, bool sync) override;

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;

  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
#endif  // !defined(OS_NACL)

// An allocation that is defined but not executed until required at a later
// time. This allows for potential users of an allocation to be decoupled
// from the logic that defines it. In addition, there can be multiple users
// of the same allocation or any region thereof that are guaranteed to always
// use the same space. It's okay to copy/move these objects.
//
// This is a top-level class instead of an inner class of the PMA so that it
// can be forward-declared in other header files without the need to include
// the full contents of this file.
class BASE_EXPORT DelayedPersistentAllocation {
 public:
  using Reference = PersistentMemoryAllocator::Reference;

  // Creates a delayed allocation using the specified |allocator|. When
  // needed, the memory will be allocated using the specified |type| and
  // |size|. If |offset| is given, the returned pointer will be at that
  // offset into the segment; this allows combining allocations into a
  // single persistent segment to reduce overhead and means an "all or
  // nothing" request. Note that |size| is always the total memory size
  // and |offset| is just indicating the start of a block within it. If
  // |make_iterable| was true, the allocation will be made iterable when it
  // is created; already existing allocations are not changed.
  //
  // Once allocated, a reference to the segment will be stored at |ref|.
  // This shared location must be initialized to zero (0); it is checked
  // with every Get() request to see if the allocation has already been
  // done. If reading |ref| outside of this object, be sure to do an
  // "acquire" load. Don't write to it -- leave that to this object.
  //
  // For convenience, methods taking both Atomic32 and std::atomic<Reference>
  // are defined.
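  //
  // A sketch of use (the type-id and MyData type are hypothetical):
  //
  //   static std::atomic<Reference> g_ref(0);  // Shared, zero-initialized.
  //   DelayedPersistentAllocation delayed(allocator, &g_ref, 0x12345678,
  //                                       sizeof(MyData), false);
  //   MyData* data = static_cast<MyData*>(delayed.Get());
  //   if (data) { /* ... */ }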
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              subtle::Atomic32* ref,
                              uint32_t type,
                              size_t size,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              subtle::Atomic32* ref,
                              uint32_t type,
                              size_t size,
                              size_t offset,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              size_t offset,
                              bool make_iterable);
  ~DelayedPersistentAllocation();

  // Gets a pointer to the defined allocation. This will realize the request
  // and update the reference provided during construction. The memory will
  // be zeroed the first time it is returned; after that it is shared with
  // all other Get() requests and so shows any changes made to it elsewhere.
  //
  // If the allocation fails for any reason, null will be returned. This works
  // even on "const" objects because the allocation is already defined, just
  // delayed.
  void* Get() const;

  // Gets the internal reference value. If this returns a non-zero value then
  // a subsequent call to Get() will do nothing but convert that reference into
  // a memory location -- useful for accessing an existing allocation without
  // creating one unnecessarily.
  Reference reference() const {
    return reference_->load(std::memory_order_relaxed);
  }

 private:
  // The underlying object that does the actual allocation of memory. Its
  // lifetime must exceed that of all DelayedPersistentAllocation objects
  // that use it.
  PersistentMemoryAllocator* const allocator_;

  // The desired type and size of the allocated segment plus the offset
  // within it for the defined request.
  const uint32_t type_;
  const uint32_t size_;
  const uint32_t offset_;

  // Flag indicating if allocation should be made iterable when done.
  const bool make_iterable_;

  // The location at which a reference to the allocated segment is to be
  // stored once the allocation is complete. If multiple delayed allocations
  // share the same pointer then an allocation on one will amount to an
  // allocation for all.
  volatile std::atomic<Reference>* const reference_;

  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
};

}  // namespace base

#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_