
/tensorflow/core/common_runtime/bfc_allocator.h

https://gitlab.com/github-cloud-corporation/tensorflow
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMMON_RUNTIME_BFC_ALLOCATOR_H_
#define TENSORFLOW_COMMON_RUNTIME_BFC_ALLOCATOR_H_

#include <algorithm>  // std::upper_bound, std::min, std::max
#include <cstdint>    // std::uintptr_t
#include <memory>
#include <set>        // Bin::FreeChunkSet
#include <string>
#include <unordered_map>
#include <vector>

#include "tensorflow/core/common_runtime/allocator_retry.h"
#include "tensorflow/core/common_runtime/visitable_allocator.h"
#include "tensorflow/core/lib/gtl/stl_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"

namespace tensorflow {
// A memory allocator that implements a 'best-fit with coalescing'
// algorithm. This is essentially a very simple version of Doug Lea's
// malloc (dlmalloc).
//
// The goal of this allocator is to support defragmentation via
// coalescing. One assumption we make is that the process using this
// allocator owns pretty much all of the memory, and that nearly
// all requests to allocate memory go through this interface.
class BFCAllocator : public VisitableAllocator {
 public:
  // Takes ownership of sub_allocator.
  BFCAllocator(SubAllocator* sub_allocator, size_t total_memory,
               bool allow_growth, const string& name);
  ~BFCAllocator() override;

  string Name() override { return name_; }

  void* AllocateRaw(size_t alignment, size_t num_bytes) override;
  void* AllocateRaw(size_t alignment, size_t num_bytes,
                    const AllocationAttributes& allocation_attr) override;

  void DeallocateRaw(void* ptr) override;

  void AddAllocVisitor(Visitor visitor) override;

  // Does nothing, because memory is never freed.
  void AddFreeVisitor(Visitor visitor) override {}

  bool TracksAllocationSizes() override;

  size_t RequestedSize(void* ptr) override;

  size_t AllocatedSize(void* ptr) override;

  int64 AllocationId(void* ptr) override;

  void GetStats(AllocatorStats* stats) override;

 private:
  struct Bin;

  void* AllocateRawInternal(size_t alignment, size_t num_bytes,
                            bool dump_log_on_failure);
  void DeallocateRawInternal(void* ptr);

  // A ChunkHandle is an index into the chunks_ vector in BFCAllocator;
  // kInvalidChunkHandle means an invalid chunk.
  typedef int ChunkHandle;
  static const int kInvalidChunkHandle = -1;

  typedef int BinNum;
  static const int kInvalidBinNum = -1;
  static const int kNumBins = 21;

  // Chunks point to memory. Their prev/next pointers form a
  // doubly-linked list of addresses sorted by base address that
  // must be contiguous. Chunks contain information about whether
  // they are in use or whether they are free, and contain a pointer
  // to the bin they are in.
  struct Chunk {
    size_t size = 0;  // Full size of buffer.

    // We sometimes give chunks that are larger than needed to reduce
    // fragmentation. requested_size keeps track of what the client
    // actually wanted so we can understand whether our splitting
    // strategy is efficient.
    size_t requested_size = 0;

    // allocation_id is set to -1 when the chunk is not in use. It is assigned
    // a value greater than zero before the chunk is returned from
    // AllocateRaw, and this value is unique among values assigned by
    // the parent allocator.
    int64 allocation_id = -1;
    void* ptr = nullptr;  // pointer to granted subbuffer.

    // If not kInvalidChunkHandle, the memory referred to by 'prev' is directly
    // preceding the memory used by this chunk. E.g., it should start
    // at 'ptr - prev->size'.
    ChunkHandle prev = kInvalidChunkHandle;

    // If not kInvalidChunkHandle, the memory referred to by 'next' is directly
    // following the memory used by this chunk. E.g., it should be at
    // 'ptr + size'.
    ChunkHandle next = kInvalidChunkHandle;

    // What bin are we in?
    BinNum bin_num = kInvalidBinNum;

    bool in_use() const { return allocation_id != -1; }

    string DebugString(BFCAllocator* a,
                       bool recurse) NO_THREAD_SAFETY_ANALYSIS {
      string dbg;
      strings::StrAppend(&dbg, " Size: ", strings::HumanReadableNumBytes(size),
                         " | Requested Size: ",
                         strings::HumanReadableNumBytes(requested_size),
                         " | in_use: ", in_use());
      if (recurse && prev != BFCAllocator::kInvalidChunkHandle) {
        Chunk* p = a->ChunkFromHandle(prev);
        strings::StrAppend(&dbg, ", prev: ", p->DebugString(a, false));
      }
      if (recurse && next != BFCAllocator::kInvalidChunkHandle) {
        Chunk* n = a->ChunkFromHandle(next);
        strings::StrAppend(&dbg, ", next: ", n->DebugString(a, false));
      }
      return dbg;
    }
  };
  // A Bin is a collection of similar-sized free chunks.
  struct Bin {
    // All chunks in this bin have >= bin_size memory.
    size_t bin_size = 0;

    struct ChunkComparator {
      explicit ChunkComparator(BFCAllocator* allocator)
          : allocator_(allocator) {}
      // Sort first by size and then use pointer address as a tie breaker.
      bool operator()(const ChunkHandle ha,
                      const ChunkHandle hb) const NO_THREAD_SAFETY_ANALYSIS {
        const Chunk* a = allocator_->ChunkFromHandle(ha);
        const Chunk* b = allocator_->ChunkFromHandle(hb);
        if (a->size != b->size) {
          return a->size < b->size;
        }
        return a->ptr < b->ptr;
      }

     private:
      BFCAllocator* allocator_;  // The parent allocator.
    };

    typedef std::set<ChunkHandle, ChunkComparator> FreeChunkSet;

    // List of free chunks within the bin, sorted by chunk size.
    // Chunk * not owned.
    FreeChunkSet free_chunks;

    Bin(BFCAllocator* allocator, size_t bs)
        : bin_size(bs), free_chunks(ChunkComparator(allocator)) {}
  };

  static const size_t kMinAllocationBits = 8;
  static const size_t kMinAllocationSize = 1 << kMinAllocationBits;
  // AllocationRegion maps pointers to ChunkHandles for a single
  // contiguous memory region.
  //
  // This class is thread-compatible.
  class AllocationRegion {
   public:
    AllocationRegion(void* ptr, size_t memory_size)
        : ptr_(ptr),
          memory_size_(memory_size),
          end_ptr_(
              static_cast<void*>(static_cast<char*>(ptr_) + memory_size_)) {
      DCHECK_EQ(0, memory_size % kMinAllocationSize);
      const size_t n_handles =
          (memory_size + kMinAllocationSize - 1) / kMinAllocationSize;
      handles_ = new ChunkHandle[n_handles];
      for (size_t i = 0; i < n_handles; i++) {
        handles_[i] = kInvalidChunkHandle;
      }
    }

    AllocationRegion() {}

    ~AllocationRegion() { delete[] handles_; }

    AllocationRegion(AllocationRegion&& other) { Swap(other); }

    AllocationRegion& operator=(AllocationRegion&& other) {
      Swap(other);
      return *this;
    }

    void* ptr() const { return ptr_; }
    void* end_ptr() const { return end_ptr_; }
    size_t memory_size() const { return memory_size_; }
    ChunkHandle get_handle(const void* p) const {
      return handles_[IndexFor(p)];
    }
    void set_handle(const void* p, ChunkHandle h) { handles_[IndexFor(p)] = h; }
    void erase(const void* p) { set_handle(p, kInvalidChunkHandle); }

   private:
    void Swap(AllocationRegion& other) {
      std::swap(ptr_, other.ptr_);
      std::swap(memory_size_, other.memory_size_);
      std::swap(end_ptr_, other.end_ptr_);
      std::swap(handles_, other.handles_);
    }

    int IndexFor(const void* p) const {
      std::uintptr_t p_int = reinterpret_cast<std::uintptr_t>(p);
      std::uintptr_t base_int = reinterpret_cast<std::uintptr_t>(ptr_);
      DCHECK_GE(p_int, base_int);
      DCHECK_LT(p_int, base_int + memory_size_);
      return static_cast<int>(((p_int - base_int) >> kMinAllocationBits));
    }

    // Metadata about the allocation region.
    void* ptr_ = nullptr;
    size_t memory_size_ = 0;
    void* end_ptr_ = nullptr;

    // Array of size "memory_size / kMinAllocationSize". It is indexed by
    // (p - base) / kMinAllocationSize, and contains the ChunkHandle for the
    // memory allocation represented by "p".
    ChunkHandle* handles_ = nullptr;

    TF_DISALLOW_COPY_AND_ASSIGN(AllocationRegion);
  };
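
  // Note on the handle table above: handles_ has one slot per
  // kMinAllocationSize (256-byte) unit of the region, and IndexFor simply
  // shifts the byte offset right by kMinAllocationBits. For example, the
  // pointer 'ptr_ + 1024' maps to handles_[4].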
  // RegionManager aggregates one or more "AllocationRegions" and provides
  // a layer of indirection from pointers to the underlying ChunkHandle,
  // allowing allocation across multiple discontiguous memory regions.
  //
  // This class is thread-compatible.
  class RegionManager {
   public:
    RegionManager() {}
    ~RegionManager() {}

    void AddAllocationRegion(void* ptr, size_t memory_size) {
      // Insert sorted by end_ptr.
      auto entry =
          std::upper_bound(regions_.begin(), regions_.end(), ptr, &Comparator);
      regions_.insert(entry, AllocationRegion(ptr, memory_size));
    }

    ChunkHandle get_handle(const void* p) const {
      return RegionFor(p)->get_handle(p);
    }

    void set_handle(const void* p, ChunkHandle h) {
      return MutableRegionFor(p)->set_handle(p, h);
    }
    void erase(const void* p) { return MutableRegionFor(p)->erase(p); }

    const std::vector<AllocationRegion>& regions() const { return regions_; }

   private:
    static bool Comparator(const void* ptr, const AllocationRegion& other) {
      return ptr < other.end_ptr();
    }

    AllocationRegion* MutableRegionFor(const void* p) {
      return const_cast<AllocationRegion*>(RegionFor(p));
    }

    const AllocationRegion* RegionFor(const void* p) const {
      auto entry =
          std::upper_bound(regions_.begin(), regions_.end(), p, &Comparator);

      if (entry != regions_.end()) {
        return &(*entry);
      }

      LOG(FATAL) << "Could not find Region for " << p;
      return nullptr;
    }

   private:
    std::vector<AllocationRegion> regions_;
  };
  // Returns 'bytes' rounded up to the next highest kMinAllocationSize.
  size_t RoundedBytes(size_t bytes);

  // Try to add a new memory region that can satisfy an allocation of
  // 'rounded_bytes' bytes. Returns true on success and false on
  // failure.
  bool Extend(size_t rounded_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns a pointer to an underlying allocated chunk of size
  // 'rounded_bytes'.
  void* FindChunkPtr(BinNum bin_num, size_t rounded_bytes, size_t num_bytes)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Splits the chunk specified by 'h' into two chunks, one at least
  // of size 'num_bytes'.
  void SplitChunk(ChunkHandle h, size_t num_bytes)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Merges the two chunk handles. Requires that the chunks are
  // contiguous in their allocation.
  void Merge(ChunkHandle h, ChunkHandle h2) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Frees the memory represented by 'h', coalescing the chunk if
  // possible.
  void FreeAndMaybeCoalesce(ChunkHandle h) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Adds the chunk 'h' to the proper free bin.
  void InsertFreeChunkIntoBin(ChunkHandle h) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Removes the free chunk pointed to by 'c' from the set free_chunks.
  void RemoveFreeChunkIterFromBin(Bin::FreeChunkSet* free_chunks,
                                  const Bin::FreeChunkSet::iterator& c)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Removes a free chunk from the bin.
  void RemoveFreeChunkFromBin(ChunkHandle h) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Removes the chunk metadata represented by 'h'.
  void DeleteChunk(ChunkHandle h) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  string RenderOccupancy() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void DumpMemoryLog(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  ChunkHandle AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void DeallocateChunk(ChunkHandle h) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  Chunk* ChunkFromHandle(ChunkHandle h) EXCLUSIVE_LOCKS_REQUIRED(lock_);
  AllocatorRetry retry_helper_;

  // Structures immutable after construction
  size_t memory_limit_ = 0;

  // Returns floor(log2(n)) for n > 0.
  inline int Log2FloorNonZero(uint64 n) {
#if defined(__GNUC__)
    return 63 ^ __builtin_clzll(n);
#else
    // Portable fallback: count how many times n can be halved before
    // reaching 1, which equals floor(log2(n)) and matches the
    // __builtin_clzll path above.
    int r = 0;
    while (n > 1) {
      r++;
      n >>= 1;
    }
    return r;
#endif
  }
  // Map from bin size to Bin
  Bin* BinFromIndex(BinNum index) {
    return reinterpret_cast<Bin*>(&(bins_space_[index * sizeof(Bin)]));
  }
  size_t BinNumToSize(BinNum index) {
    return static_cast<size_t>(256) << index;
  }
  BinNum BinNumForSize(size_t bytes) {
    uint64 v = std::max<size_t>(bytes, 256) >> kMinAllocationBits;
    int b = std::min(kNumBins - 1, Log2FloorNonZero(v));
    return b;
  }
  Bin* BinForSize(size_t bytes) { return BinFromIndex(BinNumForSize(bytes)); }

  char bins_space_[sizeof(Bin) * kNumBins];
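
  // Bin geometry implied by the helpers above: bin i collects free chunks of
  // at least (256 << i) bytes, so bin 0 starts at 256 bytes and bin 20 (the
  // last of kNumBins) starts at 256 MiB. For example, a 1536-byte request
  // gives v = 1536 >> kMinAllocationBits = 6 and Log2FloorNonZero(6) = 2, so
  // BinNumForSize(1536) returns 2, whose bin_size of 1024 is the largest bin
  // size not exceeding the request.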
  // The size of the current region allocation.
  size_t curr_region_allocation_bytes_;

  // The total number of allocated bytes by the allocator.
  size_t total_region_allocated_bytes_ = 0;

  // An indicator that expansion of a region has hit the limits
  // of the available memory.
  bool started_backpedal_ = false;

  std::unique_ptr<SubAllocator> suballocator_;
  string name_;

  // Structures mutable after construction
  mutable mutex lock_;
  RegionManager region_manager_ GUARDED_BY(lock_);

  std::vector<Chunk> chunks_;
  ChunkHandle free_chunks_list_;  // Ptr to head of linked list of free Chunks

  // Called once on each region, ASAP.
  std::vector<Visitor> region_visitors_;

  // Counter containing the next unique identifier to assign to a
  // newly-created chunk.
  int64 next_allocation_id_ GUARDED_BY(lock_);

  // Stats.
  AllocatorStats stats_ GUARDED_BY(lock_);

  TF_DISALLOW_COPY_AND_ASSIGN(BFCAllocator);
};

}  // namespace tensorflow

#endif  // TENSORFLOW_COMMON_RUNTIME_BFC_ALLOCATOR_H_
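
For context, a minimal sketch of how this interface might be driven follows. It is illustrative only, not part of this header: ToyHostSubAllocator and Demo are hypothetical names, and the sketch assumes the SubAllocator interface from tensorflow/core/framework/allocator.h of this revision (Alloc(alignment, num_bytes) / Free(ptr, num_bytes)) plus the port::AlignedMalloc / port::AlignedFree helpers from tensorflow/core/platform/mem.h.

#include "tensorflow/core/common_runtime/bfc_allocator.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/mem.h"

namespace tensorflow {

// Hypothetical suballocator that hands the BFC allocator aligned host
// memory; in practice a device-specific suballocator is used instead.
class ToyHostSubAllocator : public SubAllocator {
 public:
  void* Alloc(size_t alignment, size_t num_bytes) override {
    return port::AlignedMalloc(num_bytes, alignment);
  }
  void Free(void* ptr, size_t num_bytes) override { port::AlignedFree(ptr); }
};

void Demo() {
  // BFCAllocator takes ownership of the suballocator (see the constructor
  // comment above) and carves requests out of regions obtained from it.
  BFCAllocator allocator(new ToyHostSubAllocator, /*total_memory=*/1 << 30,
                         /*allow_growth=*/true, "bfc_demo");
  void* p = allocator.AllocateRaw(/*alignment=*/64, /*num_bytes=*/1024);
  allocator.DeallocateRaw(p);
}

}  // namespace tensorflow

Because allow_growth is true here, the allocator starts with a small region and calls Extend to add regions as demand grows, rather than reserving all of total_memory up front.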