PageRenderTime 60ms CodeModel.GetById 30ms RepoModel.GetById 1ms app.codeStats 0ms

/libs/binder/MemoryDealer.cpp

https://gitlab.com/Atomic-ROM/frameworks_native
C++ | 468 lines | 361 code | 71 blank | 36 comment | 57 complexity | ce96b996203c57c5ab0b9b79188e88a0 MD5 | raw file
  1. /*
  2. * Copyright (C) 2007 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #define LOG_TAG "MemoryDealer"
  17. #include <binder/MemoryDealer.h>
  18. #include <binder/IPCThreadState.h>
  19. #include <binder/MemoryBase.h>
  20. #include <utils/Log.h>
  21. #include <utils/SortedVector.h>
  22. #include <utils/String8.h>
  23. #include <utils/threads.h>
  24. #include <stdint.h>
  25. #include <stdio.h>
  26. #include <stdlib.h>
  27. #include <fcntl.h>
  28. #include <unistd.h>
  29. #include <errno.h>
  30. #include <string.h>
  31. #include <sys/stat.h>
  32. #include <sys/types.h>
  33. #include <sys/mman.h>
  34. #include <sys/file.h>
  35. namespace android {
  36. // ----------------------------------------------------------------------------
  37. /*
  38. * A simple templatized doubly linked-list implementation
  39. */
  40. template <typename NODE>
  41. class LinkedList
  42. {
  43. NODE* mFirst;
  44. NODE* mLast;
  45. public:
  46. LinkedList() : mFirst(0), mLast(0) { }
  47. bool isEmpty() const { return mFirst == 0; }
  48. NODE const* head() const { return mFirst; }
  49. NODE* head() { return mFirst; }
  50. NODE const* tail() const { return mLast; }
  51. NODE* tail() { return mLast; }
  52. void insertAfter(NODE* node, NODE* newNode) {
  53. newNode->prev = node;
  54. newNode->next = node->next;
  55. if (node->next == 0) mLast = newNode;
  56. else node->next->prev = newNode;
  57. node->next = newNode;
  58. }
  59. void insertBefore(NODE* node, NODE* newNode) {
  60. newNode->prev = node->prev;
  61. newNode->next = node;
  62. if (node->prev == 0) mFirst = newNode;
  63. else node->prev->next = newNode;
  64. node->prev = newNode;
  65. }
  66. void insertHead(NODE* newNode) {
  67. if (mFirst == 0) {
  68. mFirst = mLast = newNode;
  69. newNode->prev = newNode->next = 0;
  70. } else {
  71. newNode->prev = 0;
  72. newNode->next = mFirst;
  73. mFirst->prev = newNode;
  74. mFirst = newNode;
  75. }
  76. }
  77. void insertTail(NODE* newNode) {
  78. if (mLast == 0) {
  79. insertHead(newNode);
  80. } else {
  81. newNode->prev = mLast;
  82. newNode->next = 0;
  83. mLast->next = newNode;
  84. mLast = newNode;
  85. }
  86. }
  87. NODE* remove(NODE* node) {
  88. if (node->prev == 0) mFirst = node->next;
  89. else node->prev->next = node->next;
  90. if (node->next == 0) mLast = node->prev;
  91. else node->next->prev = node->prev;
  92. return node;
  93. }
  94. };
  95. // ----------------------------------------------------------------------------
// An IMemory implementation representing one allocation carved out of a
// MemoryDealer's heap.  Holds a strong reference to its dealer so the
// heap outlives the allocation; the destructor returns the chunk to the
// dealer (see Allocation::~Allocation).
class Allocation : public MemoryBase {
public:
    Allocation(const sp<MemoryDealer>& dealer,
            const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~Allocation();
private:
    // Keeps the dealer (and thus the heap) alive for our lifetime.
    sp<MemoryDealer> mDealer;
};
  104. // ----------------------------------------------------------------------------
// A best-fit allocator over a fixed-size heap.  Bookkeeping is a doubly
// linked list of chunk_t kept in address order; chunk offsets and sizes
// are stored in kMemoryAlign units rather than bytes.
class SimpleBestFitAllocator
{
    enum {
        PAGE_ALIGNED = 0x00000001   // allocate() flag: page-align the result
    };
public:
    SimpleBestFitAllocator(size_t size);
    ~SimpleBestFitAllocator();
    // Returns a byte offset into the heap; on exhaustion the negative
    // NO_MEMORY from alloc() comes back through this size_t return and
    // callers cast to ssize_t to test it (see MemoryDealer::allocate).
    size_t allocate(size_t size, uint32_t flags = 0);
    // Frees the allocation at `offset`; NAME_NOT_FOUND if unknown.
    status_t deallocate(size_t offset);
    size_t size() const;
    void dump(const char* what) const;
    void dump(String8& res, const char* what) const;
private:
    struct chunk_t {
        chunk_t(size_t start, size_t size)
            : start(start), size(size), free(1), prev(0), next(0) {
        }
        size_t start;           // offset, in kMemoryAlign units
        size_t size : 28;       // length, in kMemoryAlign units (28-bit cap)
        int free : 4;           // 1 when the chunk is unallocated
        mutable chunk_t* prev;  // list links, address-ordered
        mutable chunk_t* next;
    };
    ssize_t alloc(size_t size, uint32_t flags);   // requires mLock held
    chunk_t* dealloc(size_t start);               // requires mLock held
    void dump_l(const char* what) const;
    void dump_l(String8& res, const char* what) const;
    static const int kMemoryAlign;
    mutable Mutex mLock;        // guards mList
    LinkedList<chunk_t> mList;
    size_t mHeapSize;           // total heap size in bytes (page-rounded)
};
  138. // ----------------------------------------------------------------------------
// Wraps a freshly reserved [offset, offset+size) range of `heap` as an
// IMemory, pinning `dealer` so the range stays valid.
Allocation::Allocation(
        const sp<MemoryDealer>& dealer,
        const sp<IMemoryHeap>& heap, ssize_t offset, size_t size)
    : MemoryBase(heap, offset, size), mDealer(dealer)
{
#ifndef NDEBUG
    // Debug builds fill new allocations with 0xda so use-before-init
    // shows up as a recognizable pattern instead of stale data.
    void* const start_ptr = (void*)(intptr_t(heap->base()) + offset);
    memset(start_ptr, 0xda, size);
#endif
}
// Returns the chunk to the dealer and, where possible, gives the pages it
// fully covered back to the kernel with madvise(MADV_REMOVE).
Allocation::~Allocation()
{
    size_t freedOffset = getOffset();
    size_t freedSize = getSize();
    if (freedSize) {
        /* NOTE: it's VERY important to not free allocations of size 0 because
         * they're special as they don't have any record in the allocator
         * and could alias some real allocation (their offset is zero). */
        // keep the size to unmap in excess
        size_t pagesize = getpagesize();
        size_t start = freedOffset;
        size_t end = start + freedSize;
        start &= ~(pagesize-1);
        end = (end + pagesize-1) & ~(pagesize-1);
        // give back to the kernel the pages we don't need
        size_t free_start = freedOffset;
        size_t free_end = free_start + freedSize;
        if (start < free_start)
            start = free_start;
        if (end > free_end)
            end = free_end;
        // Shrink [start, end) to whole pages lying entirely inside the
        // freed range — only complete pages can be handed back.
        start = (start + pagesize-1) & ~(pagesize-1);
        end &= ~(pagesize-1);
        if (start < end) {
            void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
            size_t size = end-start;
#ifndef NDEBUG
            // Poison freed memory (0xdf) in debug builds to surface
            // use-after-free bugs.
            memset(start_ptr, 0xdf, size);
#endif
            // MADV_REMOVE is not defined on Dapper based Goobuntu
#ifdef MADV_REMOVE
            if (size) {
                int err = madvise(start_ptr, size, MADV_REMOVE);
                ALOGW_IF(err, "madvise(%p, %zu, MADV_REMOVE) returned %s",
                        start_ptr, size, err<0 ? strerror(errno) : "Ok");
            }
#endif
        }
        // This should be done after madvise(MADV_REMOVE), otherwise madvise()
        // might kick out the memory region that's allocated and/or written
        // right after the deallocation.
        mDealer->deallocate(freedOffset);
    }
}
  193. // ----------------------------------------------------------------------------
// Creates the shared heap (MemoryHeapBase) and the best-fit allocator
// that parcels it out.
MemoryDealer::MemoryDealer(size_t size, const char* name, uint32_t flags)
    : mHeap(new MemoryHeapBase(size, flags, name)),
      mAllocator(new SimpleBestFitAllocator(size))
{
}
MemoryDealer::~MemoryDealer()
{
    // mHeap is reference-counted (sp<>) and cleans itself up; only the
    // raw allocator pointer needs explicit deletion.
    delete mAllocator;
}
  203. sp<IMemory> MemoryDealer::allocate(size_t size)
  204. {
  205. sp<IMemory> memory;
  206. const ssize_t offset = allocator()->allocate(size);
  207. if (offset >= 0) {
  208. memory = new Allocation(this, heap(), offset, size);
  209. }
  210. return memory;
  211. }
// Releases the allocation at `offset`; called from Allocation's
// destructor rather than directly by clients.
void MemoryDealer::deallocate(size_t offset)
{
    allocator()->deallocate(offset);
}
// Logs the allocator's chunk list, tagged with `what`, for debugging.
void MemoryDealer::dump(const char* what) const
{
    allocator()->dump(what);
}
// The shared memory heap all allocations are carved from.
const sp<IMemoryHeap>& MemoryDealer::heap() const {
    return mHeap;
}
// Non-owning access to the underlying allocator (owned by this dealer).
SimpleBestFitAllocator* MemoryDealer::allocator() const {
    return mAllocator;
}
  226. // ----------------------------------------------------------------------------
// align all the memory blocks on a cache-line boundary
// (all allocator bookkeeping is kept in units of kMemoryAlign bytes)
const int SimpleBestFitAllocator::kMemoryAlign = 32;
// Rounds the heap up to a whole number of pages and seeds the free list
// with a single chunk covering all of it (in kMemoryAlign units).
SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
{
    size_t pagesize = getpagesize();
    mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
    mList.insertHead(node);
}
  236. SimpleBestFitAllocator::~SimpleBestFitAllocator()
  237. {
  238. while(!mList.isEmpty()) {
  239. delete mList.remove(mList.head());
  240. }
  241. }
// Total managed heap size in bytes (page-rounded at construction).
size_t SimpleBestFitAllocator::size() const
{
    return mHeapSize;
}
  246. size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
  247. {
  248. Mutex::Autolock _l(mLock);
  249. ssize_t offset = alloc(size, flags);
  250. return offset;
  251. }
  252. status_t SimpleBestFitAllocator::deallocate(size_t offset)
  253. {
  254. Mutex::Autolock _l(mLock);
  255. chunk_t const * const freed = dealloc(offset);
  256. if (freed) {
  257. return NO_ERROR;
  258. }
  259. return NAME_NOT_FOUND;
  260. }
// Best-fit allocation.  Returns a byte offset into the heap, or NO_MEMORY
// if no free chunk is large enough.  Must be called with mLock held.
ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
    // Zero-byte requests get offset 0 but no chunk record; see the note
    // in Allocation::~Allocation about never freeing size-0 allocations.
    if (size == 0) {
        return 0;
    }
    // Convert the request from bytes to kMemoryAlign units, rounding up.
    size = (size + kMemoryAlign-1) / kMemoryAlign;
    chunk_t* free_chunk = 0;
    chunk_t* cur = mList.head();
    size_t pagesize = getpagesize();
    while (cur) {
        int extra = 0;
        if (flags & PAGE_ALIGNED)
            // Units of padding needed to bring cur->start to a page boundary.
            extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;
        // best fit
        if (cur->free && (cur->size >= (size+extra))) {
            if ((!free_chunk) || (cur->size < free_chunk->size)) {
                free_chunk = cur;
            }
            // Exact fit — no smaller candidate can exist, stop scanning.
            if (cur->size == size) {
                break;
            }
        }
        cur = cur->next;
    }
    if (free_chunk) {
        // Claim the chunk, then split off any alignment padding in front
        // and any leftover space behind as new free chunks.
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
            if (extra) {
                // Padding chunk stays free (chunk_t ctor sets free=1).
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insertBefore(free_chunk, split);
            }
            ALOGE_IF((flags&PAGE_ALIGNED) &&
                    ((free_chunk->start*kMemoryAlign)&(pagesize-1)),
                    "PAGE_ALIGNED requested, but page is not aligned!!!");
            // Whatever remains past the allocation becomes a free chunk.
            const ssize_t tail_free = free_size - (size+extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(
                        free_chunk->start + free_chunk->size, tail_free);
                mList.insertAfter(free_chunk, split);
            }
        }
        // Convert back from allocator units to a byte offset.
        return (free_chunk->start)*kMemoryAlign;
    }
    return NO_MEMORY;
}
// Frees the allocation whose byte offset is `start` and coalesces it with
// any adjacent free chunks.  Returns the (possibly merged) free chunk, or
// NULL if no allocation starts at that offset.  Must be called with mLock
// held.
SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
    // Byte offset -> kMemoryAlign units, matching the chunk records.
    start = start / kMemoryAlign;
    chunk_t* cur = mList.head();
    while (cur) {
        if (cur->start == start) {
            // Double-free is fatal: the chunk is already marked free.
            LOG_FATAL_IF(cur->free,
                "block at offset 0x%08lX of size 0x%08lX already freed",
                cur->start*kMemoryAlign, cur->size*kMemoryAlign);
            // merge freed blocks together
            chunk_t* freed = cur;
            cur->free = 1;
            do {
                chunk_t* const p = cur->prev;
                chunk_t* const n = cur->next;
                // Fold cur into its predecessor when the predecessor is
                // free (or cur is empty), then keep walking forward over
                // any subsequent free chunks.
                if (p && (p->free || !cur->size)) {
                    freed = p;
                    p->size += cur->size;
                    mList.remove(cur);
                    delete cur;
                }
                cur = n;
            } while (cur && cur->free);
#ifndef NDEBUG
            if (!freed->free) {
                dump_l("dealloc (!freed->free)");
            }
#endif
            // Sanity check: the surviving chunk must be marked free.
            LOG_FATAL_IF(!freed->free,
                "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                freed->start * kMemoryAlign, freed->size * kMemoryAlign);
            return freed;
        }
        cur = cur->next;
    }
    // No chunk starts at `start` — unknown or size-0 allocation.
    return 0;
}
// Logs the chunk list (thread-safe wrapper around dump_l).
void SimpleBestFitAllocator::dump(const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(what);
}
// Formats the chunk list into a String8 and emits it via ALOGD.
// Caller must hold mLock.
void SimpleBestFitAllocator::dump_l(const char* what) const
{
    String8 result;
    dump_l(result, what);
    ALOGD("%s", result.string());
}
// Appends the chunk list dump to `result` (thread-safe wrapper).
void SimpleBestFitAllocator::dump(String8& result,
        const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(result, what);
}
// Appends one line per chunk (address, byte offset, byte size, F=free /
// A=allocated, plus a broken-link diagnostic) and a total of allocated
// bytes.  Caller must hold mLock.
void SimpleBestFitAllocator::dump_l(String8& result,
        const char* what) const
{
    size_t size = 0;    // running total of allocated bytes
    int32_t i = 0;      // chunk index, for display only
    chunk_t const* cur = mList.head();
    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, " %s (%p, size=%u)\n",
            what, this, (unsigned int)mHeapSize);
    result.append(buffer);
    while (cur) {
        // errs[] is indexed by a 2-bit mask of link inconsistencies:
        // bit 0 = next->prev doesn't point back, bit 1 = prev->next doesn't.
        const char* errs[] = {"", "| link bogus NP",
                "| link bogus PN", "| link bogus NP+PN" };
        int np = ((cur->next) && cur->next->prev != cur) ? 1 : 0;
        int pn = ((cur->prev) && cur->prev->next != cur) ? 2 : 0;
        // Offsets/sizes are stored in kMemoryAlign units; print in bytes.
        snprintf(buffer, SIZE, " %3u: %p | 0x%08X | 0x%08X | %s %s\n",
                i, cur, int(cur->start*kMemoryAlign),
                int(cur->size*kMemoryAlign),
                int(cur->free) ? "F" : "A",
                errs[np|pn]);
        result.append(buffer);
        if (!cur->free)
            size += cur->size*kMemoryAlign;
        i++;
        cur = cur->next;
    }
    snprintf(buffer, SIZE,
            " size allocated: %u (%u KB)\n", int(size), int(size/1024));
    result.append(buffer);
}
  397. }; // namespace android