PageRenderTime 49ms CodeModel.GetById 17ms RepoModel.GetById 1ms app.codeStats 0ms

/indra/llcommon/llmemory.cpp

https://bitbucket.org/lindenlab/viewer-beta/
C++ | 2286 lines | 1695 code | 347 blank | 244 comment | 248 complexity | c408966c7b52a3d7f44f756208b157d6 MD5 | raw file
Possible License(s): LGPL-2.1
  1. /**
  2. * @file llmemory.cpp
  3. * @brief Very special memory allocation/deallocation stuff here
  4. *
  5. * $LicenseInfo:firstyear=2002&license=viewerlgpl$
  6. * Second Life Viewer Source Code
  7. * Copyright (C) 2010, Linden Research, Inc.
  8. *
  9. * This library is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation;
  12. * version 2.1 of the License only.
  13. *
  14. * This library is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with this library; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. *
  23. * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
  24. * $/LicenseInfo$
  25. */
  26. #include "linden_common.h"
  27. //#if MEM_TRACK_MEM
  28. #include "llthread.h"
  29. //#endif
  30. #if defined(LL_WINDOWS)
  31. //# include <windows.h>
  32. # include <psapi.h>
  33. #elif defined(LL_DARWIN)
  34. # include <sys/types.h>
  35. # include <mach/task.h>
  36. # include <mach/mach_init.h>
  37. #elif LL_LINUX || LL_SOLARIS
  38. # include <unistd.h>
  39. #endif
  40. #include "llmemory.h"
  41. #include "llsys.h"
  42. #include "llframetimer.h"
//----------------------------------------------------------------------------
//static member definitions
char* LLMemory::reserveMem = 0;                        // 16KB emergency buffer, released on OOM (see freeReserve())
U32 LLMemory::sAvailPhysicalMemInKB = U32_MAX ;        // physical memory still available to the process
U32 LLMemory::sMaxPhysicalMemInKB = 0;                 // max usable physical memory, capped by sMaxHeapSizeInKB
U32 LLMemory::sAllocatedMemInKB = 0;                   // current working-set size in KB
U32 LLMemory::sAllocatedPageSizeInKB = 0 ;             // current pagefile usage in KB
U32 LLMemory::sMaxHeapSizeInKB = U32_MAX ;             // heap cap, set by initMaxHeapSizeGB()
BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE; // gates the checks in isMemoryPoolLow()

#if __DEBUG_PRIVATE_MEM__
LLPrivateMemoryPoolManager::mem_allocation_info_t LLPrivateMemoryPoolManager::sMemAllocationTracker;
#endif
  55. //static
  56. void LLMemory::initClass()
  57. {
  58. if (!reserveMem)
  59. {
  60. reserveMem = new char[16*1024]; // reserve 16K for out of memory error handling
  61. }
  62. }
  63. //static
  64. void LLMemory::cleanupClass()
  65. {
  66. delete [] reserveMem;
  67. reserveMem = NULL;
  68. }
//static
//release the emergency reserve so error handling still has heap to work
//with after an out-of-memory failure (see ll_allocate()).
void LLMemory::freeReserve()
{
    delete [] reserveMem;
    reserveMem = NULL;
}
  75. //static
  76. void LLMemory::initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure)
  77. {
  78. sMaxHeapSizeInKB = (U32)(max_heap_size_gb * 1024 * 1024) ;
  79. sEnableMemoryFailurePrevention = prevent_heap_failure ;
  80. }
//static
//refresh the cached memory statistics (sAllocatedMemInKB, sAllocatedPageSizeInKB,
//sMaxPhysicalMemInKB, sAvailPhysicalMemInKB). Fully implemented only on
//Windows; other platforms just record the current RSS.
void LLMemory::updateMemoryInfo()
{
#if LL_WINDOWS
    HANDLE self = GetCurrentProcess();
    PROCESS_MEMORY_COUNTERS counters;

    if (!GetProcessMemoryInfo(self, &counters, sizeof(counters)))
    {
        llwarns << "GetProcessMemoryInfo failed" << llendl;
        return ;
    }

    sAllocatedMemInKB = (U32)(counters.WorkingSetSize / 1024) ;
    sAllocatedPageSizeInKB = (U32)(counters.PagefileUsage / 1024) ;

    U32 avail_phys, avail_virtual;
    LLMemoryInfo::getAvailableMemoryKB(avail_phys, avail_virtual) ;
    //usable memory = what is still available plus what we already hold,
    //but never more than the configured heap cap.
    sMaxPhysicalMemInKB = llmin(avail_phys + sAllocatedMemInKB, sMaxHeapSizeInKB);

    if(sMaxPhysicalMemInKB > sAllocatedMemInKB)
    {
        sAvailPhysicalMemInKB = sMaxPhysicalMemInKB - sAllocatedMemInKB ;
    }
    else
    {
        sAvailPhysicalMemInKB = 0 ; //avoid unsigned underflow
    }
#else
    //not valid for other systems for now.
    sAllocatedMemInKB = (U32)(LLMemory::getCurrentRSS() / 1024) ;
    sMaxPhysicalMemInKB = U32_MAX ;
    sAvailPhysicalMemInKB = U32_MAX ;
#endif
    return ;
}
//
//this function is to test if there is enough space with the size in the virtual address space.
//it does not do any real allocation
//if success, it returns the address where the memory chunk can fit in;
//otherwise it returns NULL.
//
//static
void* LLMemory::tryToAlloc(void* address, U32 size)
{
#if LL_WINDOWS
    //MEM_RESERVE only probes the address space; the reservation is released
    //immediately below, so no memory is actually committed.
    address = VirtualAlloc(address, size, MEM_RESERVE | MEM_TOP_DOWN, PAGE_NOACCESS) ;
    if(address)
    {
        if(!VirtualFree(address, 0, MEM_RELEASE))
        {
            llerrs << "error happens when free some memory reservation." << llendl ;
        }
    }
    return address ;
#else
    return (void*)0x01 ; //skip checking: non-NULL sentinel means "assume ok"
#endif
}
//static
//dump the current memory statistics to the log; when 'update' is TRUE the
//cached stats and the private-pool stats are refreshed first.
void LLMemory::logMemoryInfo(BOOL update)
{
    if(update)
    {
        updateMemoryInfo() ;
        LLPrivateMemoryPoolManager::getInstance()->updateStatistics() ;
    }

    llinfos << "Current allocated physical memory(KB): " << sAllocatedMemInKB << llendl ;
    llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ;
    llinfos << "Current availabe physical memory(KB): " << sAvailPhysicalMemInKB << llendl ;
    llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ;

    llinfos << "--- private pool information -- " << llendl ;
    llinfos << "Total reserved (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalReservedSize / 1024 << llendl ;
    llinfos << "Total allocated (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalAllocatedSize / 1024 << llendl ;
}
  152. //return 0: everything is normal;
  153. //return 1: the memory pool is low, but not in danger;
  154. //return -1: the memory pool is in danger, is about to crash.
  155. //static
  156. bool LLMemory::isMemoryPoolLow()
  157. {
  158. static const U32 LOW_MEMEOY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use
  159. const static U32 MAX_SIZE_CHECKED_MEMORY_BLOCK = 64 * 1024 * 1024 ; //64 MB
  160. static void* last_reserved_address = NULL ;
  161. if(!sEnableMemoryFailurePrevention)
  162. {
  163. return false ; //no memory failure prevention.
  164. }
  165. if(sAvailPhysicalMemInKB < (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2)) //out of physical memory
  166. {
  167. return true ;
  168. }
  169. if(sAllocatedPageSizeInKB + (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space.
  170. {
  171. return true ;
  172. }
  173. bool is_low = (S32)(sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB ||
  174. sAllocatedPageSizeInKB + LOW_MEMEOY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ;
  175. //check the virtual address space fragmentation
  176. if(!is_low)
  177. {
  178. if(!last_reserved_address)
  179. {
  180. last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
  181. }
  182. else
  183. {
  184. last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
  185. if(!last_reserved_address) //failed, try once more
  186. {
  187. last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
  188. }
  189. }
  190. is_low = !last_reserved_address ; //allocation failed
  191. }
  192. return is_low ;
  193. }
//static
//cached value; call updateMemoryInfo() to refresh.
U32 LLMemory::getAvailableMemKB()
{
    return sAvailPhysicalMemInKB ;
}
//static
//cached value; call updateMemoryInfo() to refresh.
U32 LLMemory::getMaxMemKB()
{
    return sMaxPhysicalMemInKB ;
}
//static
//cached value; call updateMemoryInfo() to refresh.
U32 LLMemory::getAllocatedMemKB()
{
    return sAllocatedMemInKB ;
}
  209. void* ll_allocate (size_t size)
  210. {
  211. if (size == 0)
  212. {
  213. llwarns << "Null allocation" << llendl;
  214. }
  215. void *p = malloc(size);
  216. if (p == NULL)
  217. {
  218. LLMemory::freeReserve();
  219. llerrs << "Out of memory Error" << llendl;
  220. }
  221. return p;
  222. }
  223. //----------------------------------------------------------------------------
  224. #if defined(LL_WINDOWS)
//returns the process working-set size in bytes, or 0 if the query fails.
U64 LLMemory::getCurrentRSS()
{
    HANDLE self = GetCurrentProcess();
    PROCESS_MEMORY_COUNTERS counters;

    if (!GetProcessMemoryInfo(self, &counters, sizeof(counters)))
    {
        llwarns << "GetProcessMemoryInfo failed" << llendl;
        return 0;
    }

    return counters.WorkingSetSize;
}
//static
//returns the process working-set size in bytes, or 0 on failure.
//NOTE(review): WorkingSetSize is a SIZE_T; assigning to U32 truncates on
//64-bit builds — confirm this is only used where 4GB+ sets cannot occur.
U32 LLMemory::getWorkingSetSize()
{
    PROCESS_MEMORY_COUNTERS pmc ;
    U32 ret = 0 ;

    if (GetProcessMemoryInfo( GetCurrentProcess(), &pmc, sizeof(pmc)) )
    {
        ret = pmc.WorkingSetSize ;
    }

    return ret ;
}
  247. #elif defined(LL_DARWIN)
  248. /*
  249. The API used here is not capable of dealing with 64-bit memory sizes, but is available before 10.4.
  250. Once we start requiring 10.4, we can use the updated API, which looks like this:
  251. task_basic_info_64_data_t basicInfo;
  252. mach_msg_type_number_t basicInfoCount = TASK_BASIC_INFO_64_COUNT;
  253. if (task_info(mach_task_self(), TASK_BASIC_INFO_64, (task_info_t)&basicInfo, &basicInfoCount) == KERN_SUCCESS)
  254. Of course, this doesn't gain us anything unless we start building the viewer as a 64-bit executable, since that's the only way
  255. for our memory allocation to exceed 2^32.
  256. */
  257. // if (sysctl(ctl, 2, &page_size, &size, NULL, 0) == -1)
  258. // {
  259. // llwarns << "Couldn't get page size" << llendl;
  260. // return 0;
  261. // } else {
  262. // return page_size;
  263. // }
  264. // }
//returns the resident set size in bytes via the Mach task API, or 0 on failure.
U64 LLMemory::getCurrentRSS()
{
    U64 residentSize = 0;
    task_basic_info_data_t basicInfo;
    mach_msg_type_number_t basicInfoCount = TASK_BASIC_INFO_COUNT;
    if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&basicInfo, &basicInfoCount) == KERN_SUCCESS)
    {
        residentSize = basicInfo.resident_size;

        // If we ever wanted it, the process virtual size is also available as:
        // virtualSize = basicInfo.virtual_size;

        // llinfos << "resident size is " << residentSize << llendl;
    }
    else
    {
        llwarns << "task_info failed" << llendl;
    }

    return residentSize;
}
//not implemented on this platform; always 0.
U32 LLMemory::getWorkingSetSize()
{
    return 0 ;
}
  287. #elif defined(LL_LINUX)
//parse a memory-size field out of /proc/self/stat; returns 0 on any failure.
U64 LLMemory::getCurrentRSS()
{
    static const char statPath[] = "/proc/self/stat";
    LLFILE *fp = LLFile::fopen(statPath, "r");
    U64 rss = 0;

    if (fp == NULL)
    {
        llwarns << "couldn't open " << statPath << llendl;
        goto bail;
    }

    // Eee-yew! See Documentation/filesystems/proc.txt in your
    // nearest friendly kernel tree for details.
    {
        //NOTE(review): %Lu is a non-standard glibc length modifier (prefer
        //%llu / SCNu64). Also verify which stat field the %Lu lands on:
        //counting the skips it appears to read an earlier field than rss
        //(field 24, measured in pages, not bytes) — confirm intended
        //field and units against proc(5).
        int ret = fscanf(fp, "%*d (%*[^)]) %*c %*d %*d %*d %*d %*d %*d %*d "
                         "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %Lu",
                         &rss);
        if (ret != 1)
        {
            llwarns << "couldn't parse contents of " << statPath << llendl;
            rss = 0;
        }
    }

    fclose(fp);

bail:
    return rss;
}
//not implemented on this platform; always 0.
U32 LLMemory::getWorkingSetSize()
{
    return 0 ;
}
  318. #elif LL_SOLARIS
  319. #include <sys/types.h>
  320. #include <sys/stat.h>
  321. #include <fcntl.h>
  322. #define _STRUCTURED_PROC 1
  323. #include <sys/procfs.h>
  324. U64 LLMemory::getCurrentRSS()
  325. {
  326. char path [LL_MAX_PATH]; /* Flawfinder: ignore */
  327. sprintf(path, "/proc/%d/psinfo", (int)getpid());
  328. int proc_fd = -1;
  329. if((proc_fd = open(path, O_RDONLY)) == -1){
  330. llwarns << "LLmemory::getCurrentRSS() unable to open " << path << ". Returning 0 RSS!" << llendl;
  331. return 0;
  332. }
  333. psinfo_t proc_psinfo;
  334. if(read(proc_fd, &proc_psinfo, sizeof(psinfo_t)) != sizeof(psinfo_t)){
  335. llwarns << "LLmemory::getCurrentRSS() Unable to read from " << path << ". Returning 0 RSS!" << llendl;
  336. close(proc_fd);
  337. return 0;
  338. }
  339. close(proc_fd);
  340. return((U64)proc_psinfo.pr_rssize * 1024);
  341. }
//not implemented on this platform; always 0.
U32 LLMemory::getWorkingSetSize()
{
    return 0 ;
}
  346. #else
//fallback for unsupported platforms: RSS unknown, report 0.
U64 LLMemory::getCurrentRSS()
{
    return 0;
}
//fallback for unsupported platforms: working set unknown, report 0.
U32 LLMemory::getWorkingSetSize()
{
    return 0;
}
  355. #endif
  356. //--------------------------------------------------------------------------------------------------
  357. #if MEM_TRACK_MEM
  358. #include "llframetimer.h"
//static
LLMemTracker* LLMemTracker::sInstance = NULL ; //lazy singleton, created by getInstance()
//set up a ring buffer of mCapacity (128) fixed-size 128-byte log entries,
//carved out of one contiguous slab so only two allocations are needed.
LLMemTracker::LLMemTracker()
{
    mLastAllocatedMem = LLMemory::getWorkingSetSize() ;
    mCapacity = 128 ;
    mCurIndex = 0 ;
    mCounter = 0 ;
    mDrawnIndex = 0 ;
    mPaused = FALSE ;

    mMutexp = new LLMutex() ;
    mStringBuffer = new char*[128] ;
    mStringBuffer[0] = new char[mCapacity * 128] ;
    //point each entry at its 128-byte span inside the slab
    for(S32 i = 1 ; i < mCapacity ; i++)
    {
        mStringBuffer[i] = mStringBuffer[i-1] + 128 ;
    }
}
//release the entry slab, the pointer table, and the mutex.
LLMemTracker::~LLMemTracker()
{
    delete[] mStringBuffer[0] ; //the contiguous slab all entries point into
    delete[] mStringBuffer;
    delete mMutexp ;
}
  383. //static
  384. LLMemTracker* LLMemTracker::getInstance()
  385. {
  386. if(!sInstance)
  387. {
  388. sInstance = new LLMemTracker() ;
  389. }
  390. return sInstance ;
  391. }
  392. //static
  393. void LLMemTracker::release()
  394. {
  395. if(sInstance)
  396. {
  397. delete sInstance ;
  398. sInstance = NULL ;
  399. }
  400. }
  401. //static
  402. void LLMemTracker::track(const char* function, const int line)
  403. {
  404. static const S32 MIN_ALLOCATION = 0 ; //1KB
  405. if(mPaused)
  406. {
  407. return ;
  408. }
  409. U32 allocated_mem = LLMemory::getWorkingSetSize() ;
  410. LLMutexLock lock(mMutexp) ;
  411. S32 delta_mem = allocated_mem - mLastAllocatedMem ;
  412. mLastAllocatedMem = allocated_mem ;
  413. if(delta_mem <= 0)
  414. {
  415. return ; //occupied memory does not grow
  416. }
  417. if(delta_mem < MIN_ALLOCATION)
  418. {
  419. return ;
  420. }
  421. char* buffer = mStringBuffer[mCurIndex++] ;
  422. F32 time = (F32)LLFrameTimer::getElapsedSeconds() ;
  423. S32 hours = (S32)(time / (60*60));
  424. S32 mins = (S32)((time - hours*(60*60)) / 60);
  425. S32 secs = (S32)((time - hours*(60*60) - mins*60));
  426. strcpy(buffer, function) ;
  427. sprintf(buffer + strlen(function), " line: %d DeltaMem: %d (bytes) Time: %d:%02d:%02d", line, delta_mem, hours,mins,secs) ;
  428. if(mCounter < mCapacity)
  429. {
  430. mCounter++ ;
  431. }
  432. if(mCurIndex >= mCapacity)
  433. {
  434. mCurIndex = 0 ;
  435. }
  436. }
//begin rendering the recorded entries: takes the lock (released in
//postDraw()) so track() cannot mutate the ring while it is being drawn.
//(note: instance method despite the old "//static" comment)
void LLMemTracker::preDraw(BOOL pause)
{
    mMutexp->lock() ;

    mPaused = pause ;
    mDrawnIndex = mCurIndex - 1; //may be -1; getNextLine() wraps it
    mNumOfDrawn = 0 ;
}
//finish rendering: release the lock taken in preDraw().
void LLMemTracker::postDraw()
{
    mMutexp->unlock() ;
}
//walk the ring buffer backwards from the newest entry; returns NULL once all
//recorded entries have been returned. Caller must hold the lock (preDraw()).
const char* LLMemTracker::getNextLine()
{
    if(mNumOfDrawn >= mCounter)
    {
        return NULL ;
    }
    mNumOfDrawn++;

    //wrap around the ring
    if(mDrawnIndex < 0)
    {
        mDrawnIndex = mCapacity - 1 ;
    }

    return mStringBuffer[mDrawnIndex--] ;
}
  464. #endif //MEM_TRACK_MEM
  465. //--------------------------------------------------------------------------------------------------
  466. //--------------------------------------------------------------------------------------------------
  467. //--------------------------------------------------------------------------------------------------
//minimum slot size and minimal slot size interval
const U32 ATOMIC_MEM_SLOT = 16 ; //bytes
//minimum block sizes (page size) for small allocation, medium allocation, large allocation
const U32 MIN_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {2 << 10, 4 << 10, 16 << 10} ; //2KB, 4KB, 16KB
//maximum block sizes for small allocation, medium allocation, large allocation
const U32 MAX_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ; //64KB, 1MB, 4MB
//minimum slot sizes for small allocation, medium allocation, large allocation
const U32 MIN_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {ATOMIC_MEM_SLOT, 2 << 10, 512 << 10}; //16B, 2KB, 512KB
//maximum slot sizes for small allocation, medium allocation, large allocation
const U32 MAX_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {(2 << 10) - ATOMIC_MEM_SLOT, (512 - 2) << 10, 4 << 20};
//size of a block with multiple slots can not exceed CUT_OFF_SIZE
const U32 CUT_OFF_SIZE = (64 << 10) ; //64 KB
//max number of slots in a block: min(2048/16, 16*8) = 128
const U32 MAX_NUM_SLOTS_IN_A_BLOCK = llmin(MIN_BLOCK_SIZES[0] / ATOMIC_MEM_SLOT, ATOMIC_MEM_SLOT * 8) ;
  482. //-------------------------------------------------------------
  483. //align val to be integer times of ATOMIC_MEM_SLOT
  484. U32 align(U32 val)
  485. {
  486. U32 aligned = (val / ATOMIC_MEM_SLOT) * ATOMIC_MEM_SLOT ;
  487. if(aligned < val)
  488. {
  489. aligned += ATOMIC_MEM_SLOT ;
  490. }
  491. return aligned ;
  492. }
  493. //-------------------------------------------------------------
  494. //class LLPrivateMemoryPool::LLMemoryBlock
  495. //-------------------------------------------------------------
  496. //
  497. //each memory block could fit for two page sizes: 0.75 * mSlotSize, which starts from the beginning of the memory chunk and grow towards the end of the
  498. //the block; another is mSlotSize, which starts from the end of the block and grows towards the beginning of the block.
  499. //
//empty: blocks live inside a chunk's meta buffer (see LLMemoryChunk::init)
//and are set up via init()/setBuffer(), not the constructor.
LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock()
{
    //empty
}
//empty: the block does not own its buffer, so nothing to release.
LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock()
{
    //empty
}
//create and initialize a memory block
//buffer/buffer_size: the memory this block manages; slot_size: fixed size of
//every allocation from this block. Slot usage is tracked by a bitmap where
//bit == 1 means "in use": the first 32 slots live in mUsageBits; when more
//than 32 slots fit, mDummySize bytes at the head of mBuffer are reserved to
//hold the overflow bitmap words.
void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size)
{
    mBuffer = buffer ;
    mBufferSize = buffer_size ;
    mSlotSize = slot_size ;
    mTotalSlots = buffer_size / mSlotSize ;

    llassert_always(buffer_size / mSlotSize <= MAX_NUM_SLOTS_IN_A_BLOCK) ; //max number is 128

    mAllocatedSlots = 0 ;
    mDummySize = 0 ;

    //init the bit map.
    //mark free bits
    if(mTotalSlots > 32) //reserve extra space from mBuffer to store bitmap if needed.
    {
        mDummySize = ATOMIC_MEM_SLOT ;
        mTotalSlots -= (mDummySize + mSlotSize - 1) / mSlotSize ; //slots consumed by the bitmap area
        mUsageBits = 0 ;

        S32 usage_bit_len = (mTotalSlots + 31) / 32 ; //bitmap words in total, incl. mUsageBits

        //zero the overflow words that track real slots...
        for(S32 i = 0 ; i < usage_bit_len - 1 ; i++)
        {
            *((U32*)mBuffer + i) = 0 ;
        }
        //...and mark all remaining reserved words fully used so they are never handed out
        for(S32 i = usage_bit_len - 1 ; i < mDummySize / sizeof(U32) ; i++)
        {
            *((U32*)mBuffer + i) = 0xffffffff ;
        }

        //mark the tail bits beyond mTotalSlots as used. Overflow word i covers
        //slots [32*(i+1), 32*(i+2)), so the last word holding valid slots is
        //buffer word index usage_bit_len - 2 (word "usage_bit_len - 1" of the
        //combined map is mUsageBits itself).
        if(mTotalSlots & 31)
        {
            *((U32*)mBuffer + usage_bit_len - 2) = (0xffffffff << (mTotalSlots & 31)) ;
        }
    }
    else//no extra bitmap space reserved
    {
        mUsageBits = 0 ;
        if(mTotalSlots & 31)
        {
            mUsageBits = (0xffffffff << (mTotalSlots & 31)) ; //mark non-existent tail slots used
        }
    }

    mSelf = this ;
    mNext = NULL ;
    mPrev = NULL ;

    llassert_always(mTotalSlots > 0) ;
}
//mark this block to be free with the memory [mBuffer, mBuffer + mBufferSize).
//a block with mSelf == NULL / mTotalSlots == 0 represents raw free space not
//yet carved into slots (see createNewBlock()).
void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size)
{
    mBuffer = buffer ;
    mBufferSize = buffer_size ;
    mSelf = NULL ;
    mTotalSlots = 0 ; //set the block is free.
}
//reserve a slot
//returns a pointer to a free slot. The caller must guarantee the block is
//not full (asserted below; otherwise 'bits' would stay NULL and crash).
char* LLPrivateMemoryPool::LLMemoryBlock::allocate()
{
    llassert_always(mAllocatedSlots < mTotalSlots) ;

    //find a free slot
    U32* bits = NULL ;
    U32 k = 0 ;
    if(mUsageBits != 0xffffffff)
    {
        bits = &mUsageBits ; //free slot among the first 32
    }
    else if(mDummySize > 0)//go to extra space
    {
        //scan the overflow bitmap words stored at the head of mBuffer
        for(S32 i = 0 ; i < mDummySize / sizeof(U32); i++)
        {
            if(*((U32*)mBuffer + i) != 0xffffffff)
            {
                bits = (U32*)mBuffer + i ;
                k = i + 1 ; //overflow word i covers slots starting at (i + 1) * 32
                break ;
            }
        }
    }

    //find the first clear bit in the chosen word
    S32 idx = 0 ;
    U32 tmp = *bits ;
    for(; tmp & 1 ; tmp >>= 1, idx++) ;

    //set the slot reserved
    if(!idx)
    {
        *bits |= 1 ;
    }
    else
    {
        *bits |= (1 << idx) ;
    }

    mAllocatedSlots++ ;

    //mDummySize skips the bitmap area reserved at the head of the buffer
    return mBuffer + mDummySize + (k * 32 + idx) * mSlotSize ;
}
//free a slot
//addr must be a pointer previously returned by allocate() on this block.
void LLPrivateMemoryPool::LLMemoryBlock::freeMem(void* addr)
{
    //bit index
    //NOTE(review): pointer math via U32 casts truncates on 64-bit builds;
    //verify this path only runs in 32-bit configurations.
    U32 idx = ((U32)addr - (U32)mBuffer - mDummySize) / mSlotSize ;

    U32* bits = &mUsageBits ;
    if(idx >= 32)
    {
        //slots past the first 32 are tracked in the overflow words at the head of mBuffer
        bits = (U32*)mBuffer + (idx - 32) / 32 ;
    }

    //reset the bit
    if(idx & 31)
    {
        *bits &= ~(1 << (idx & 31)) ;
    }
    else
    {
        *bits &= ~1 ;
    }

    mAllocatedSlots-- ;
}
  619. //for debug use: reset the entire bitmap.
  620. void LLPrivateMemoryPool::LLMemoryBlock::resetBitMap()
  621. {
  622. for(S32 i = 0 ; i < mDummySize / sizeof(U32) ; i++)
  623. {
  624. *((U32*)mBuffer + i) = 0 ;
  625. }
  626. mUsageBits = 0 ;
  627. }
  628. //-------------------------------------------------------------------
  629. //class LLMemoryChunk
  630. //--------------------------------------------------------------------
//empty: all setup happens in init().
LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk()
{
    //empty
}
//empty: the chunk does not own its buffer, so nothing to release.
LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk()
{
    //empty
}
//create and init a memory chunk
//chunk layout inside 'buffer': [LLMemoryChunk header | per-block meta data |
//avail-block table | free-space table | aligned data buffer]. All bookkeeping
//lives inside the chunk's own memory.
void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size)
{
    mBuffer = buffer ;
    mBufferSize = buffer_size ;
    mAlloatedSize = 0 ;

    mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ;

    mMinBlockSize = min_block_size; //page size
    mMinSlotSize = min_slot_size;
    mMaxSlotSize = max_slot_size ;
    mBlockLevels = mMaxSlotSize / mMinSlotSize ;            //one avail-list per slot-size multiple
    mPartitionLevels = max_block_size / mMinBlockSize + 1 ; //one free-list per block page-count

    S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) /
        (mMinBlockSize + sizeof(LLMemoryBlock)) ;

    //meta data space
    mBlocks = (LLMemoryBlock*)mMetaBuffer ; //space reserved for all memory blocks.
    mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ;
    mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ;

    //data buffer, which can be used for allocation
    mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ;

    //alignment
    mDataBuffer = mBuffer + align(mDataBuffer - mBuffer) ;

    //init
    for(U32 i = 0 ; i < mBlockLevels; i++)
    {
        mAvailBlockList[i] = NULL ;
    }
    for(U32 i = 0 ; i < mPartitionLevels ; i++)
    {
        mFreeSpaceList[i] = NULL ;
    }

    //assign the entire chunk to the first block
    mBlocks[0].mPrev = NULL ;
    mBlocks[0].mNext = NULL ;
    mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ;

    addToFreeSpace(&mBlocks[0]) ;

    mNext = NULL ;
    mPrev = NULL ;
}
  678. //static
  679. U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_slot_size,
  680. U32 max_slot_size, U32 min_block_size, U32 max_block_size)
  681. {
  682. //for large allocations, reserve some extra memory for meta data to avoid wasting much
  683. if(data_buffer_size / min_slot_size < 64) //large allocations
  684. {
  685. U32 overhead = sizeof(LLMemoryChunk) + (data_buffer_size / min_block_size) * sizeof(LLMemoryBlock) +
  686. sizeof(LLMemoryBlock*) * (max_slot_size / min_slot_size) + sizeof(LLMemoryBlock*) * (max_block_size / min_block_size + 1) ;
  687. //round to integer times of min_block_size
  688. overhead = ((overhead + min_block_size - 1) / min_block_size) * min_block_size ;
  689. return overhead ;
  690. }
  691. else
  692. {
  693. return 0 ; //do not reserve extra overhead if for small allocations
  694. }
  695. }
//allocate 'size' bytes (rounded up to at least mMinSlotSize) from this chunk.
//Strategy: exact-level avail block, then a newly carved block, then any
//larger-level avail block. Returns NULL when the chunk cannot satisfy it.
char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size)
{
    if(mMinSlotSize > size)
    {
        size = mMinSlotSize ;
    }
    if(mAlloatedSize + size > mBufferSize - (mDataBuffer - mBuffer))
    {
        return NULL ; //no enough space in this chunk.
    }

    char* p = NULL ;
    U32 blk_idx = getBlockLevel(size);

    LLMemoryBlock* blk = NULL ;

    //check if there is free block available
    if(mAvailBlockList[blk_idx])
    {
        blk = mAvailBlockList[blk_idx] ;
        p = blk->allocate() ;

        if(blk->isFull())
        {
            popAvailBlockList(blk_idx) ; //full blocks leave the avail list
        }
    }

    //ask for a new block
    if(!p)
    {
        blk = addBlock(blk_idx) ;
        if(blk)
        {
            p = blk->allocate() ;

            if(blk->isFull())
            {
                popAvailBlockList(blk_idx) ;
            }
        }
    }

    //ask for space from larger blocks
    //NOTE(review): blk_idx is U32 but i is S32 compared against U32
    //mBlockLevels — harmless for realistic level counts, but mixed signedness.
    if(!p)
    {
        for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++)
        {
            if(mAvailBlockList[i])
            {
                blk = mAvailBlockList[i] ;
                p = blk->allocate() ;

                if(blk->isFull())
                {
                    popAvailBlockList(i) ;
                }
                break ;
            }
        }
    }

    if(p && blk)
    {
        //account for the slot actually used (may exceed the requested size)
        mAlloatedSize += blk->getSlotSize() ;
    }
    return p ;
}
//return a slot to its owning block; empty blocks are dissolved back into
//free space, and previously-full blocks rejoin the avail list.
void LLPrivateMemoryPool::LLMemoryChunk::freeMem(void* addr)
{
    //map the address back to its page's meta entry, then follow mSelf to the
    //block that actually owns that page.
    U32 blk_idx = getPageIndex((U32)addr) ;
    LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + blk_idx * sizeof(LLMemoryBlock)) ;
    blk = blk->mSelf ;

    bool was_full = blk->isFull() ;
    blk->freeMem(addr) ;
    mAlloatedSize -= blk->getSlotSize() ;

    if(blk->empty())
    {
        removeBlock(blk) ;
    }
    else if(was_full)
    {
        addToAvailBlockList(blk) ;
    }
}
  772. bool LLPrivateMemoryPool::LLMemoryChunk::empty()
  773. {
  774. return !mAlloatedSize ;
  775. }
  776. bool LLPrivateMemoryPool::LLMemoryChunk::containsAddress(const char* addr) const
  777. {
  778. return (U32)mBuffer <= (U32)addr && (U32)mBuffer + mBufferSize > (U32)addr ;
  779. }
//debug use
//sanity-check and logging passes; both are currently compiled out (#if 0),
//so dump() is a no-op in this build.
void LLPrivateMemoryPool::LLMemoryChunk::dump()
{
#if 0
    //sanity check
    //NOTE(review): this disabled code references an undeclared 'blk_list';
    //it would need a local container (e.g. std::vector<LLMemoryBlock*>) to
    //compile if re-enabled.
    //for(S32 i = 0 ; i < mBlockLevels ; i++)
    //{
    //	LLMemoryBlock* blk = mAvailBlockList[i] ;
    //	while(blk)
    //	{
    //		blk_list.push_back(blk) ;
    //		blk = blk->mNext ;
    //	}
    //}
    for(S32 i = 0 ; i < mPartitionLevels ; i++)
    {
        LLMemoryBlock* blk = mFreeSpaceList[i] ;
        while(blk)
        {
            blk_list.push_back(blk) ;
            blk = blk->mNext ;
        }
    }
    std::sort(blk_list.begin(), blk_list.end(), LLMemoryBlock::CompareAddress());

    U32 total_size = blk_list[0]->getBufferSize() ;
    for(U32 i = 1 ; i < blk_list.size(); i++)
    {
        total_size += blk_list[i]->getBufferSize() ;
        //free blocks must not overlap
        if((U32)blk_list[i]->getBuffer() < (U32)blk_list[i-1]->getBuffer() + blk_list[i-1]->getBufferSize())
        {
            llerrs << "buffer corrupted." << llendl ;
        }
    }

    llassert_always(total_size + mMinBlockSize >= mBufferSize - ((U32)mDataBuffer - (U32)mBuffer)) ;

    U32 blk_num = (mBufferSize - (mDataBuffer - mBuffer)) / mMinBlockSize ;
    for(U32 i = 0 ; i < blk_num ; )
    {
        LLMemoryBlock* blk = &mBlocks[i] ;
        if(blk->mSelf)
        {
            U32 end = blk->getBufferSize() / mMinBlockSize ;
            for(U32 j = 0 ; j < end ; j++)
            {
                llassert_always(blk->mSelf == blk || !blk->mSelf) ;
            }
            i += end ;
        }
        else
        {
            llerrs << "gap happens" << llendl ;
        }
    }
#endif
#if 0
    llinfos << "---------------------------" << llendl ;
    llinfos << "Chunk buffer: " << (U32)getBuffer() << " size: " << getBufferSize() << llendl ;

    llinfos << "available blocks ... " << llendl ;
    for(S32 i = 0 ; i < mBlockLevels ; i++)
    {
        LLMemoryBlock* blk = mAvailBlockList[i] ;
        while(blk)
        {
            llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ;
            blk = blk->mNext ;
        }
    }

    llinfos << "free blocks ... " << llendl ;
    for(S32 i = 0 ; i < mPartitionLevels ; i++)
    {
        LLMemoryBlock* blk = mFreeSpaceList[i] ;
        while(blk)
        {
            llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ;
            blk = blk->mNext ;
        }
    }
#endif
}
//compute the size for a block, the size is round to integer times of mMinBlockSize.
U32 LLPrivateMemoryPool::LLMemoryChunk::calcBlockSize(U32 slot_size)
{
    //
    //Note: we try to make a block to have 32 slots if the size is not over 32 pages
    //32 is the number of bits of an integer in a 32-bit system
    //
    U32 block_size;
    U32 cut_off_size = llmin(CUT_OFF_SIZE, (U32)(mMinBlockSize << 5)) ; //cap: 32 pages or CUT_OFF_SIZE

    if((slot_size << 5) <= mMinBlockSize)//for small allocations, return one page
    {
        block_size = mMinBlockSize ;
    }
    else if(slot_size >= cut_off_size)//for large allocations, return one-slot block
    {
        //round slot_size up to a whole number of pages
        block_size = (slot_size / mMinBlockSize) * mMinBlockSize ;
        if(block_size < slot_size)
        {
            block_size += mMinBlockSize ;
        }
    }
    else //medium allocations
    {
        //aim for 32 slots per block, clamped at cut_off_size
        if((slot_size << 5) >= cut_off_size)
        {
            block_size = cut_off_size ;
        }
        else
        {
            block_size = ((slot_size << 5) / mMinBlockSize) * mMinBlockSize ;
        }
    }

    llassert_always(block_size >= slot_size) ;

    return block_size ;
}
//create a new block in the chunk
//carves a block for slot level 'blk_idx' out of free space: preferred-size
//free slot first, then the whole-pool free list, then smaller free spans
//that can still hold at least one slot, then larger spans. Returns NULL when
//no free space fits.
LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx)
{
    U32 slot_size = mMinSlotSize * (blk_idx + 1) ; //levels are multiples of the min slot size
    U32 preferred_block_size = calcBlockSize(slot_size) ;
    U16 idx = getPageLevel(preferred_block_size);
    LLMemoryBlock* blk = NULL ;

    if(mFreeSpaceList[idx])//if there is free slot for blk_idx
    {
        blk = createNewBlock(mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ;
    }
    else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool
    {
        blk = createNewBlock(mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ;
    }
    else //search for other non-preferred but enough space slot.
    {
        S32 min_idx = 0 ;
        if(slot_size > mMinBlockSize)
        {
            min_idx = getPageLevel(slot_size) ; //smallest level that can still hold one slot
        }
        for(S32 i = (S32)idx - 1 ; i >= min_idx ; i--) //search the small slots first
        {
            if(mFreeSpaceList[i])
            {
                U32 new_preferred_block_size = mFreeSpaceList[i]->getBufferSize();
                new_preferred_block_size = (new_preferred_block_size / mMinBlockSize) * mMinBlockSize ; //round to integer times of mMinBlockSize.

                //create a NEW BLOCK THERE.
                if(new_preferred_block_size >= slot_size) //at least there is space for one slot.
                {
                    blk = createNewBlock(mFreeSpaceList[i], new_preferred_block_size, slot_size, blk_idx) ;
                }
                break ; //first candidate only, even if it was too small
            }
        }

        if(!blk)
        {
            for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots
            {
                if(mFreeSpaceList[i])
                {
                    //create a NEW BLOCK THERE.
                    blk = createNewBlock(mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ;
                    break ;
                }
            }
        }
    }

    return blk ;
}
//create a new block at the designed location
//carve 'buffer_size' bytes out of free block 'blk' for an allocation block of
//'slot_size'-byte slots; any remainder of at least mMinBlockSize is returned
//to the free-space lists (a smaller remainder is silently absorbed until merge).
LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx)
{
	//unlink from the free space
	removeFromFreeSpace(blk) ;

	//check the rest space
	U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;
	if(new_free_blk_size < mMinBlockSize) //can not partition the memory into size smaller than mMinBlockSize
	{
		new_free_blk_size = 0 ; //discard the last small extra space.
	}

	//add the rest space back to the free list
	if(new_free_blk_size > 0) //blk still has free space
	{
		//the block-header array parallels the data pages, so the header for the
		//remainder sits (buffer_size / mMinBlockSize) entries past this one
		LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ;
		next_blk->mPrev = NULL ;
		next_blk->mNext = NULL ;
		next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ;
		addToFreeSpace(next_blk) ;
	}

	blk->init(blk->getBuffer(), buffer_size, slot_size) ;
	//insert to the available block list...
	mAvailBlockList[blk_idx] = blk ;

	//mark the address map: all blocks covered by this block space pointing back to this block.
	U32 end = (buffer_size / mMinBlockSize) ;
	for(U32 i = 1 ; i < end ; i++)
	{
		(blk + i)->mSelf = blk ;
	}

	return blk ;
}
//delete a block, release the block to the free pool.
//unlinks the block from its avail list, re-marks it free, then coalesces it
//with free neighbors on both sides before pushing it onto the free-space lists.
void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk)
{
	//remove from the available block list
	if(blk->mPrev)
	{
		blk->mPrev->mNext = blk->mNext ;
	}
	if(blk->mNext)
	{
		blk->mNext->mPrev = blk->mPrev ;
	}
	U32 blk_idx = getBlockLevel(blk->getSlotSize());
	if(mAvailBlockList[blk_idx] == blk)
	{
		mAvailBlockList[blk_idx] = blk->mNext ;
	}
	blk->mNext = NULL ;
	blk->mPrev = NULL ;

	//mark it free: reset the header to a raw free buffer of the same extent
	blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ;

#if 1
	//merge blk with neighbors if possible
	if(blk->getBuffer() > mDataBuffer) //has the left neighbor
	{
		//(blk - 1) is the header of the page immediately before this block;
		//its mSelf points at the head of whichever block owns that page
		if((blk - 1)->mSelf->isFree())
		{
			LLMemoryBlock* left_blk = (blk - 1)->mSelf ;
			removeFromFreeSpace((blk - 1)->mSelf);
			left_blk->setBuffer(left_blk->getBuffer(), left_blk->getBufferSize() + blk->getBufferSize()) ;
			blk = left_blk ;
		}
	}
	if(blk->getBuffer() + blk->getBufferSize() <= mBuffer + mBufferSize - mMinBlockSize) //has the right neighbor
	{
		//the right neighbor's header is d pages further along the header array
		U32 d = blk->getBufferSize() / mMinBlockSize ;
		if((blk + d)->isFree())
		{
			LLMemoryBlock* right_blk = blk + d ;
			removeFromFreeSpace(blk + d) ;
			blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + right_blk->getBufferSize()) ;
		}
	}
#endif
	addToFreeSpace(blk) ;

	return ;
}
  1022. //the top block in the list is full, pop it out of the list
  1023. void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx)
  1024. {
  1025. if(mAvailBlockList[blk_idx])
  1026. {
  1027. LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ;
  1028. if(next)
  1029. {
  1030. next->mPrev = NULL ;
  1031. }
  1032. mAvailBlockList[blk_idx]->mPrev = NULL ;
  1033. mAvailBlockList[blk_idx]->mNext = NULL ;
  1034. mAvailBlockList[blk_idx] = next ;
  1035. }
  1036. }
//add the block back to the free pool
//pushes the (unlinked) block onto the front of the free-space list whose
//level matches its size; oversized blocks land in the catch-all last level.
void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk)
{
	llassert_always(!blk->mPrev) ;
	llassert_always(!blk->mNext) ;

	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
	//must mark the true last page BEFORE clamping free_idx to the list range
	(blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head.
	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;

	blk->mNext = mFreeSpaceList[free_idx] ;
	if(mFreeSpaceList[free_idx])
	{
		mFreeSpaceList[free_idx]->mPrev = blk ;
	}
	mFreeSpaceList[free_idx] = blk ;
	blk->mPrev = NULL ;
	blk->mSelf = blk ; //a block whose mSelf points to itself is free

	return ;
}
  1055. //remove the space from the free pool
  1056. void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk)
  1057. {
  1058. U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
  1059. free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
  1060. if(mFreeSpaceList[free_idx] == blk)
  1061. {
  1062. mFreeSpaceList[free_idx] = blk->mNext ;
  1063. }
  1064. if(blk->mPrev)
  1065. {
  1066. blk->mPrev->mNext = blk->mNext ;
  1067. }
  1068. if(blk->mNext)
  1069. {
  1070. blk->mNext->mPrev = blk->mPrev ;
  1071. }
  1072. blk->mNext = NULL ;
  1073. blk->mPrev = NULL ;
  1074. blk->mSelf = NULL ;
  1075. return ;
  1076. }
  1077. void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk)
  1078. {
  1079. llassert_always(!blk->mPrev) ;
  1080. llassert_always(!blk->mNext) ;
  1081. U32 blk_idx = getBlockLevel(blk->getSlotSize());
  1082. blk->mNext = mAvailBlockList[blk_idx] ;
  1083. if(blk->mNext)
  1084. {
  1085. blk->mNext->mPrev = blk ;
  1086. }
  1087. blk->mPrev = NULL ;
  1088. mAvailBlockList[blk_idx] = blk ;
  1089. return ;
  1090. }
//index of the page (mMinBlockSize-sized unit) containing the given address,
//relative to the start of this chunk's data buffer.
//NOTE(review): the U32 cast truncates pointers on 64-bit builds; the result is
//only meaningful because both operands are truncated the same way -- confirm.
U32 LLPrivateMemoryPool::LLMemoryChunk::getPageIndex(U32 addr)
{
	return (addr - (U32)mDataBuffer) / mMinBlockSize ;
}
  1095. //for mAvailBlockList
  1096. U32 LLPrivateMemoryPool::LLMemoryChunk::getBlockLevel(U32 size)
  1097. {
  1098. llassert(size >= mMinSlotSize && size <= mMaxSlotSize) ;
  1099. //start from 0
  1100. return (size + mMinSlotSize - 1) / mMinSlotSize - 1 ;
  1101. }
  1102. //for mFreeSpaceList
  1103. U16 LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size)
  1104. {
  1105. //start from 0
  1106. U16 level = size / mMinBlockSize - 1 ;
  1107. if(level >= mPartitionLevels)
  1108. {
  1109. level = mPartitionLevels - 1 ;
  1110. }
  1111. return level ;
  1112. }
//-------------------------------------------------------------------
//class LLPrivateMemoryPool
//--------------------------------------------------------------------
//chunk sizes requested from the heap; large-slot size classes use the bigger one.
const U32 CHUNK_SIZE = 4 << 20 ; //4 MB
const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB
  1118. LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type, U32 max_pool_size) :
  1119. mMutexp(NULL),
  1120. mReservedPoolSize(0),
  1121. mHashFactor(1),
  1122. mType(type),
  1123. mMaxPoolSize(max_pool_size)
  1124. {
  1125. if(type == STATIC_THREADED || type == VOLATILE_THREADED)
  1126. {
  1127. mMutexp = new LLMutex(NULL) ;
  1128. }
  1129. for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
  1130. {
  1131. mChunkList[i] = NULL ;
  1132. }
  1133. mNumOfChunks = 0 ;
  1134. }
//release all chunk bookkeeping, then the mutex (if one was created).
LLPrivateMemoryPool::~LLPrivateMemoryPool()
{
	destroyPool();
	delete mMutexp ;
}
//allocate 'size' bytes from the pool.
//requests of CHUNK_SIZE or more (and any request the pool cannot satisfy)
//fall back to the heap, so the returned pointer may or may not be pool-owned;
//freeMem() resolves ownership via the chunk hash table.
char* LLPrivateMemoryPool::allocate(U32 size)
{
	if(!size)
	{
		return NULL ;
	}

	//if the asked size larger than MAX_BLOCK_SIZE, fetch from heap directly, the pool does not manage it
	if(size >= CHUNK_SIZE)
	{
		return (char*)malloc(size) ;
	}

	char* p = NULL ;

	//find the appropriate chunk
	S32 chunk_idx = getChunkIndex(size) ;

	lock() ;

	//first pass: try every existing chunk of this size class
	LLMemoryChunk* chunk = mChunkList[chunk_idx];
	while(chunk)
	{
		if((p = chunk->allocate(size)))
		{
			break ;
		}
		chunk = chunk->mNext ;
	}

	//fetch new memory chunk
	if(!p)
	{
		if(mReservedPoolSize + CHUNK_SIZE > mMaxPoolSize)
		{
			//pool is at capacity: scan the existing chunks again.
			//NOTE(review): the lock has been held since the first scan, so nothing
			//can have been freed in between; this second pass looks redundant -- confirm.
			chunk = mChunkList[chunk_idx];
			while(chunk)
			{
				if((p = chunk->allocate(size)))
				{
					break ;
				}
				chunk = chunk->mNext ;
			}
		}
		else
		{
			chunk = addChunk(chunk_idx) ;
			if(chunk)
			{
				p = chunk->allocate(size) ;
			}
		}
	}

	unlock() ;

	if(!p) //to get memory from the private pool failed, try the heap directly
	{
		static bool to_log = true ;

		if(to_log)
		{
			llwarns << "The memory pool overflows, now using heap directly!" << llendl ;
			to_log = false ;
		}

		return (char*)malloc(size) ;
	}

	return p ;
}
  1201. void LLPrivateMemoryPool::freeMem(void* addr)
  1202. {
  1203. if(!addr)
  1204. {
  1205. return ;
  1206. }
  1207. lock() ;
  1208. LLMemoryChunk* chunk = findChunk((char*)addr) ;
  1209. if(!chunk)
  1210. {
  1211. free(addr) ; //release from heap
  1212. }
  1213. else
  1214. {
  1215. chunk->freeMem(addr) ;
  1216. if(chunk->empty())
  1217. {
  1218. removeChunk(chunk) ;
  1219. }
  1220. }
  1221. unlock() ;
  1222. }
//debug hook; intentionally a no-op for now.
void LLPrivateMemoryPool::dump()
{
}
  1226. U32 LLPrivateMemoryPool::getTotalAllocatedSize()
  1227. {
  1228. U32 total_allocated = 0 ;
  1229. LLMemoryChunk* chunk ;
  1230. for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
  1231. {
  1232. chunk = mChunkList[i];
  1233. while(chunk)
  1234. {
  1235. total_allocated += chunk->getAllocatedSize() ;
  1236. chunk = chunk->mNext ;
  1237. }
  1238. }
  1239. return total_allocated ;
  1240. }
  1241. void LLPrivateMemoryPool::lock()
  1242. {
  1243. if(mMutexp)
  1244. {
  1245. mMutexp->lock() ;
  1246. }
  1247. }
  1248. void LLPrivateMemoryPool::unlock()
  1249. {
  1250. if(mMutexp)
  1251. {
  1252. mMutexp->unlock() ;
  1253. }
  1254. }
  1255. S32 LLPrivateMemoryPool::getChunkIndex(U32 size)
  1256. {
  1257. S32 i ;
  1258. for(i = 0 ; size > MAX_SLOT_SIZES[i]; i++);
  1259. llassert_always(i < SUPER_ALLOCATION);
  1260. return i ;
  1261. }
//destroy the entire pool
//NOTE(review): this only resets the bookkeeping (chunk lists, hash table);
//chunk buffers still registered here are not free()d and mReservedPoolSize
//is not reset -- confirm whether leaking on a non-empty destroy is intended.
void LLPrivateMemoryPool::destroyPool()
{
	lock() ;

	if(mNumOfChunks > 0)
	{
		llwarns << "There is some memory not freed when destroy the memory pool!" << llendl ;
	}

	mNumOfChunks = 0 ;
	mChunkHashList.clear() ;
	mHashFactor = 1 ;
	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
	{
		mChunkList[i] = NULL ;
	}

	unlock() ;
}
  1279. bool LLPrivateMemoryPool::checkSize(U32 asked_size)
  1280. {
  1281. if(mReservedPoolSize + asked_size > mMaxPoolSize)
  1282. {
  1283. llinfos << "Max pool size: " << mMaxPoolSize << llendl ;
  1284. llinfos << "Total reserved size: " << mReservedPoolSize + asked_size << llendl ;
  1285. llinfos << "Total_allocated Size: " << getTotalAllocatedSize() << llendl ;
  1286. //llerrs << "The pool is overflowing..." << llendl ;
  1287. return false ;
  1288. }
  1289. return true ;
  1290. }
  1291. LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index)
  1292. {
  1293. U32 preferred_size ;
  1294. U32 overhead ;
  1295. if(chunk_index < LARGE_ALLOCATION)
  1296. {
  1297. preferred_size = CHUNK_SIZE ; //4MB
  1298. overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
  1299. MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
  1300. }
  1301. else
  1302. {
  1303. preferred_size = LARGE_CHUNK_SIZE ; //16MB
  1304. overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
  1305. MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
  1306. }
  1307. if(!checkSize(preferred_size + overhead))
  1308. {
  1309. return NULL ;
  1310. }
  1311. mReservedPoolSize += preferred_size + overhead ;
  1312. char* buffer = (char*)malloc(preferred_size + overhead) ;
  1313. if(!buffer)
  1314. {
  1315. return NULL ;
  1316. }
  1317. LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ;
  1318. chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index],
  1319. MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
  1320. //add to the tail of the linked list
  1321. {
  1322. if(!mChunkList[chunk_index])
  1323. {
  1324. mChunkList[chunk_index] = chunk ;
  1325. }
  1326. else
  1327. {
  1328. LLMemoryChunk* cur = mChunkList[chunk_index] ;
  1329. while(cur->mNext)
  1330. {
  1331. cur = cur->mNext ;
  1332. }
  1333. cur->mNext = chunk ;
  1334. chunk->mPrev = cur ;
  1335. }
  1336. }
  1337. //insert into the hash table
  1338. addToHashTable(chunk) ;
  1339. mNumOfChunks++;
  1340. return chunk ;
  1341. }
//unlink a chunk from its size-class list and the hash table, then return
//its entire buffer (header included) to the heap.
void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk)
{
	if(!chunk)
	{
		return ;
	}

	//remove from the linked list (clear the head pointer if it is the head)
	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
	{
		if(mChunkList[i] == chunk)
		{
			mChunkList[i] = chunk->mNext ;
		}
	}
	if(chunk->mPrev)
	{
		chunk->mPrev->mNext = chunk->mNext ;
	}
	if(chunk->mNext)
	{
		chunk->mNext->mPrev = chunk->mPrev ;
	}

	//remove from the hash table
	removeFromHashTable(chunk) ;

	mNumOfChunks--;
	mReservedPoolSize -= chunk->getBufferSize() ;

	//release memory
	//the chunk object was placement-new'ed inside this buffer, so freeing the
	//buffer releases the header too (no destructor call needed).
	free(chunk->getBuffer()) ;
}
//hash an address to a table slot: chunk-granular address modulo the table size.
//NOTE(review): the U32 cast truncates pointers on 64-bit builds; lookups still
//work because insert and lookup truncate the same way -- confirm.
U16 LLPrivateMemoryPool::findHashKey(const char* addr)
{
	return (((U32)addr) / CHUNK_SIZE) % mHashFactor ;
}
  1375. LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* addr)
  1376. {
  1377. U16 key = findHashKey(addr) ;
  1378. if(mChunkHashList.size() <= key)
  1379. {
  1380. return NULL ;
  1381. }
  1382. return mChunkHashList[key].findChunk(addr) ;
  1383. }
//register a chunk in every hash slot its address range maps to.
//if any slot is already full (two chunks), grow the table to the next hash
//factor and rebuild; rehash() re-enters this function once per chunk.
void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk)
{
	static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 719, 997, 1523, 0xFFFF};

	U16 i ;
	if(mChunkHashList.empty())
	{
		mHashFactor = HASH_FACTORS[0] ;
		rehash() ;
	}

	U16 start_key = findHashKey(chunk->getBuffer()) ;
	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
	bool need_rehash = false ;

	if(mChunkHashList[start_key].hasElement(chunk))
	{
		return; //already inserted.
	}
	need_rehash = mChunkHashList[start_key].add(chunk) ;

	if(start_key == end_key && !need_rehash)
	{
		return ; //done
	}

	if(!need_rehash)
	{
		need_rehash = mChunkHashList[end_key].add(chunk) ;
	}

	if(!need_rehash)
	{
		//fill every slot strictly between start and end; the modulo hash can
		//wrap around, in which case the range splits into two spans
		if(end_key < start_key)
		{
			need_rehash = fillHashTable(start_key + 1, mHashFactor, chunk) ;
			if(!need_rehash)
			{
				need_rehash = fillHashTable(0, end_key, chunk) ;
			}
		}
		else
		{
			need_rehash = fillHashTable(start_key + 1, end_key, chunk) ;
		}
	}

	if(need_rehash)
	{
		//advance to the next larger hash factor and rebuild the whole table
		i = 0 ;
		while(HASH_FACTORS[i] <= mHashFactor) i++;

		mHashFactor = HASH_FACTORS[i] ;
		llassert_always(mHashFactor != 0xFFFF) ;//stop point to prevent endlessly recursive calls

		rehash() ;
	}
}
//deregister a chunk from every hash slot its address range maps to,
//mirroring the (possibly wrapped) span logic of addToHashTable().
void LLPrivateMemoryPool::removeFromHashTable(LLMemoryChunk* chunk)
{
	U16 start_key = findHashKey(chunk->getBuffer()) ;
	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;

	mChunkHashList[start_key].remove(chunk) ;

	if(start_key == end_key)
	{
		return ; //done
	}

	mChunkHashList[end_key].remove(chunk) ;

	if(end_key < start_key) //the key span wrapped past the end of the table
	{
		for(U16 i = start_key + 1 ; i < mHashFactor; i++)
		{
			mChunkHashList[i].remove(chunk) ;
		}
		for(U16 i = 0 ; i < end_key; i++)
		{
			mChunkHashList[i].remove(chunk) ;
		}
	}
	else
	{
		for(U16 i = start_key + 1 ; i < end_key; i++)
		{
			mChunkHashList[i].remove(chunk) ;
		}
	}
}
  1462. void LLPrivateMemoryPool::rehash()
  1463. {
  1464. llinfos << "new hash factor: " << mHashFactor << llendl ;
  1465. mChunkHashList.clear() ;
  1466. mChunkHashList.resize(mHashFactor) ;
  1467. LLMemoryChunk* chunk ;
  1468. for(U16 i = 0 ; i < SUPER_ALLOCATION ; i++)
  1469. {
  1470. chunk = mChunkList[i] ;
  1471. while(chunk)
  1472. {
  1473. addToHashTable(chunk) ;
  1474. chunk = chunk->mNext ;
  1475. }
  1476. }
  1477. }
  1478. bool LLPrivateMemoryPool::fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk)
  1479. {
  1480. for(U16 i = start; i < end; i++)
  1481. {
  1482. if(mChunkHashList[i].add(chunk))
  1483. {
  1484. return true ;
  1485. }
  1486. }
  1487. return false ;
  1488. }
  1489. //--------------------------------------------------------------------
  1490. // class LLChunkHashElement
  1491. //--------------------------------------------------------------------
  1492. LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::LLChunkHashElement::findChunk(const char* addr)
  1493. {
  1494. if(mFirst && mFirst->containsAddress(addr))
  1495. {
  1496. return mFirst ;
  1497. }
  1498. else if(mSecond && mSecond->containsAddress(addr))
  1499. {
  1500. return mSecond ;
  1501. }
  1502. return NULL ;
  1503. }
  1504. //return false if successfully inserted to the hash slot.
  1505. bool LLPrivateMemoryPool::LLChunkHashElement::add(LLPrivateMemoryPool::LLMemoryChunk* chunk)
  1506. {
  1507. llassert_always(!hasElement(chunk)) ;
  1508. if(!mFirst)
  1509. {
  1510. mFirst = chunk ;
  1511. }
  1512. else if(!mSecond)
  1513. {
  1514. mSecond = chunk ;
  1515. }
  1516. else
  1517. {
  1518. return true ; //failed
  1519. }
  1520. return false ;
  1521. }
  1522. void LLPrivateMemoryPool::LLChunkHashElement::remove(LLPrivateMemoryPool::LLMemoryChunk* chunk)
  1523. {
  1524. if(mFirst == chunk)
  1525. {
  1526. mFirst = NULL ;
  1527. }
  1528. else if(mSecond ==chunk)
  1529. {
  1530. mSecond = NULL ;
  1531. }
  1532. else
  1533. {
  1534. llerrs << "This slot does not contain this chunk!" << llendl ;
  1535. }
  1536. }
//--------------------------------------------------------------------
//class LLPrivateMemoryPoolManager
//--------------------------------------------------------------------
//singleton instance, the global enable flag, and the list of pools that
//still held live allocations when the manager itself was destroyed.
LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ;
BOOL LLPrivateMemoryPoolManager::sPrivatePoolEnabled = FALSE ;
std::vector<LLPrivateMemoryPool*> LLPrivateMemoryPoolManager::sDanglingPoolList ;
//set up the per-type pool slots and record the global enable flag and size cap.
LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled, U32 max_pool_size)
{
	mPoolList.resize(LLPrivateMemoryPool::MAX_TYPES) ;

	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
	{
		mPoolList[i] = NULL ;
	}

	sPrivatePoolEnabled = enabled ;

	const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB
	//NOTE(review): llmax means the pool cap is never SMALLER than 256MB --
	//MAX_POOL_SIZE acts as a floor despite its name; confirm intent (llmin?).
	mMaxPrivatePoolSize = llmax(max_pool_size, MAX_POOL_SIZE) ;
}
LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager()
{
#if __DEBUG_PRIVATE_MEM__
	//report every allocation that was never freed, with its recorded call site
	if(!sMemAllocationTracker.empty())
	{
		llwarns << "there is potential memory leaking here. The list of not freed memory blocks are from: " <<llendl ;

		S32 k = 0 ;
		for(mem_allocation_info_t::iterator iter = sMemAllocationTracker.begin() ; iter != sMemAllocationTracker.end() ; ++iter)
		{
			llinfos << k++ << ", " << (U32)iter->first << " : " << iter->second << llendl ;
		}
		sMemAllocationTracker.clear() ;
	}
#endif

#if 0
	//all private pools should be released by their owners before reaching here.
	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
	{
		llassert_always(!mPoolList[i]) ;
	}
	mPoolList.clear() ;

#else
	//forcefully release all memory
	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
	{
		if(mPoolList[i])
		{
			if(mPoolList[i]->isEmpty())
			{
				delete mPoolList[i] ;
			}
			else
			{
				//can not delete this pool because it has alloacted memory to be freed.
				//move it to the dangling list.
				sDanglingPoolList.push_back(mPoolList[i]) ;
			}

			mPoolList[i] = NULL ;
		}
	}
	mPoolList.clear() ;
#endif
}
//static
//create the singleton; must be called exactly once, before any pool is used.
void LLPrivateMemoryPoolManager::initClass(BOOL enabled, U32 max_pool_size)
{
	llassert_always(!sInstance) ;

	sInstance = new LLPrivateMemoryPoolManager(enabled, max_pool_size) ;
}
//static
//returns the singleton, or NULL when initClass() has not been called.
LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::getInstance()
{
	//if(!sInstance)
	//{
	//	sInstance = new LLPrivateMemoryPoolManager(FALSE) ;
	//}
	return sInstance ;
}
  1612. //static
  1613. void LLPrivateMemoryPoolManager::destroyClass()
  1614. {
  1615. if(sInstance)
  1616. {
  1617. delete sInstance ;
  1618. sInstance = NULL ;
  1619. }
  1620. }
  1621. LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(S32 type)
  1622. {
  1623. if(!sPrivatePoolEnabled)
  1624. {
  1625. return NULL ;
  1626. }
  1627. if(!mPoolList[type])
  1628. {
  1629. mPoolList[type] = new LLPrivateMemoryPool(type, mMaxPrivatePoolSize) ;
  1630. }
  1631. return mPoolList[type] ;
  1632. }
//release a pool, but only when it no longer holds live allocations;
//a non-empty pool is left registered (its memory must be freed first).
void LLPrivateMemoryPoolManager::deletePool(LLPrivateMemoryPool* pool)
{
	if(pool && pool->isEmpty())
	{
		mPoolList[pool->getType()] = NULL ;
		delete pool;
	}
}
  1641. //debug
  1642. void LLPrivateMemoryPoolManager::updateStatistics()
  1643. {
  1644. mTotalReservedSize = 0 ;
  1645. mTotalAllocatedSize = 0 ;
  1646. for(U32 i = 0; i < mPoolList.size(); i++)
  1647. {
  1648. if(mPoolList[i])
  1649. {
  1650. mTotalReservedSize += mPoolList[i]->getTotalReservedSize() ;
  1651. mTotalAllocatedSize += mPoolList[i]->getTotalAllocatedSize() ;
  1652. }
  1653. }
  1654. }
  1655. #if __DEBUG_PRIVATE_MEM__
  1656. //static
  1657. char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line)
  1658. {
  1659. char* p ;
  1660. if(!poolp)
  1661. {
  1662. p = (char*)malloc(size) ;
  1663. }
  1664. else
  1665. {
  1666. p = poolp->allocate(size) ;
  1667. }
  1668. if(p)
  1669. {
  1670. char num[16] ;
  1671. sprintf(num, " line: %d ", line) ;
  1672. std::string str(function) ;
  1673. str += num;
  1674. sMemAllocationTracker[p] = str ;
  1675. }
  1676. return p ;
  1677. }
  1678. #else
  1679. //static
  1680. char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size)
  1681. {
  1682. if(poolp)
  1683. {
  1684. return poolp->allocate(size) ;
  1685. }
  1686. else
  1687. {
  1688. return (char*)malloc(size) ;
  1689. }
  1690. }
  1691. #endif
  1692. //static
  1693. void LLPrivateMemoryPoolManager::freeMem(LLPrivateMemoryPool* poolp, void* addr)
  1694. {
  1695. if(!addr)
  1696. {
  1697. return ;
  1698. }
  1699. #if __DEBUG_PRIVATE_MEM__
  1700. sMemAllocationTracker.erase((char*)addr) ;
  1701. #endif
  1702. if(poolp)
  1703. {
  1704. poolp->freeMem(addr) ;
  1705. }
  1706. else
  1707. {
  1708. if(!sPrivatePoolEnabled)
  1709. {
  1710. free(addr) ; //private pool is disabled.
  1711. }
  1712. else if(!sInstance) //the private memory manager is destroyed, try the dangling list
  1713. {
  1714. for(S32 i = 0 ; i < sDanglingPoolList.size(); i++)
  1715. {
  1716. if(sDanglingPoolList[i]->findChunk((char*)addr))
  1717. {
  1718. sDanglingPoolList[i]->freeMem(addr) ;
  1719. if(sDanglingPoolList[i]->isEmpty())
  1720. {
  1721. delete sDanglingPoolList[i] ;
  1722. if(i < sDanglingPoolList.size() - 1)
  1723. {
  1724. sDanglingPoolList[i] = sDanglingPoolList[sDanglingPoolList.size() - 1] ;
  1725. }
  1726. sDanglingPoolList.pop_back() ;
  1727. }
  1728. addr = NULL ;
  1729. break ;
  1730. }
  1731. }
  1732. llassert_always(!addr) ; //addr should be release before hitting here!
  1733. }
  1734. else
  1735. {
  1736. llerrs << "private pool is used before initialized.!" << llendl ;
  1737. }
  1738. }
  1739. }
  1740. //--------------------------------------------------------------------
  1741. //class LLPrivateMemoryPoolTester
  1742. //--------------------------------------------------------------------
  1743. #if 0
//static state for the tester: the singleton instance and the pool under test.
LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ;
LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ;
//nothing to set up; all tester state is static.
LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester()
{
}
//nothing to tear down here; cleanup lives in destroy().
LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester()
{
}
//static
//lazy singleton accessor for the tester.
LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance()
{
	if(!sInstance)
	{
		sInstance = ::new LLPrivateMemoryPoolTester() ;
	}
	return sInstance ;
}
//static
//tear down the tester singleton and release its test pool.
void LLPrivateMemoryPoolTester::destroy()
{
	if(sInstance)
	{
		::delete sInstance ;
		sInstance = NULL ;
	}

	if(sPool)
	{
		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
		sPool = NULL ;
	}
}
//run the full test suite against a fresh pool of the given type.
void LLPrivateMemoryPoolTester::run(S32 type)
{
	if(sPool)
	{
		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
	}
	sPool = LLPrivateMemoryPoolManager::getInstance()->newPool(type) ;

	//run the test
	correctnessTest() ;
	performanceTest() ;
	//fragmentationtest() ;

	//release pool.
	LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
	sPool = NULL ;
}
//exercise the pool with 'times' rounds of allocations at 'levels' distinct
//sizes (min_size..max_size stepped by stride), optionally freeing random
//entries as it goes, then free everything and verify the stamped markers.
void LLPrivateMemoryPoolTester::test(U32 min_size, U32 max_size, U32 stride, U32 times,
									 bool random_deletion, bool output_statistics)
{
	U32 levels = (max_size - min_size) / stride + 1 ;
	char*** p ;
	U32 i, j ;
	U32 total_allocated_size = 0 ;

	//allocate space for p ;
	//one contiguous char* array, indexed as a times x levels matrix
	if(!(p = ::new char**[times]) || !(*p = ::new char*[times * levels]))
	{
		llerrs << "memory initialization for p failed" << llendl ;
	}

	//init
	for(i = 0 ; i < times; i++)
	{
		p[i] = *p + i * levels ;
		for(j = 0 ; j < levels; j++)
		{
			p[i][j] = NULL ;
		}
	}

	//allocation
	U32 size ;
	for(i = 0 ; i < times ; i++)
	{
		for(j = 0 ; j < levels; j++)
		{
			size = min_size + j * stride ;
			p[i][j] = ALLOCATE_MEM(sPool, size) ;

			total_allocated_size+= size ;

			//stamp the indices into the first 8 bytes so later frees can
			//verify the memory was neither corrupted nor mixed up
			*(U32*)p[i][j] = i ;
			*((U32*)p[i][j] + 1) = j ;
			//p[i][j][size - 1] = '\0' ; //access the last element to verify the success of the allocation.

			//randomly release memory
			if(random_deletion)
			{
				S32 k = rand() % levels ;

				if(p[i][k])
				{
					llassert_always(*(U32*)p[i][k] == i && *((U32*)p[i][k] + 1) == k) ;
					FREE_MEM(sPool, p[i][k]) ;
					total_allocated_size -= min_size + k * stride ;
					p[i][k] = NULL ;
				}
			}
		}
	}

	//output pool allocation statistics
	if(output_statistics)
	{
	}

	//release all memory allocations
	for(i = 0 ; i < times; i++)
	{
		for(j = 0 ; j < levels; j++)
		{
			if(p[i][j])
			{
				llassert_always(*(U32*)p[i][j] == i && *((U32*)p[i][j] + 1) == j) ;
				FREE_MEM(sPool, p[i][j]) ;
				total_allocated_size -= min_size + j * stride ;
				p[i][j] = NULL ;
			}
		}
	}

	::delete[] *p ;
	::delete[] p ;
}
//time 'times' allocations/frees of 'size' bytes through the private pool,
//then the same workload through the standard allocator, and log both.
void LLPrivateMemoryPoolTester::testAndTime(U32 size, U32 times)
{
	LLTimer timer ;

	llinfos << " -**********************- " << llendl ;
	llinfos << "test size: " << size << " test times: " << times << llendl ;

	timer.reset() ;
	char** p = new char*[times] ;

	//using the customized memory pool
	//allocation
	for(U32 i = 0 ; i < times; i++)
	{
		p[i] = ALLOCATE_MEM(sPool, size) ;
		if(!p[i])
		{
			llerrs << "allocation failed" << llendl ;
		}
	}
	//de-allocation
	for(U32 i = 0 ; i < times; i++)
	{
		FREE_MEM(sPool, p[i]) ;
		p[i] = NULL ;
	}
	llinfos << "time spent using customized memory pool: " << timer.getElapsedTimeF32() << llendl ;

	timer.reset() ;

	//using the standard allocator/de-allocator:
	//allocation
	for(U32 i = 0 ; i < times; i++)
	{
		p[i] = ::new char[size] ;
		if(!p[i])
		{
			llerrs << "allocation failed" << llendl ;
		}
	}
	//de-allocation
	for(U32 i = 0 ; i < times; i++)
	{
		::delete[] p[i] ;
		p[i] = NULL ;
	}
	llinfos << "time spent using standard allocator/de-allocator: " << timer.getElapsedTimeF32() << llendl ;

	delete[] p;
}
void LLPrivateMemoryPoolTester::correctnessTest()
{
	//try many different sized allocation, and all kinds of edge cases, access the allocated memory
	//to see if allocation is right.

	//edge case: a zero-byte request must be handled (returns NULL)
	char* p = ALLOCATE_MEM(sPool, 0) ;
	FREE_MEM(sPool, p) ;

	//small sized
	// [8 bytes, 2KB), each asks for 256 allocations and deallocations
	test(8, 2040, 8, 256, true, true) ;

	//medium sized
	//[2KB, 512KB), each asks for 16 allocations and deallocations
	test(2048, 512 * 1024 - 2048, 2048, 16, true, true) ;

	//large sized
	//[512KB, 4MB], each asks for 8 allocations and deallocations
	test(512 * 1024, 4 * 1024 * 1024, 64 * 1024, 6, true, true) ;
}
//time small, medium and large allocation patterns (8 rounds each).
void LLPrivateMemoryPoolTester::performanceTest()
{
	U32 test_size[3] = {768, 3* 1024, 3* 1024 * 1024};

	//small sized
	testAndTime(test_size[0], 8) ;

	//medium sized
	testAndTime(test_size[1], 8) ;

	//large sized
	testAndTime(test_size[2], 8) ;
}
//placeholder: intended to print chunk usage statistics whenever a new chunk
//is requested during the correctness and performance tests. Not implemented.
void LLPrivateMemoryPoolTester::fragmentationtest()
{
	//for internal fragmentation statistics:
	//every time when asking for a new chunk during correctness test, and performance test,
	//print out the chunk usage statistices.
}
  1935. #endif
  1936. //--------------------------------------------------------------------