PageRenderTime 131ms CodeModel.GetById 14ms app.highlight 103ms RepoModel.GetById 1ms app.codeStats 0ms

/indra/llcommon/llmemory.cpp

https://bitbucket.org/lindenlab/viewer-beta/
C++ | 2286 lines | 1695 code | 347 blank | 244 comment | 248 complexity | c408966c7b52a3d7f44f756208b157d6 MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/** 
   2 * @file llmemory.cpp
   3 * @brief Very special memory allocation/deallocation stuff here
   4 *
   5 * $LicenseInfo:firstyear=2002&license=viewerlgpl$
   6 * Second Life Viewer Source Code
   7 * Copyright (C) 2010, Linden Research, Inc.
   8 * 
   9 * This library is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU Lesser General Public
  11 * License as published by the Free Software Foundation;
  12 * version 2.1 of the License only.
  13 * 
  14 * This library is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  17 * Lesser General Public License for more details.
  18 * 
  19 * You should have received a copy of the GNU Lesser General Public
  20 * License along with this library; if not, write to the Free Software
  21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
  22 * 
  23 * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA
  24 * $/LicenseInfo$
  25 */
  26
  27#include "linden_common.h"
  28
  29
  30//#if MEM_TRACK_MEM
  31#include "llthread.h"
  32//#endif
  33
  34#if defined(LL_WINDOWS)
  35//# include <windows.h>
  36# include <psapi.h>
  37#elif defined(LL_DARWIN)
  38# include <sys/types.h>
  39# include <mach/task.h>
  40# include <mach/mach_init.h>
  41#elif LL_LINUX || LL_SOLARIS
  42# include <unistd.h>
  43#endif
  44
  45#include "llmemory.h"
  46
  47#include "llsys.h"
  48#include "llframetimer.h"
  49//----------------------------------------------------------------------------
  50
//static member definitions for LLMemory (declared in llmemory.h)
//static
char* LLMemory::reserveMem = 0;	// 16KB emergency reserve; released by freeReserve() when an allocation fails
U32 LLMemory::sAvailPhysicalMemInKB = U32_MAX ;	// physical memory still available; refreshed by updateMemoryInfo()
U32 LLMemory::sMaxPhysicalMemInKB = 0;	// usable memory cap (clamped by sMaxHeapSizeInKB on Windows)
U32 LLMemory::sAllocatedMemInKB = 0;	// current working-set / RSS size in KB
U32 LLMemory::sAllocatedPageSizeInKB = 0 ;	// current pagefile usage in KB (Windows only)
U32 LLMemory::sMaxHeapSizeInKB = U32_MAX ;	// heap ceiling set by initMaxHeapSizeGB()
BOOL LLMemory::sEnableMemoryFailurePrevention = FALSE;	// gates the checks in isMemoryPoolLow()

#if __DEBUG_PRIVATE_MEM__
LLPrivateMemoryPoolManager::mem_allocation_info_t LLPrivateMemoryPoolManager::sMemAllocationTracker;
#endif
  63
  64//static
  65void LLMemory::initClass()
  66{
  67	if (!reserveMem)
  68	{
  69		reserveMem = new char[16*1024]; // reserve 16K for out of memory error handling
  70	}
  71}
  72
  73//static
  74void LLMemory::cleanupClass()
  75{
  76	delete [] reserveMem;
  77	reserveMem = NULL;
  78}
  79
  80//static
  81void LLMemory::freeReserve()
  82{
  83	delete [] reserveMem;
  84	reserveMem = NULL;
  85}
  86
  87//static 
  88void LLMemory::initMaxHeapSizeGB(F32 max_heap_size_gb, BOOL prevent_heap_failure)
  89{
  90	sMaxHeapSizeInKB = (U32)(max_heap_size_gb * 1024 * 1024) ;
  91	sEnableMemoryFailurePrevention = prevent_heap_failure ;
  92}
  93
//static 
//refresh the cached statistics (sAllocatedMemInKB, sAllocatedPageSizeInKB,
//sMaxPhysicalMemInKB, sAvailPhysicalMemInKB). On Windows the numbers come
//from GetProcessMemoryInfo(); on other platforms only the RSS-based
//allocation size is tracked and the limits are left effectively unbounded.
void LLMemory::updateMemoryInfo() 
{
#if LL_WINDOWS	
	HANDLE self = GetCurrentProcess();
	PROCESS_MEMORY_COUNTERS counters;
	
	if (!GetProcessMemoryInfo(self, &counters, sizeof(counters)))
	{
		llwarns << "GetProcessMemoryInfo failed" << llendl;
		return ;
	}

	sAllocatedMemInKB = (U32)(counters.WorkingSetSize / 1024) ;
	sAllocatedPageSizeInKB = (U32)(counters.PagefileUsage / 1024) ;

	U32 avail_phys, avail_virtual;
	LLMemoryInfo::getAvailableMemoryKB(avail_phys, avail_virtual) ;
	//usable memory is (free + already used), capped by the configured heap ceiling
	sMaxPhysicalMemInKB = llmin(avail_phys + sAllocatedMemInKB, sMaxHeapSizeInKB);

	if(sMaxPhysicalMemInKB > sAllocatedMemInKB)
	{
		sAvailPhysicalMemInKB = sMaxPhysicalMemInKB - sAllocatedMemInKB ;
	}
	else
	{
		sAvailPhysicalMemInKB = 0 ; //already at or over the cap
	}
#else
	//not valid for other systems for now.
	sAllocatedMemInKB = (U32)(LLMemory::getCurrentRSS() / 1024) ;
	sMaxPhysicalMemInKB = U32_MAX ;
	sAvailPhysicalMemInKB = U32_MAX ;
#endif

	return ;
}
 131
 132//
 133//this function is to test if there is enough space with the size in the virtual address space.
 134//it does not do any real allocation
 135//if success, it returns the address where the memory chunk can fit in;
 136//otherwise it returns NULL.
 137//
 138//static 
 139void* LLMemory::tryToAlloc(void* address, U32 size)
 140{
 141#if LL_WINDOWS
 142	address = VirtualAlloc(address, size, MEM_RESERVE | MEM_TOP_DOWN, PAGE_NOACCESS) ;
 143	if(address)
 144	{
 145		if(!VirtualFree(address, 0, MEM_RELEASE))
 146		{
 147			llerrs << "error happens when free some memory reservation." << llendl ;
 148		}
 149	}
 150	return address ;
 151#else
 152	return (void*)0x01 ; //skip checking
 153#endif	
 154}
 155
 156//static 
 157void LLMemory::logMemoryInfo(BOOL update)
 158{
 159	if(update)
 160	{
 161		updateMemoryInfo() ;
 162		LLPrivateMemoryPoolManager::getInstance()->updateStatistics() ;
 163	}
 164
 165	llinfos << "Current allocated physical memory(KB): " << sAllocatedMemInKB << llendl ;
 166	llinfos << "Current allocated page size (KB): " << sAllocatedPageSizeInKB << llendl ;
 167	llinfos << "Current availabe physical memory(KB): " << sAvailPhysicalMemInKB << llendl ;
 168	llinfos << "Current max usable memory(KB): " << sMaxPhysicalMemInKB << llendl ;
 169
 170	llinfos << "--- private pool information -- " << llendl ;
 171	llinfos << "Total reserved (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalReservedSize / 1024 << llendl ;
 172	llinfos << "Total allocated (KB): " << LLPrivateMemoryPoolManager::getInstance()->mTotalAllocatedSize / 1024 << llendl ;
 173}
 174
 175//return 0: everything is normal;
 176//return 1: the memory pool is low, but not in danger;
 177//return -1: the memory pool is in danger, is about to crash.
 178//static 
 179bool LLMemory::isMemoryPoolLow()
 180{
 181	static const U32 LOW_MEMEOY_POOL_THRESHOLD_KB = 64 * 1024 ; //64 MB for emergency use
 182	const static U32 MAX_SIZE_CHECKED_MEMORY_BLOCK = 64 * 1024 * 1024 ; //64 MB
 183	static void* last_reserved_address = NULL ;
 184
 185	if(!sEnableMemoryFailurePrevention)
 186	{
 187		return false ; //no memory failure prevention.
 188	}
 189
 190	if(sAvailPhysicalMemInKB < (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2)) //out of physical memory
 191	{
 192		return true ;
 193	}
 194
 195	if(sAllocatedPageSizeInKB + (LOW_MEMEOY_POOL_THRESHOLD_KB >> 2) > sMaxHeapSizeInKB) //out of virtual address space.
 196	{
 197		return true ;
 198	}
 199
 200	bool is_low = (S32)(sAvailPhysicalMemInKB < LOW_MEMEOY_POOL_THRESHOLD_KB || 
 201		sAllocatedPageSizeInKB + LOW_MEMEOY_POOL_THRESHOLD_KB > sMaxHeapSizeInKB) ;
 202
 203	//check the virtual address space fragmentation
 204	if(!is_low)
 205	{
 206		if(!last_reserved_address)
 207		{
 208			last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
 209		}
 210		else
 211		{
 212			last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
 213			if(!last_reserved_address) //failed, try once more
 214			{
 215				last_reserved_address = LLMemory::tryToAlloc(last_reserved_address, MAX_SIZE_CHECKED_MEMORY_BLOCK) ;
 216			}
 217		}
 218
 219		is_low = !last_reserved_address ; //allocation failed
 220	}
 221
 222	return is_low ;
 223}
 224
//static 
//cached available physical memory in KB; refreshed by updateMemoryInfo().
U32 LLMemory::getAvailableMemKB() 
{
	return sAvailPhysicalMemInKB ;
}
 230
//static 
//cached maximum usable memory in KB; refreshed by updateMemoryInfo().
U32 LLMemory::getMaxMemKB() 
{
	return sMaxPhysicalMemInKB ;
}
 236
//static 
//cached currently-allocated memory in KB; refreshed by updateMemoryInfo().
U32 LLMemory::getAllocatedMemKB() 
{
	return sAllocatedMemInKB ;
}
 242
 243void* ll_allocate (size_t size)
 244{
 245	if (size == 0)
 246	{
 247		llwarns << "Null allocation" << llendl;
 248	}
 249	void *p = malloc(size);
 250	if (p == NULL)
 251	{
 252		LLMemory::freeReserve();
 253		llerrs << "Out of memory Error" << llendl;
 254	}
 255	return p;
 256}
 257
 258//----------------------------------------------------------------------------
 259
 260#if defined(LL_WINDOWS)
 261
//returns the process resident (working set) size in bytes via
//GetProcessMemoryInfo(), or 0 if the query fails.
U64 LLMemory::getCurrentRSS()
{
	HANDLE self = GetCurrentProcess();
	PROCESS_MEMORY_COUNTERS counters;
	
	if (!GetProcessMemoryInfo(self, &counters, sizeof(counters)))
	{
		llwarns << "GetProcessMemoryInfo failed" << llendl;
		return 0;
	}

	return counters.WorkingSetSize;
}
 275
 276//static 
 277U32 LLMemory::getWorkingSetSize()
 278{
 279    PROCESS_MEMORY_COUNTERS pmc ;
 280	U32 ret = 0 ;
 281
 282    if (GetProcessMemoryInfo( GetCurrentProcess(), &pmc, sizeof(pmc)) )
 283	{
 284		ret = pmc.WorkingSetSize ;
 285	}
 286
 287	return ret ;
 288}
 289
 290#elif defined(LL_DARWIN)
 291
 292/* 
 293	The API used here is not capable of dealing with 64-bit memory sizes, but is available before 10.4.
 294	
 295	Once we start requiring 10.4, we can use the updated API, which looks like this:
 296	
 297	task_basic_info_64_data_t basicInfo;
 298	mach_msg_type_number_t  basicInfoCount = TASK_BASIC_INFO_64_COUNT;
 299	if (task_info(mach_task_self(), TASK_BASIC_INFO_64, (task_info_t)&basicInfo, &basicInfoCount) == KERN_SUCCESS)
 300	
 301	Of course, this doesn't gain us anything unless we start building the viewer as a 64-bit executable, since that's the only way
 302	for our memory allocation to exceed 2^32.
 303*/
 304
 305// 	if (sysctl(ctl, 2, &page_size, &size, NULL, 0) == -1)
 306// 	{
 307// 		llwarns << "Couldn't get page size" << llendl;
 308// 		return 0;
 309// 	} else {
 310// 		return page_size;
 311// 	}
 312// }
 313
//returns the process resident set size in bytes via
//task_info(TASK_BASIC_INFO), or 0 if the query fails.
//(See the comment above: this is the pre-10.4, 32-bit-limited API.)
U64 LLMemory::getCurrentRSS()
{
	U64 residentSize = 0;
	task_basic_info_data_t basicInfo;
	mach_msg_type_number_t  basicInfoCount = TASK_BASIC_INFO_COUNT;
	if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&basicInfo, &basicInfoCount) == KERN_SUCCESS)
	{
		residentSize = basicInfo.resident_size;

		// If we ever wanted it, the process virtual size is also available as:
		// virtualSize = basicInfo.virtual_size;
		
//		llinfos << "resident size is " << residentSize << llendl;
	}
	else
	{
		llwarns << "task_info failed" << llendl;
	}

	return residentSize;
}
 335
 336U32 LLMemory::getWorkingSetSize()
 337{
 338	return 0 ;
 339}
 340
 341#elif defined(LL_LINUX)
 342
 343U64 LLMemory::getCurrentRSS()
 344{
 345	static const char statPath[] = "/proc/self/stat";
 346	LLFILE *fp = LLFile::fopen(statPath, "r");
 347	U64 rss = 0;
 348
 349	if (fp == NULL)
 350	{
 351		llwarns << "couldn't open " << statPath << llendl;
 352		goto bail;
 353	}
 354
 355	// Eee-yew!	 See Documentation/filesystems/proc.txt in your
 356	// nearest friendly kernel tree for details.
 357	
 358	{
 359		int ret = fscanf(fp, "%*d (%*[^)]) %*c %*d %*d %*d %*d %*d %*d %*d "
 360						 "%*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %Lu",
 361						 &rss);
 362		if (ret != 1)
 363		{
 364			llwarns << "couldn't parse contents of " << statPath << llendl;
 365			rss = 0;
 366		}
 367	}
 368	
 369	fclose(fp);
 370
 371bail:
 372	return rss;
 373}
 374
 375U32 LLMemory::getWorkingSetSize()
 376{
 377	return 0 ;
 378}
 379
 380#elif LL_SOLARIS
 381#include <sys/types.h>
 382#include <sys/stat.h>
 383#include <fcntl.h>
 384#define _STRUCTURED_PROC 1
 385#include <sys/procfs.h>
 386
 387U64 LLMemory::getCurrentRSS()
 388{
 389	char path [LL_MAX_PATH];	/* Flawfinder: ignore */ 
 390
 391	sprintf(path, "/proc/%d/psinfo", (int)getpid());
 392	int proc_fd = -1;
 393	if((proc_fd = open(path, O_RDONLY)) == -1){
 394		llwarns << "LLmemory::getCurrentRSS() unable to open " << path << ". Returning 0 RSS!" << llendl;
 395		return 0;
 396	}
 397	psinfo_t proc_psinfo;
 398	if(read(proc_fd, &proc_psinfo, sizeof(psinfo_t)) != sizeof(psinfo_t)){
 399		llwarns << "LLmemory::getCurrentRSS() Unable to read from " << path << ". Returning 0 RSS!" << llendl;
 400		close(proc_fd);
 401		return 0;
 402	}
 403
 404	close(proc_fd);
 405
 406	return((U64)proc_psinfo.pr_rssize * 1024);
 407}
 408
 409U32 LLMemory::getWorkingSetSize()
 410{
 411	return 0 ;
 412}
 413
 414#else
 415
 416U64 LLMemory::getCurrentRSS()
 417{
 418	return 0;
 419}
 420
 421U32 LLMemory::getWorkingSetSize()
 422{
 423	return 0;
 424}
 425
 426#endif
 427
 428//--------------------------------------------------------------------------------------------------
 429#if MEM_TRACK_MEM
 430#include "llframetimer.h"
 431
//static 
LLMemTracker* LLMemTracker::sInstance = NULL ;	// lazily created by getInstance(); destroyed by release()
 434
 435LLMemTracker::LLMemTracker()
 436{
 437	mLastAllocatedMem = LLMemory::getWorkingSetSize() ;
 438	mCapacity = 128 ;	
 439	mCurIndex = 0 ;
 440	mCounter = 0 ;
 441	mDrawnIndex = 0 ;
 442	mPaused = FALSE ;
 443
 444	mMutexp = new LLMutex() ;
 445	mStringBuffer = new char*[128] ;
 446	mStringBuffer[0] = new char[mCapacity * 128] ;
 447	for(S32 i = 1 ; i < mCapacity ; i++)
 448	{
 449		mStringBuffer[i] = mStringBuffer[i-1] + 128 ;
 450	}
 451}
 452
 453LLMemTracker::~LLMemTracker()
 454{
 455	delete[] mStringBuffer[0] ;
 456	delete[] mStringBuffer;
 457	delete mMutexp ;
 458}
 459
 460//static 
 461LLMemTracker* LLMemTracker::getInstance()
 462{
 463	if(!sInstance)
 464	{
 465		sInstance = new LLMemTracker() ;
 466	}
 467	return sInstance ;
 468}
 469
 470//static 
 471void LLMemTracker::release() 
 472{
 473	if(sInstance)
 474	{
 475		delete sInstance ;
 476		sInstance = NULL ;
 477	}
 478}
 479
//NOTE(review): the original marked this "//static", but it is an instance
//method — it reads and writes member state (mPaused, mCurIndex, ...).
//Record the call site and the working-set growth since the previous call
//into the ring buffer of formatted strings.
void LLMemTracker::track(const char* function, const int line)
{
	static const S32 MIN_ALLOCATION = 0 ; //NOTE(review): old comment said "1KB" but the value is 0, making the filter below a no-op — confirm the intended threshold

	//read without holding mMutexp — presumably a benign race; verify
	if(mPaused)
	{
		return ;
	}

	U32 allocated_mem = LLMemory::getWorkingSetSize() ;

	LLMutexLock lock(mMutexp) ;

	S32 delta_mem = allocated_mem - mLastAllocatedMem ;
	mLastAllocatedMem = allocated_mem ;

	if(delta_mem <= 0)
	{
		return ; //occupied memory does not grow
	}

	if(delta_mem < MIN_ALLOCATION)
	{
		return ;
	}
		
	//format "<function> line: N DeltaMem: N (bytes) Time: h:mm:ss" into the next slot
	char* buffer = mStringBuffer[mCurIndex++] ;
	F32 time = (F32)LLFrameTimer::getElapsedSeconds() ;
	S32 hours = (S32)(time / (60*60));
	S32 mins = (S32)((time - hours*(60*60)) / 60);
	S32 secs = (S32)((time - hours*(60*60) - mins*60));
	strcpy(buffer, function) ;
	sprintf(buffer + strlen(function), " line: %d DeltaMem: %d (bytes) Time: %d:%02d:%02d", line, delta_mem, hours,mins,secs) ;

	//mCounter saturates at capacity; mCurIndex wraps (ring buffer)
	if(mCounter < mCapacity)
	{
		mCounter++ ;
	}
	if(mCurIndex >= mCapacity)
	{
		mCurIndex = 0 ;		
	}
}
 524
 525
//NOTE(review): marked "//static" in the original, but operates on instance state.
//Begin a draw pass: acquires mMutexp (released in postDraw()), optionally
//pauses tracking, and rewinds the cursor used by getNextLine().
void LLMemTracker::preDraw(BOOL pause) 
{
	mMutexp->lock() ;

	mPaused = pause ;
	mDrawnIndex = mCurIndex - 1;
	mNumOfDrawn = 0 ;
}
 535	
//NOTE(review): marked "//static" in the original, but operates on instance state.
//End a draw pass: releases the mutex taken in preDraw().
void LLMemTracker::postDraw() 
{
	mMutexp->unlock() ;
}
 541
//NOTE(review): marked "//static" in the original, but operates on instance state.
//Walk the ring buffer backwards (newest entry first); returns NULL once all
//mCounter recorded lines have been handed out. Must be called between
//preDraw() and postDraw() (the mutex is held across the pass).
const char* LLMemTracker::getNextLine() 
{
	if(mNumOfDrawn >= mCounter)
	{
		return NULL ;
	}
	mNumOfDrawn++;

	//wrap from the front of the buffer back to the last slot
	if(mDrawnIndex < 0)
	{
		mDrawnIndex = mCapacity - 1 ;
	}

	return mStringBuffer[mDrawnIndex--] ;
}
 558
 559#endif //MEM_TRACK_MEM
 560//--------------------------------------------------------------------------------------------------
 561
 562
 563//--------------------------------------------------------------------------------------------------
 564//--------------------------------------------------------------------------------------------------
//minimum slot size and minimal slot size interval
const U32 ATOMIC_MEM_SLOT = 16 ; //bytes

//minimum block sizes (page size) for small allocation, medium allocation, large allocation 
const U32 MIN_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {2 << 10, 4 << 10, 16 << 10} ; //2KB, 4KB, 16KB

//maximum block sizes for small allocation, medium allocation, large allocation 
const U32 MAX_BLOCK_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION] = {64 << 10, 1 << 20, 4 << 20} ; //64KB, 1MB, 4MB

//minimum slot sizes for small allocation, medium allocation, large allocation 
const U32 MIN_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION]  = {ATOMIC_MEM_SLOT, 2 << 10, 512 << 10}; //16B, 2KB, 512KB

//maximum slot sizes for small allocation, medium allocation, large allocation 
const U32 MAX_SLOT_SIZES[LLPrivateMemoryPool::SUPER_ALLOCATION]  = {(2 << 10) - ATOMIC_MEM_SLOT, (512 - 2) << 10, 4 << 20}; //2KB-16B, 510KB, 4MB

//size of a block with multiple slots can not exceed CUT_OFF_SIZE
const U32 CUT_OFF_SIZE = (64 << 10) ; //64 KB

//max number of slots in a block: llmin(2048/16, 16*8) == 128
const U32 MAX_NUM_SLOTS_IN_A_BLOCK = llmin(MIN_BLOCK_SIZES[0] / ATOMIC_MEM_SLOT, ATOMIC_MEM_SLOT * 8) ;
 585
 586//-------------------------------------------------------------
 587//align val to be integer times of ATOMIC_MEM_SLOT
 588U32 align(U32 val)
 589{
 590	U32 aligned = (val / ATOMIC_MEM_SLOT) * ATOMIC_MEM_SLOT ;
 591	if(aligned < val)
 592	{
 593		aligned += ATOMIC_MEM_SLOT ;
 594	}
 595
 596	return aligned ;
 597}
 598
 599//-------------------------------------------------------------
 600//class LLPrivateMemoryPool::LLMemoryBlock
 601//-------------------------------------------------------------
 602//
 603//each memory block could fit for two page sizes: 0.75 * mSlotSize, which starts from the beginning of the memory chunk and grow towards the end of the
 604//the block; another is mSlotSize, which starts from the end of the block and grows towards the beginning of the block.
 605//
//default ctor leaves the block uninitialized; init()/setBuffer() do the real setup.
LLPrivateMemoryPool::LLMemoryBlock::LLMemoryBlock()
{
	//empty
}
 610		
//blocks do not own their buffer (the chunk does), so nothing to release.
LLPrivateMemoryPool::LLMemoryBlock::~LLMemoryBlock() 
{
	//empty
}
 615
//create and initialize a memory block
//carve 'buffer' (buffer_size bytes) into slots of 'slot_size' bytes and set
//up the usage bitmap. When more than 32 slots are needed, the first
//ATOMIC_MEM_SLOT bytes of the buffer itself are reserved to hold the
//overflow bitmap words (mUsageBits only covers slots 0-31).
void LLPrivateMemoryPool::LLMemoryBlock::init(char* buffer, U32 buffer_size, U32 slot_size)
{
	mBuffer = buffer ;
	mBufferSize = buffer_size ;
	mSlotSize = slot_size ;
	mTotalSlots = buffer_size / mSlotSize ;	
	
	llassert_always(buffer_size / mSlotSize <= MAX_NUM_SLOTS_IN_A_BLOCK) ; //max number is 128
	
	mAllocatedSlots = 0 ;
	mDummySize = 0 ;

	//init the bit map.
	//mark free bits	
	if(mTotalSlots > 32) //reserve extra space from mBuffer to store bitmap if needed.
	{
		//the bitmap prefix eats into the slot area; shrink the slot count to match
		mDummySize = ATOMIC_MEM_SLOT ;		
		mTotalSlots -= (mDummySize + mSlotSize - 1) / mSlotSize ;
		mUsageBits = 0 ;

		S32 usage_bit_len = (mTotalSlots + 31) / 32 ;
		
		//buffer words fully inside the slot range start cleared (all free)...
		for(S32 i = 0 ; i < usage_bit_len - 1 ; i++)
		{
			*((U32*)mBuffer + i) = 0 ;
		}
		//...words beyond the slot range are marked fully used so allocate()
		//can never hand them out
		for(S32 i = usage_bit_len - 1 ; i < mDummySize / sizeof(U32) ; i++)
		{
			*((U32*)mBuffer + i) = 0xffffffff ;
		}

		//partially-used last word: pre-set the bits past mTotalSlots.
		//(index is usage_bit_len - 2 because mUsageBits itself counts as word 0)
		if(mTotalSlots & 31)
		{
			*((U32*)mBuffer + usage_bit_len - 2) = (0xffffffff << (mTotalSlots & 31)) ;
		}		
	}	
	else//no extra bitmap space reserved
	{
		mUsageBits = 0 ;
		if(mTotalSlots & 31)
		{
			mUsageBits = (0xffffffff << (mTotalSlots & 31)) ;
		}
	}

	//mSelf points back to this header; sibling headers covered by this
	//block's span are also pointed here (see createNewBlock)
	mSelf = this ;
	mNext = NULL ;
	mPrev = NULL ;

	llassert_always(mTotalSlots > 0) ;
}
 668
//mark this block to be free with the memory [mBuffer, mBuffer + mBufferSize).
//only the span is recorded; mTotalSlots == 0 is the "free block" marker.
void LLPrivateMemoryPool::LLMemoryBlock::setBuffer(char* buffer, U32 buffer_size)
{
	mBuffer = buffer ;
	mBufferSize = buffer_size ;
	mSelf = NULL ;
	mTotalSlots = 0 ; //set the block is free.
}
 677
//reserve a slot
//returns a pointer to a free slot in this block; the caller must guarantee
//the block is not full (asserted below).
char* LLPrivateMemoryPool::LLMemoryBlock::allocate() 
{
	llassert_always(mAllocatedSlots < mTotalSlots) ;
	
	//find a free slot
	U32* bits = NULL ;
	U32  k = 0 ;	//index (in 32-slot groups) of the word holding the free bit
	if(mUsageBits != 0xffffffff)
	{
		bits = &mUsageBits ;	//slots 0-31 live in the member word
	}
	else if(mDummySize > 0)//go to extra space
	{		
		for(S32 i = 0 ; i < mDummySize / sizeof(U32); i++)
		{
			if(*((U32*)mBuffer + i) != 0xffffffff)
			{
				bits = (U32*)mBuffer + i ;
				k = i + 1 ;	//+1 because mUsageBits counts as group 0
				break ;
			}
		}
	}	
	//scan for the first clear bit in the chosen word
	S32 idx = 0 ;
	U32 tmp = *bits ;
	for(; tmp & 1 ; tmp >>= 1, idx++) ;

	//set the slot reserved
	if(!idx)
	{
		*bits |= 1 ;
	}
	else
	{
		*bits |= (1 << idx) ;
	}

	mAllocatedSlots++ ;
	
	//slot data starts after the optional bitmap prefix (mDummySize bytes)
	return mBuffer + mDummySize + (k * 32 + idx) * mSlotSize ;
}
 720
//free a slot
//clear the usage bit for the slot containing 'addr'.
//NOTE(review): the (U32) pointer casts assume a 32-bit address space —
//consistent with the rest of this file, but they truncate on 64-bit builds.
void  LLPrivateMemoryPool::LLMemoryBlock::freeMem(void* addr) 
{
	//bit index
	U32 idx = ((U32)addr - (U32)mBuffer - mDummySize) / mSlotSize ;

	U32* bits = &mUsageBits ;	//slots 0-31 are in the member word...
	if(idx >= 32)
	{
		bits = (U32*)mBuffer + (idx - 32) / 32 ;	//...later slots in the buffer prefix
	}

	//reset the bit
	if(idx & 31)
	{
		*bits &= ~(1 << (idx & 31)) ;
	}
	else
	{
		*bits &= ~1 ;
	}

	mAllocatedSlots-- ;
}
 745
 746//for debug use: reset the entire bitmap.
 747void  LLPrivateMemoryPool::LLMemoryBlock::resetBitMap()
 748{
 749	for(S32 i = 0 ; i < mDummySize / sizeof(U32) ; i++)
 750	{
 751		*((U32*)mBuffer + i) = 0 ;
 752	}
 753	mUsageBits = 0 ;
 754}
 755//-------------------------------------------------------------------
 756//class LLMemoryChunk
 757//--------------------------------------------------------------------
//default ctor leaves the chunk uninitialized; init() does the real setup.
LLPrivateMemoryPool::LLMemoryChunk::LLMemoryChunk()
{
	//empty
}
 762
//chunks do not own their backing buffer, so nothing to release here.
LLPrivateMemoryPool::LLMemoryChunk::~LLMemoryChunk()
{
	//empty
}
 767
//create and init a memory chunk
//lays the chunk out inside 'buffer' as: [LLMemoryChunk header | block meta
//data array | avail-block table | free-space table | aligned data area],
//then hands the whole data area to block 0 as free space.
void LLPrivateMemoryPool::LLMemoryChunk::init(char* buffer, U32 buffer_size, U32 min_slot_size, U32 max_slot_size, U32 min_block_size, U32 max_block_size) 
{
	mBuffer = buffer ;
	mBufferSize = buffer_size ;
	mAlloatedSize = 0 ;

	mMetaBuffer = mBuffer + sizeof(LLMemoryChunk) ;

	mMinBlockSize = min_block_size; //page size
	mMinSlotSize = min_slot_size;
	mMaxSlotSize = max_slot_size ;
	mBlockLevels = mMaxSlotSize / mMinSlotSize ;	//one avail-list entry per slot-size multiple
	mPartitionLevels = max_block_size / mMinBlockSize + 1 ;	//one free-list entry per block-size multiple

	//how many block headers fit after accounting for the two pointer tables
	S32 max_num_blocks = (buffer_size - sizeof(LLMemoryChunk) - mBlockLevels * sizeof(LLMemoryBlock*) - mPartitionLevels * sizeof(LLMemoryBlock*)) / 
		                 (mMinBlockSize + sizeof(LLMemoryBlock)) ;
	//meta data space
	mBlocks = (LLMemoryBlock*)mMetaBuffer ; //space reserved for all memory blocks.
	mAvailBlockList = (LLMemoryBlock**)((char*)mBlocks + sizeof(LLMemoryBlock) * max_num_blocks) ; 
	mFreeSpaceList = (LLMemoryBlock**)((char*)mAvailBlockList + sizeof(LLMemoryBlock*) * mBlockLevels) ; 
	
	//data buffer, which can be used for allocation
	mDataBuffer = (char*)mFreeSpaceList + sizeof(LLMemoryBlock*) * mPartitionLevels ;
	
	//alignment: round the data area up to an ATOMIC_MEM_SLOT boundary
	mDataBuffer = mBuffer + align(mDataBuffer - mBuffer) ;
	
	//init
	for(U32 i = 0 ; i < mBlockLevels; i++)
	{
		mAvailBlockList[i] = NULL ;
	}
	for(U32 i = 0 ; i < mPartitionLevels ; i++)
	{
		mFreeSpaceList[i] = NULL ;
	}

	//assign the entire chunk to the first block
	mBlocks[0].mPrev = NULL ;
	mBlocks[0].mNext = NULL ;
	mBlocks[0].setBuffer(mDataBuffer, buffer_size - (mDataBuffer - mBuffer)) ;
	addToFreeSpace(&mBlocks[0]) ;

	mNext = NULL ;
	mPrev = NULL ;
}
 815
 816//static 
 817U32 LLPrivateMemoryPool::LLMemoryChunk::getMaxOverhead(U32 data_buffer_size, U32 min_slot_size, 
 818													   U32 max_slot_size, U32 min_block_size, U32 max_block_size)
 819{
 820	//for large allocations, reserve some extra memory for meta data to avoid wasting much 
 821	if(data_buffer_size / min_slot_size < 64) //large allocations
 822	{
 823		U32 overhead = sizeof(LLMemoryChunk) + (data_buffer_size / min_block_size) * sizeof(LLMemoryBlock) +
 824			sizeof(LLMemoryBlock*) * (max_slot_size / min_slot_size) + sizeof(LLMemoryBlock*) * (max_block_size / min_block_size + 1) ;
 825
 826		//round to integer times of min_block_size
 827		overhead = ((overhead + min_block_size - 1) / min_block_size) * min_block_size ;
 828		return overhead ;
 829	}
 830	else
 831	{
 832		return 0 ; //do not reserve extra overhead if for small allocations
 833	}
 834}
 835
//allocate 'size' bytes (rounded up to mMinSlotSize) from this chunk.
//Tries, in order: an existing block for this slot level, a freshly carved
//block, then any available block at a larger slot level.
//Returns NULL when the chunk cannot satisfy the request.
char* LLPrivateMemoryPool::LLMemoryChunk::allocate(U32 size)
{
	if(mMinSlotSize > size)
	{
		size = mMinSlotSize ;
	}
	if(mAlloatedSize + size  > mBufferSize - (mDataBuffer - mBuffer))
	{
		return NULL ; //no enough space in this chunk.
	}

	char* p = NULL ;
	U32 blk_idx = getBlockLevel(size);

	LLMemoryBlock* blk = NULL ;

	//check if there is free block available
	if(mAvailBlockList[blk_idx])
	{
		blk = mAvailBlockList[blk_idx] ;
		p = blk->allocate() ;
		
		if(blk->isFull())
		{
			popAvailBlockList(blk_idx) ;
		}
	}

	//ask for a new block
	if(!p)
	{
		blk = addBlock(blk_idx) ;
		if(blk)
		{
			p = blk->allocate() ;

			if(blk->isFull())
			{
				popAvailBlockList(blk_idx) ;
			}
		}
	}

	//ask for space from larger blocks (wastes slot padding but avoids failure)
	if(!p)
	{
		for(S32 i = blk_idx + 1 ; i < mBlockLevels; i++)
		{
			if(mAvailBlockList[i])
			{
				blk = mAvailBlockList[i] ;
				p = blk->allocate() ;

				if(blk->isFull())
				{
					popAvailBlockList(i) ;
				}
				break ;
			}
		}
	}

	//account for the full slot size actually consumed, not the requested size
	if(p && blk)
	{		
		mAlloatedSize += blk->getSlotSize() ;
	}
	return p ;
}
 904
//return the memory at 'addr' to its owning block; an emptied block is
//released back to the free-space pool, and a previously-full block becomes
//available again.
//NOTE(review): the (U32) pointer cast assumes a 32-bit address space.
void LLPrivateMemoryPool::LLMemoryChunk::freeMem(void* addr)
{	
	U32 blk_idx = getPageIndex((U32)addr) ;
	LLMemoryBlock* blk = (LLMemoryBlock*)(mMetaBuffer + blk_idx * sizeof(LLMemoryBlock)) ;
	blk = blk->mSelf ;	//follow the address map to the block's real header

	bool was_full = blk->isFull() ;
	blk->freeMem(addr) ;
	mAlloatedSize -= blk->getSlotSize() ;

	if(blk->empty())
	{
		removeBlock(blk) ;
	}
	else if(was_full)
	{
		addToAvailBlockList(blk) ;
	}	
}
 924
 925bool LLPrivateMemoryPool::LLMemoryChunk::empty()
 926{
 927	return !mAlloatedSize ;
 928}
 929
 930bool LLPrivateMemoryPool::LLMemoryChunk::containsAddress(const char* addr) const
 931{
 932	return (U32)mBuffer <= (U32)addr && (U32)mBuffer + mBufferSize > (U32)addr ;
 933}
 934
//debug use
//sanity checks (free-list ordering, address-map consistency) and a state
//dump of the avail/free block lists. Both halves are compiled out (#if 0);
//enable selectively when debugging pool corruption.
void LLPrivateMemoryPool::LLMemoryChunk::dump()
{
#if 0
	//sanity check
	//for(S32 i = 0 ; i < mBlockLevels ; i++)
	//{
	//	LLMemoryBlock* blk = mAvailBlockList[i] ;
	//	while(blk)
	//	{
	//		blk_list.push_back(blk) ;
	//		blk = blk->mNext ;
	//	}
	//}
	for(S32 i = 0 ; i < mPartitionLevels ; i++)
	{
		LLMemoryBlock* blk = mFreeSpaceList[i] ;
		while(blk)
		{
			blk_list.push_back(blk) ;
			blk = blk->mNext ;
		}
	}

	std::sort(blk_list.begin(), blk_list.end(), LLMemoryBlock::CompareAddress());

	//free spans must not overlap once sorted by address
	U32 total_size = blk_list[0]->getBufferSize() ;
	for(U32 i = 1 ; i < blk_list.size(); i++)
	{
		total_size += blk_list[i]->getBufferSize() ;
		if((U32)blk_list[i]->getBuffer() < (U32)blk_list[i-1]->getBuffer() + blk_list[i-1]->getBufferSize())
		{
			llerrs << "buffer corrupted." << llendl ;
		}
	}

	llassert_always(total_size + mMinBlockSize >= mBufferSize - ((U32)mDataBuffer - (U32)mBuffer)) ;

	//every min-block-sized page must belong to some block via mSelf
	U32 blk_num = (mBufferSize - (mDataBuffer - mBuffer)) / mMinBlockSize ;
	for(U32 i = 0 ; i < blk_num ; )
	{
		LLMemoryBlock* blk = &mBlocks[i] ;
		if(blk->mSelf)
		{
			U32 end = blk->getBufferSize() / mMinBlockSize ;
			for(U32 j = 0 ; j < end ; j++)
			{
				llassert_always(blk->mSelf == blk || !blk->mSelf) ;
			}
			i += end ;
		}
		else
		{
			llerrs << "gap happens" << llendl ;
		}
	}
#endif
#if 0
	llinfos << "---------------------------" << llendl ;
	llinfos << "Chunk buffer: " << (U32)getBuffer() << " size: " << getBufferSize() << llendl ;

	llinfos << "available blocks ... " << llendl ;
	for(S32 i = 0 ; i < mBlockLevels ; i++)
	{
		LLMemoryBlock* blk = mAvailBlockList[i] ;
		while(blk)
		{
			llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ;
			blk = blk->mNext ;
		}
	}

	llinfos << "free blocks ... " << llendl ;
	for(S32 i = 0 ; i < mPartitionLevels ; i++)
	{
		LLMemoryBlock* blk = mFreeSpaceList[i] ;
		while(blk)
		{
			llinfos << "blk buffer " << (U32)blk->getBuffer() << " size: " << blk->getBufferSize() << llendl ;
			blk = blk->mNext ;
		}
	}
#endif
}
1019
1020//compute the size for a block, the size is round to integer times of mMinBlockSize.
1021U32 LLPrivateMemoryPool::LLMemoryChunk::calcBlockSize(U32 slot_size)
1022{
1023	//
1024	//Note: we try to make a block to have 32 slots if the size is not over 32 pages
1025	//32 is the number of bits of an integer in a 32-bit system
1026	//
1027
1028	U32 block_size;
1029	U32 cut_off_size = llmin(CUT_OFF_SIZE, (U32)(mMinBlockSize << 5)) ;
1030
1031	if((slot_size << 5) <= mMinBlockSize)//for small allocations, return one page 
1032	{
1033		block_size = mMinBlockSize ;
1034	}
1035	else if(slot_size >= cut_off_size)//for large allocations, return one-slot block
1036	{
1037		block_size = (slot_size / mMinBlockSize) * mMinBlockSize ;
1038		if(block_size < slot_size)
1039		{
1040			block_size += mMinBlockSize ;
1041		}
1042	}
1043	else //medium allocations
1044	{
1045		if((slot_size << 5) >= cut_off_size)
1046		{
1047			block_size = cut_off_size ;
1048		}
1049		else
1050		{
1051			block_size = ((slot_size << 5) / mMinBlockSize) * mMinBlockSize ;
1052		}
1053	}
1054
1055	llassert_always(block_size >= slot_size) ;
1056
1057	return block_size ;
1058}
1059
//create a new block in the chunk
//carve a block for slot size (blk_idx + 1) * mMinSlotSize out of free space,
//preferring: an exactly-sized free span, then the whole-chunk span, then
//smaller spans that still fit one slot, then larger spans.
//Returns NULL if no free space fits.
LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::addBlock(U32 blk_idx)
{	
	U32 slot_size = mMinSlotSize * (blk_idx + 1) ;
	U32 preferred_block_size = calcBlockSize(slot_size) ;	
	U16 idx = getPageLevel(preferred_block_size); 
	LLMemoryBlock* blk = NULL ;
	
	if(mFreeSpaceList[idx])//if there is free slot for blk_idx
	{
		blk = createNewBlock(mFreeSpaceList[idx], preferred_block_size, slot_size, blk_idx) ;
	}
	else if(mFreeSpaceList[mPartitionLevels - 1]) //search free pool
	{		
		blk = createNewBlock(mFreeSpaceList[mPartitionLevels - 1], preferred_block_size, slot_size, blk_idx) ;
	}
	else //search for other non-preferred but enough space slot.
	{
		S32 min_idx = 0 ;
		if(slot_size > mMinBlockSize)
		{
			min_idx = getPageLevel(slot_size) ;
		}
		for(S32 i = (S32)idx - 1 ; i >= min_idx ; i--) //search the small slots first
		{
			if(mFreeSpaceList[i])
			{
				//shrink the block to what this span can hold
				U32 new_preferred_block_size = mFreeSpaceList[i]->getBufferSize();
				new_preferred_block_size = (new_preferred_block_size / mMinBlockSize) * mMinBlockSize ; //round to integer times of mMinBlockSize.

				//create a NEW BLOCK THERE.
				if(new_preferred_block_size >= slot_size) //at least there is space for one slot.
				{
					
					blk = createNewBlock(mFreeSpaceList[i], new_preferred_block_size, slot_size, blk_idx) ;
				}
				break ;
			} 
		}

		if(!blk)
		{
			for(U16 i = idx + 1 ; i < mPartitionLevels - 1; i++) //search the large slots 
			{
				if(mFreeSpaceList[i])
				{
					//create a NEW BLOCK THERE.
					blk = createNewBlock(mFreeSpaceList[i], preferred_block_size, slot_size, blk_idx) ;
					break ;
				} 
			}
		}
	}

	return blk ;
}
1116
//create a new block at the designed location
//claims 'buffer_size' bytes from the free span 'blk', returns the leftover
//(if at least one page) to the free-space list, initializes the block for
//'slot_size' slots, and wires up the address map.
LLPrivateMemoryPool::LLMemoryBlock* LLPrivateMemoryPool::LLMemoryChunk::createNewBlock(LLMemoryBlock* blk, U32 buffer_size, U32 slot_size, U32 blk_idx)
{
	//unlink from the free space
	removeFromFreeSpace(blk) ;

	//check the rest space
	U32 new_free_blk_size = blk->getBufferSize() - buffer_size ;	
	if(new_free_blk_size < mMinBlockSize) //can not partition the memory into size smaller than mMinBlockSize
	{
		new_free_blk_size = 0 ; //discard the last small extra space.
	}			

	//add the rest space back to the free list
	if(new_free_blk_size > 0) //blk still has free space
	{
		//block headers parallel the data pages: advance one header per page claimed
		LLMemoryBlock* next_blk = blk + (buffer_size / mMinBlockSize) ;
		next_blk->mPrev = NULL ;
		next_blk->mNext = NULL ;
		next_blk->setBuffer(blk->getBuffer() + buffer_size, new_free_blk_size) ;
		addToFreeSpace(next_blk) ;
	}

	blk->init(blk->getBuffer(), buffer_size, slot_size) ;
	//insert to the available block list...
	mAvailBlockList[blk_idx] = blk ;

	//mark the address map: all blocks covered by this block space pointing back to this block.
	U32 end = (buffer_size / mMinBlockSize) ;
	for(U32 i = 1 ; i < end ; i++)
	{
		(blk + i)->mSelf = blk ;
	}

	return blk ;
}
1153
//Delete a block: unlink it from its avail list, mark it free, coalesce it
//with free left/right neighbors (to fight fragmentation), then return the
//merged region to the free pool.
void LLPrivateMemoryPool::LLMemoryChunk::removeBlock(LLMemoryBlock* blk)
{
	//remove from the available block list
	if(blk->mPrev)
	{
		blk->mPrev->mNext = blk->mNext ;
	}
	if(blk->mNext)
	{
		blk->mNext->mPrev = blk->mPrev ;
	}
	U32 blk_idx = getBlockLevel(blk->getSlotSize());
	if(mAvailBlockList[blk_idx] == blk)
	{
		mAvailBlockList[blk_idx] = blk->mNext ;
	}

	blk->mNext = NULL ;
	blk->mPrev = NULL ;
	
	//mark it free
	blk->setBuffer(blk->getBuffer(), blk->getBufferSize()) ;

#if 1
	//merge blk with neighbors if possible
	if(blk->getBuffer() > mDataBuffer) //has the left neighbor
	{
		//(blk - 1) is the left neighbor's TAIL header; its mSelf points at
		//that region's head (set by addToFreeSpace / createNewBlock).
		if((blk - 1)->mSelf->isFree())
		{
			LLMemoryBlock* left_blk = (blk - 1)->mSelf ;
			removeFromFreeSpace((blk - 1)->mSelf);
			left_blk->setBuffer(left_blk->getBuffer(), left_blk->getBufferSize() + blk->getBufferSize()) ;
			blk = left_blk ;
		}
	}
	if(blk->getBuffer() + blk->getBufferSize() <= mBuffer + mBufferSize - mMinBlockSize) //has the right neighbor
	{
		U32 d = blk->getBufferSize() / mMinBlockSize ;
		if((blk + d)->isFree())
		{
			LLMemoryBlock* right_blk = blk + d ;
			removeFromFreeSpace(blk + d) ;
			blk->setBuffer(blk->getBuffer(), blk->getBufferSize() + right_blk->getBufferSize()) ;
		}
	}
#endif
	
	addToFreeSpace(blk) ;

	return ;
}
1206
1207//the top block in the list is full, pop it out of the list
1208void LLPrivateMemoryPool::LLMemoryChunk::popAvailBlockList(U32 blk_idx) 
1209{
1210	if(mAvailBlockList[blk_idx])
1211	{
1212		LLMemoryBlock* next = mAvailBlockList[blk_idx]->mNext ;
1213		if(next)
1214		{
1215			next->mPrev = NULL ;
1216		}
1217		mAvailBlockList[blk_idx]->mPrev = NULL ;
1218		mAvailBlockList[blk_idx]->mNext = NULL ;
1219		mAvailBlockList[blk_idx] = next ;
1220	}
1221}
1222
//add the block back to the free pool (push-front on the size-keyed list)
void LLPrivateMemoryPool::LLMemoryChunk::addToFreeSpace(LLMemoryBlock* blk) 
{
	llassert_always(!blk->mPrev) ;
	llassert_always(!blk->mNext) ;

	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;

	//the tail header must be stamped BEFORE free_idx is capped below, so
	//neighbor coalescing in removeBlock can walk from tail back to head.
	(blk + free_idx)->mSelf = blk ; //mark the end pointing back to the head.
	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;

	blk->mNext = mFreeSpaceList[free_idx] ;
	if(mFreeSpaceList[free_idx])
	{
		mFreeSpaceList[free_idx]->mPrev = blk ;
	}
	mFreeSpaceList[free_idx] = blk ;
	blk->mPrev = NULL ;
	blk->mSelf = blk ; //a head pointing at itself marks the region free
	
	return ;
}
1245
1246//remove the space from the free pool
1247void LLPrivateMemoryPool::LLMemoryChunk::removeFromFreeSpace(LLMemoryBlock* blk) 
1248{
1249	U16 free_idx = blk->getBufferSize() / mMinBlockSize - 1;
1250	free_idx = llmin(free_idx, (U16)(mPartitionLevels - 1)) ;
1251
1252	if(mFreeSpaceList[free_idx] == blk)
1253	{
1254		mFreeSpaceList[free_idx] = blk->mNext ;
1255	}
1256	if(blk->mPrev)
1257	{
1258		blk->mPrev->mNext = blk->mNext ;
1259	}
1260	if(blk->mNext)
1261	{
1262		blk->mNext->mPrev = blk->mPrev ;
1263	}
1264	blk->mNext = NULL ;
1265	blk->mPrev = NULL ;
1266	blk->mSelf = NULL ;
1267
1268	return ;
1269}
1270
1271void LLPrivateMemoryPool::LLMemoryChunk::addToAvailBlockList(LLMemoryBlock* blk) 
1272{
1273	llassert_always(!blk->mPrev) ;
1274	llassert_always(!blk->mNext) ;
1275
1276	U32 blk_idx = getBlockLevel(blk->getSlotSize());
1277
1278	blk->mNext = mAvailBlockList[blk_idx] ;
1279	if(blk->mNext)
1280	{
1281		blk->mNext->mPrev = blk ;
1282	}
1283	blk->mPrev = NULL ;
1284	mAvailBlockList[blk_idx] = blk ;
1285
1286	return ;
1287}
1288
//Translate an address inside this chunk's data area into its page index.
//NOTE(review): 'addr' arrives as U32 and mDataBuffer is truncated through a
//U32 cast -- this presumably targets 32-bit builds; confirm before porting
//to 64-bit (the parameter type itself would need to change).
U32 LLPrivateMemoryPool::LLMemoryChunk::getPageIndex(U32 addr)
{
	return (addr - (U32)mDataBuffer) / mMinBlockSize ;
}
1293
1294//for mAvailBlockList
1295U32 LLPrivateMemoryPool::LLMemoryChunk::getBlockLevel(U32 size)
1296{
1297	llassert(size >= mMinSlotSize && size <= mMaxSlotSize) ;
1298
1299	//start from 0
1300	return (size + mMinSlotSize - 1) / mMinSlotSize - 1 ;
1301}
1302
1303//for mFreeSpaceList
1304U16 LLPrivateMemoryPool::LLMemoryChunk::getPageLevel(U32 size)
1305{
1306	//start from 0
1307	U16 level = size / mMinBlockSize - 1 ;
1308	if(level >= mPartitionLevels)
1309	{
1310		level = mPartitionLevels - 1 ;
1311	}
1312	return level ;
1313}
1314
1315//-------------------------------------------------------------------
1316//class LLPrivateMemoryPool
1317//--------------------------------------------------------------------
const U32 CHUNK_SIZE = 4 << 20 ; //4 MB: base reservation unit for a pool chunk; also the pool-bypass threshold in allocate()
const U32 LARGE_CHUNK_SIZE = 4 * CHUNK_SIZE ; //16 MB: chunk size used for the large-allocation size classes
1320LLPrivateMemoryPool::LLPrivateMemoryPool(S32 type, U32 max_pool_size) :
1321	mMutexp(NULL),	
1322	mReservedPoolSize(0),
1323	mHashFactor(1),
1324	mType(type),
1325	mMaxPoolSize(max_pool_size)
1326{
1327	if(type == STATIC_THREADED || type == VOLATILE_THREADED)
1328	{
1329		mMutexp = new LLMutex(NULL) ;
1330	}
1331
1332	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
1333	{
1334		mChunkList[i] = NULL ;
1335	}	
1336	
1337	mNumOfChunks = 0 ;
1338}
1339
//Tear down the chunk bookkeeping (see destroyPool) and release the mutex,
//if this was a threaded pool.
LLPrivateMemoryPool::~LLPrivateMemoryPool()
{
	destroyPool();
	delete mMutexp ;
}
1345
1346char* LLPrivateMemoryPool::allocate(U32 size)
1347{	
1348	if(!size)
1349	{
1350		return NULL ;
1351	}
1352
1353	//if the asked size larger than MAX_BLOCK_SIZE, fetch from heap directly, the pool does not manage it
1354	if(size >= CHUNK_SIZE)
1355	{
1356		return (char*)malloc(size) ;
1357	}
1358
1359	char* p = NULL ;
1360
1361	//find the appropriate chunk
1362	S32 chunk_idx = getChunkIndex(size) ;
1363	
1364	lock() ;
1365
1366	LLMemoryChunk* chunk = mChunkList[chunk_idx];
1367	while(chunk)
1368	{
1369		if((p = chunk->allocate(size)))
1370		{
1371			break ;
1372		}
1373		chunk = chunk->mNext ;
1374	}
1375	
1376	//fetch new memory chunk
1377	if(!p)
1378	{
1379		if(mReservedPoolSize + CHUNK_SIZE > mMaxPoolSize)
1380		{
1381			chunk = mChunkList[chunk_idx];
1382			while(chunk)
1383			{
1384				if((p = chunk->allocate(size)))
1385				{
1386					break ;
1387				}
1388				chunk = chunk->mNext ;
1389			}
1390		}
1391		else
1392		{
1393			chunk = addChunk(chunk_idx) ;
1394			if(chunk)
1395			{
1396				p = chunk->allocate(size) ;
1397			}
1398		}
1399	}
1400
1401	unlock() ;
1402
1403	if(!p) //to get memory from the private pool failed, try the heap directly
1404	{
1405		static bool to_log = true ;
1406		
1407		if(to_log)
1408		{
1409			llwarns << "The memory pool overflows, now using heap directly!" << llendl ;
1410			to_log = false ;
1411		}
1412
1413		return (char*)malloc(size) ;
1414	}
1415
1416	return p ;
1417}
1418
//Return 'addr' to the pool. Addresses not owned by any chunk (oversized
//allocations that went straight to the heap) are released with free().
void LLPrivateMemoryPool::freeMem(void* addr)
{
	if(!addr)
	{
		return ;
	}
	
	lock() ;
	
	LLMemoryChunk* chunk = findChunk((char*)addr) ;
	
	if(!chunk)
	{
		free(addr) ; //release from heap
	}
	else
	{
		chunk->freeMem(addr) ;

		//a chunk with no live allocations is released back to the OS at once
		if(chunk->empty())
		{
			removeChunk(chunk) ;
		}
	}
	
	unlock() ;
}
1446
//Debug hook: intentionally a no-op placeholder.
void LLPrivateMemoryPool::dump()
{
}
1450
//Sum of the bytes currently handed out by all chunks in this pool.
//NOTE(review): walks the chunk lists WITHOUT taking the lock; the visible
//caller (checkSize, via addChunk) already holds it -- confirm any new
//call sites do the same for threaded pools.
U32 LLPrivateMemoryPool::getTotalAllocatedSize()
{
	U32 total_allocated = 0 ;

	LLMemoryChunk* chunk ;
	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
	{
		chunk = mChunkList[i];
		while(chunk)
		{
			total_allocated += chunk->getAllocatedSize() ;
			chunk = chunk->mNext ;
		}
	}

	return total_allocated ;
}
1468
//Acquire the pool mutex; a no-op for single-threaded pool types
//(mMutexp is only created for *_THREADED pools).
void LLPrivateMemoryPool::lock()
{
	if(mMutexp)
	{
		mMutexp->lock() ;
	}
}
1476
//Release the pool mutex; a no-op for single-threaded pool types.
void LLPrivateMemoryPool::unlock()
{
	if(mMutexp)
	{
		mMutexp->unlock() ;
	}
}
1484
1485S32  LLPrivateMemoryPool::getChunkIndex(U32 size) 
1486{
1487	S32 i ;
1488	for(i = 0 ; size > MAX_SLOT_SIZES[i]; i++);
1489	
1490	llassert_always(i < SUPER_ALLOCATION);
1491
1492	return i ;
1493}
1494
//destroy the entire pool's bookkeeping. If chunks still exist their
//buffers are NOT freed here (only a warning is logged) -- outstanding
//allocations keep their memory; see the dangling-pool handling in
//LLPrivateMemoryPoolManager::freeMem for how such pools are drained later.
void  LLPrivateMemoryPool::destroyPool()
{
	lock() ;

	if(mNumOfChunks > 0)
	{
		llwarns << "There is some memory not freed when destroy the memory pool!" << llendl ;
	}

	mNumOfChunks = 0 ;
	mChunkHashList.clear() ;
	mHashFactor = 1 ;
	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
	{
		mChunkList[i] = NULL ;
	}

	unlock() ;
}
1515
1516bool LLPrivateMemoryPool::checkSize(U32 asked_size)
1517{
1518	if(mReservedPoolSize + asked_size > mMaxPoolSize)
1519	{
1520		llinfos << "Max pool size: " << mMaxPoolSize << llendl ;
1521		llinfos << "Total reserved size: " << mReservedPoolSize + asked_size << llendl ;
1522		llinfos << "Total_allocated Size: " << getTotalAllocatedSize() << llendl ;
1523
1524		//llerrs << "The pool is overflowing..." << llendl ;
1525
1526		return false ;
1527	}
1528
1529	return true ;
1530}
1531
1532LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::addChunk(S32 chunk_index)
1533{
1534	U32 preferred_size ;
1535	U32 overhead ;
1536	if(chunk_index < LARGE_ALLOCATION)
1537	{
1538		preferred_size = CHUNK_SIZE ; //4MB
1539		overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index],
1540			MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
1541	}
1542	else
1543	{
1544		preferred_size = LARGE_CHUNK_SIZE ; //16MB
1545		overhead = LLMemoryChunk::getMaxOverhead(preferred_size, MIN_SLOT_SIZES[chunk_index], 
1546			MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
1547	}
1548
1549	if(!checkSize(preferred_size + overhead))
1550	{
1551		return NULL ;
1552	}
1553
1554	mReservedPoolSize += preferred_size + overhead ;
1555
1556	char* buffer = (char*)malloc(preferred_size + overhead) ;
1557	if(!buffer)
1558	{
1559		return NULL ;
1560	}
1561	
1562	LLMemoryChunk* chunk = new (buffer) LLMemoryChunk() ;
1563	chunk->init(buffer, preferred_size + overhead, MIN_SLOT_SIZES[chunk_index],
1564		MAX_SLOT_SIZES[chunk_index], MIN_BLOCK_SIZES[chunk_index], MAX_BLOCK_SIZES[chunk_index]) ;
1565
1566	//add to the tail of the linked list
1567	{
1568		if(!mChunkList[chunk_index])
1569		{
1570			mChunkList[chunk_index] = chunk ;
1571		}
1572		else
1573		{
1574			LLMemoryChunk* cur = mChunkList[chunk_index] ;
1575			while(cur->mNext)
1576			{
1577				cur = cur->mNext ;
1578			}
1579			cur->mNext = chunk ;
1580			chunk->mPrev = cur ;
1581		}
1582	}
1583
1584	//insert into the hash table
1585	addToHashTable(chunk) ;
1586	
1587	mNumOfChunks++;
1588
1589	return chunk ;
1590}
1591
//Detach 'chunk' from its size-class list and the hash table, return its
//reserved bytes to the pool budget, and free the underlying buffer.
//(The chunk header was placement-new'ed into that buffer, so free()
//reclaims it too; no destructor call is made.)
void LLPrivateMemoryPool::removeChunk(LLMemoryChunk* chunk) 
{
	if(!chunk)
	{
		return ;
	}

	//remove from the linked list
	for(S32 i = 0 ; i < SUPER_ALLOCATION ; i++)
	{
		if(mChunkList[i] == chunk)
		{
			mChunkList[i] = chunk->mNext ;
		}
	}

	if(chunk->mPrev)
	{
		chunk->mPrev->mNext = chunk->mNext ;
	}
	if(chunk->mNext)
	{
		chunk->mNext->mPrev = chunk->mPrev ;
	}

	//remove from the hash table
	removeFromHashTable(chunk) ;
	
	mNumOfChunks--;
	mReservedPoolSize -= chunk->getBufferSize() ;
	
	//release memory
	free(chunk->getBuffer()) ;
}
1626
1627U16 LLPrivateMemoryPool::findHashKey(const char* addr)
1628{
1629	return (((U32)addr) / CHUNK_SIZE) % mHashFactor ;
1630}
1631
1632LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::findChunk(const char* addr)
1633{
1634	U16 key = findHashKey(addr) ;	
1635	if(mChunkHashList.size() <= key)
1636	{
1637		return NULL ;
1638	}
1639
1640	return mChunkHashList[key].findChunk(addr) ;	
1641}
1642
//Register 'chunk' under every hash bucket its address range covers.
//Each bucket (LLChunkHashElement) holds at most two chunks; when a third
//chunk lands in a bucket, the table grows to the next prime-ish factor and
//is rebuilt (which re-enters this function via rehash()).
void LLPrivateMemoryPool::addToHashTable(LLMemoryChunk* chunk) 
{
	static const U16 HASH_FACTORS[] = {41, 83, 193, 317, 419, 523, 719, 997, 1523, 0xFFFF}; 
	
	U16 i ;
	if(mChunkHashList.empty())
	{
		mHashFactor = HASH_FACTORS[0] ;
		rehash() ;		
	}

	//keys are (addr / CHUNK_SIZE) % mHashFactor, so a chunk's range can
	//wrap around the key space (end_key < start_key below).
	U16 start_key = findHashKey(chunk->getBuffer()) ;
	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
	bool need_rehash = false ;
	
	if(mChunkHashList[start_key].hasElement(chunk))
	{
		return; //already inserted.
	}
	need_rehash = mChunkHashList[start_key].add(chunk) ;
	
	if(start_key == end_key && !need_rehash)
	{
		return ; //done
	}

	if(!need_rehash)
	{
		need_rehash = mChunkHashList[end_key].add(chunk) ;
	}

	if(!need_rehash)
	{
		if(end_key < start_key) //key range wraps: fill both tail and head
		{
			need_rehash = fillHashTable(start_key + 1, mHashFactor, chunk) ;
			if(!need_rehash)
			{
				need_rehash = fillHashTable(0, end_key, chunk) ;
			}
		}
		else
		{
			need_rehash = fillHashTable(start_key + 1, end_key, chunk) ;
		}
	}
	
	if(need_rehash)
	{
		//grow to the next factor strictly above the current one
		i = 0 ;
		while(HASH_FACTORS[i] <= mHashFactor) i++;

		mHashFactor = HASH_FACTORS[i] ;
		llassert_always(mHashFactor != 0xFFFF) ;//stop point to prevent endlessly recursive calls

		rehash() ;
	}
}
1701
//Unregister 'chunk' from every hash bucket its address range covers,
//mirroring the (possibly wrap-around) insertion done in addToHashTable.
void LLPrivateMemoryPool::removeFromHashTable(LLMemoryChunk* chunk) 
{
	U16 start_key = findHashKey(chunk->getBuffer()) ;
	U16 end_key = findHashKey(chunk->getBuffer() + chunk->getBufferSize() - 1) ;
	
	mChunkHashList[start_key].remove(chunk) ;
	if(start_key == end_key)
	{
		return ; //done
	}

	mChunkHashList[end_key].remove(chunk) ;
	
	if(end_key < start_key) //key range wrapped at insertion time
	{
		for(U16 i = start_key + 1 ; i < mHashFactor; i++)
		{
			mChunkHashList[i].remove(chunk) ;
		}
		for(U16 i = 0 ; i < end_key; i++)
		{
			mChunkHashList[i].remove(chunk) ;
		}
	}
	else
	{
		for(U16 i = start_key + 1 ; i < end_key; i++)
		{
			mChunkHashList[i].remove(chunk) ;
		}
	}
}
1734
//Rebuild the hash table at the current mHashFactor by re-inserting every
//chunk. May recurse through addToHashTable if the new factor still
//overflows a bucket (bounded by the 0xFFFF sentinel in HASH_FACTORS).
void LLPrivateMemoryPool::rehash()
{
	llinfos << "new hash factor: " << mHashFactor << llendl ;

	mChunkHashList.clear() ;
	mChunkHashList.resize(mHashFactor) ;

	LLMemoryChunk* chunk ;
	for(U16 i = 0 ; i < SUPER_ALLOCATION ; i++)
	{
		chunk = mChunkList[i] ; 
		while(chunk)
		{
			addToHashTable(chunk) ;
			chunk = chunk->mNext ;
		}
	}
}
1753
1754bool LLPrivateMemoryPool::fillHashTable(U16 start, U16 end, LLMemoryChunk* chunk)
1755{
1756	for(U16 i = start; i < end; i++)
1757	{
1758		if(mChunkHashList[i].add(chunk))
1759		{			
1760			return true ;
1761		}		
1762	}
1763
1764	return false ;
1765}
1766
1767//--------------------------------------------------------------------
1768// class LLChunkHashElement
1769//--------------------------------------------------------------------
1770LLPrivateMemoryPool::LLMemoryChunk* LLPrivateMemoryPool::LLChunkHashElement::findChunk(const char* addr)
1771{
1772	if(mFirst && mFirst->containsAddress(addr))
1773	{
1774		return mFirst ;
1775	}
1776	else if(mSecond && mSecond->containsAddress(addr))
1777	{
1778		return mSecond ;
1779	}
1780
1781	return NULL ;
1782}
1783
1784//return false if successfully inserted to the hash slot.
1785bool LLPrivateMemoryPool::LLChunkHashElement::add(LLPrivateMemoryPool::LLMemoryChunk* chunk)
1786{
1787	llassert_always(!hasElement(chunk)) ;
1788
1789	if(!mFirst)
1790	{
1791		mFirst = chunk ;
1792	}
1793	else if(!mSecond)
1794	{
1795		mSecond = chunk ;
1796	}
1797	else
1798	{
1799		return true ; //failed
1800	}
1801
1802	return false ;
1803}
1804
1805void LLPrivateMemoryPool::LLChunkHashElement::remove(LLPrivateMemoryPool::LLMemoryChunk* chunk)
1806{
1807	if(mFirst == chunk)
1808	{
1809		mFirst = NULL ;
1810	}
1811	else if(mSecond ==chunk)
1812	{
1813		mSecond = NULL ;
1814	}
1815	else
1816	{
1817		llerrs << "This slot does not contain this chunk!" << llendl ;
1818	}
1819}
1820
1821//--------------------------------------------------------------------
1822//class LLPrivateMemoryPoolManager
1823//--------------------------------------------------------------------
LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::sInstance = NULL ; //singleton instance
BOOL LLPrivateMemoryPoolManager::sPrivatePoolEnabled = FALSE ; //global on/off switch for private pools
std::vector<LLPrivateMemoryPool*> LLPrivateMemoryPoolManager::sDanglingPoolList ; //pools with live allocations that outlived the manager
1827
//Build the (empty) per-type pool table and record the global enable flag
//and per-pool size cap.
LLPrivateMemoryPoolManager::LLPrivateMemoryPoolManager(BOOL enabled, U32 max_pool_size) 
{
	mPoolList.resize(LLPrivateMemoryPool::MAX_TYPES) ;

	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
	{
		mPoolList[i] = NULL ;
	}

	sPrivatePoolEnabled = enabled ;

	const U32 MAX_POOL_SIZE = 256 * 1024 * 1024 ; //256 MB
	//NOTE(review): llmax makes 256MB a FLOOR -- max_pool_size can only
	//raise the cap, never lower it. Confirm a floor (not a ceiling) is
	//what was intended for a value named "max".
	mMaxPrivatePoolSize = llmax(max_pool_size, MAX_POOL_SIZE) ;
}
1842
//Shutdown: in debug builds, report every allocation that was never freed;
//then delete each pool, or -- if it still has live allocations -- park it
//on the static dangling list so later freeMem calls can still reach it.
LLPrivateMemoryPoolManager::~LLPrivateMemoryPoolManager() 
{

#if __DEBUG_PRIVATE_MEM__
	if(!sMemAllocationTracker.empty())
	{
		llwarns << "there is potential memory leaking here. The list of not freed memory blocks are from: " <<llendl ;

		S32 k = 0 ;
		for(mem_allocation_info_t::iterator iter = sMemAllocationTracker.begin() ; iter != sMemAllocationTracker.end() ; ++iter)
		{
			llinfos << k++ << ", " << (U32)iter->first << " : " << iter->second << llendl ;
		}
		sMemAllocationTracker.clear() ;
	}
#endif

#if 0
	//all private pools should be released by their owners before reaching here.
	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
	{
		llassert_always(!mPoolList[i]) ;
	}
	mPoolList.clear() ;

#else
	//forcefully release all memory
	for(S32 i = 0 ; i < LLPrivateMemoryPool::MAX_TYPES; i++)
	{
		if(mPoolList[i])
		{
			if(mPoolList[i]->isEmpty())
			{
				delete mPoolList[i] ;
			}
			else
			{
				//can not delete this pool because it has alloacted memory to be freed.
				//move it to the dangling list.
				sDanglingPoolList.push_back(mPoolList[i]) ;				
			}

			mPoolList[i] = NULL ;
		}
	}
	mPoolList.clear() ;
#endif
}
1891
//static 
//Create the singleton; must be called exactly once, before any pool use.
void LLPrivateMemoryPoolManager::initClass(BOOL enabled, U32 max_pool_size) 
{
	llassert_always(!sInstance) ;

	sInstance = new LLPrivateMemoryPoolManager(enabled, max_pool_size) ;
}
1899
//static 
//Singleton accessor. Deliberately does NOT lazily construct (the old lazy
//path is left commented out): returns NULL before initClass / after
//destroyClass, which freeMem relies on to detect shutdown.
LLPrivateMemoryPoolManager* LLPrivateMemoryPoolManager::getInstance() 
{
	//if(!sInstance)
	//{
	//	sInstance = new LLPrivateMemoryPoolManager(FALSE) ;
	//}
	return sInstance ;
}
1909	
1910//static 
1911void LLPrivateMemoryPoolManager::destroyClass() 
1912{
1913	if(sInstance)
1914	{
1915		delete sInstance ;
1916		sInstance = NULL ;
1917	}
1918}
1919
1920LLPrivateMemoryPool* LLPrivateMemoryPoolManager::newPool(S32 type) 
1921{
1922	if(!sPrivatePoolEnabled)
1923	{
1924		return NULL ;
1925	}
1926
1927	if(!mPoolList[type])
1928	{
1929		mPoolList[type] = new LLPrivateMemoryPool(type, mMaxPrivatePoolSize) ;
1930	}
1931
1932	return mPoolList[type] ;
1933}
1934
1935void LLPrivateMemoryPoolManager::deletePool(LLPrivateMemoryPool* pool) 
1936{
1937	if(pool && pool->isEmpty())
1938	{
1939		mPoolList[pool->getType()] = NULL ;
1940		delete pool;
1941	}
1942}
1943
1944//debug
1945void LLPrivateMemoryPoolManager::updateStatistics()
1946{
1947	mTotalReservedSize = 0 ;
1948	mTotalAllocatedSize = 0 ;
1949
1950	for(U32 i = 0; i < mPoolList.size(); i++)
1951	{
1952		if(mPoolList[i])
1953		{
1954			mTotalReservedSize += mPoolList[i]->getTotalReservedSize() ;
1955			mTotalAllocatedSize += mPoolList[i]->getTotalAllocatedSize() ;
1956		}
1957	}
1958}
1959
#if __DEBUG_PRIVATE_MEM__
//static 
//Debug flavor: allocate from 'poolp' (or the heap when poolp is NULL) and
//record the call site so leaks can be reported at manager shutdown.
char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size, const char* function, const int line) 
{
	char* p ;

	if(!poolp)
	{
		p = (char*)malloc(size) ;
	}
	else
	{
		p = poolp->allocate(size) ;
	}
	
	if(p)
	{
		//NOTE(review): num[16] can overflow -- " line: %d " expands to up
		//to 19 chars + NUL for extreme line numbers; confirm or enlarge.
		char num[16] ;
		sprintf(num, " line: %d ", line) ;
		std::string str(function) ;
		str += num; 

		sMemAllocationTracker[p] = str ;
	}

	return p ;
}	
#else
//static 
//Release flavor: allocate from 'poolp', or fall back to the heap when no
//pool was supplied.
char* LLPrivateMemoryPoolManager::allocate(LLPrivateMemoryPool* poolp, U32 size) 
{
	if(poolp)
	{
		return poolp->allocate(size) ;		
	}
	else
	{
		return (char*)malloc(size) ;
	}
}
#endif
2001
//static 
//Free memory obtained through LLPrivateMemoryPoolManager::allocate.
//Routes to the owning pool when one is given; otherwise either frees from
//the heap (pools disabled) or, after the manager has been destroyed,
//searches the dangling pools for the owner and deletes any pool this free
//finally empties.
void  LLPrivateMemoryPoolManager::freeMem(LLPrivateMemoryPool* poolp, void* addr) 
{
	if(!addr)
	{
		return ;
	}

#if __DEBUG_PRIVATE_MEM__
	sMemAllocationTracker.erase((char*)addr) ;
#endif

	if(poolp)
	{
		poolp->freeMem(addr) ;
	}
	else
	{
		if(!sPrivatePoolEnabled)
		{
			free(addr) ; //private pool is disabled.
		}
		else if(!sInstance) //the private memory manager is destroyed, try the dangling list
		{
			//NOTE(review): 'i' is S32 vs size() (unsigned) -- fine for any
			//realistic list length, but triggers sign-compare warnings.
			for(S32 i = 0 ; i < sDanglingPoolList.size(); i++)
			{
				if(sDanglingPoolList[i]->findChunk((char*)addr))
				{
					sDanglingPoolList[i]->freeMem(addr) ;
					if(sDanglingPoolList[i]->isEmpty())
					{
						delete sDanglingPoolList[i] ;
						
						//swap-with-last removal keeps the vector compact
						if(i < sDanglingPoolList.size() - 1)
						{
							sDanglingPoolList[i] = sDanglingPoolList[sDanglingPoolList.size() - 1] ;
						}
						sDanglingPoolList.pop_back() ;
					}

					addr = NULL ; //flag: the address was successfully released
					break ;
				}
			}		
			llassert_always(!addr) ; //addr should be release before hitting here!
		}
		else
		{
			llerrs << "private pool is used before initialized.!" << llendl ;
		}
	}	
}
2054
2055//--------------------------------------------------------------------
2056//class LLPrivateMemoryPoolTester
2057//--------------------------------------------------------------------
2058#if 0
2059LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::sInstance = NULL ;
2060LLPrivateMemoryPool* LLPrivateMemoryPoolTester::sPool = NULL ;
2061LLPrivateMemoryPoolTester::LLPrivateMemoryPoolTester()
2062{	
2063}
2064	
2065LLPrivateMemoryPoolTester::~LLPrivateMemoryPoolTester() 
2066{	
2067}
2068
2069//static 
2070LLPrivateMemoryPoolTester* LLPrivateMemoryPoolTester::getInstance() 
2071{
2072	if(!sInstance)
2073	{
2074		sInstance = ::new LLPrivateMemoryPoolTester() ;
2075	}
2076	return sInstance ;
2077}
2078
2079//static 
2080void LLPrivateMemoryPoolTester::destroy()
2081{
2082	if(sInstance)
2083	{
2084		::delete sInstance ;
2085		sInstance = NULL ;
2086	}
2087
2088	if(sPool)
2089	{
2090		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
2091		sPool = NULL ;
2092	}
2093}
2094
2095void LLPrivateMemoryPoolTester::run(S32 type) 
2096{
2097	if(sPool)
2098	{
2099		LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
2100	}
2101	sPool = LLPrivateMemoryPoolManager::getInstance()->newPool(type) ;
2102
2103	//run the test
2104	correctnessTest() ;
2105	performanceTest() ;
2106	//fragmentationtest() ;
2107
2108	//release pool.
2109	LLPrivateMemoryPoolManager::getInstance()->deletePool(sPool) ;
2110	sPool = NULL ;
2111}
2112
2113void LLPrivateMemoryPoolTester::test(U32 min_size, U32 max_size, U32 stride, U32 times, 
2114									 bool random_deletion, bool output_statistics)
2115{
2116	U32 levels = (max_size - min_size) / stride + 1 ;
2117	char*** p ;
2118	U32 i, j ;
2119	U32 total_allocated_size = 0 ;
2120
2121	//allocate space for p ;
2122	if(!(p = ::new char**[times]) || !(*p = ::new char*[times * levels]))
2123	{
2124		llerrs << "memory initialization for p failed" << llendl ;
2125	}
2126
2127	//init
2128	for(i = 0 ; i < times; i++)
2129	{
2130		p[i] = *p + i * levels ;
2131		for(j = 0 ; j < levels; j++)
2132		{
2133			p[i][j] = NULL ;
2134		}
2135	}
2136
2137	//allocation
2138	U32 size ;
2139	for(i = 0 ; i < times ; i++)
2140	{
2141		for(j = 0 ; j < levels; j++) 
2142		{
2143			size = min_size + j * stride ;
2144			p[i][j] = ALLOCATE_MEM(sPool, size) ;
2145
2146			total_allocated_size+= size ;
2147
2148			*(U32*)p[i][j] = i ;
2149			*((U32*)p[i][j] + 1) = j ;
2150			//p[i][j][size - 1] = '\0' ; //access the last element to verify the success of the allocation.
2151
2152			//randomly release memory
2153			if(random_deletion)
2154			{
2155				S32 k = rand() % levels ;
2156
2157				if(p[i][k])
2158				{
2159					llassert_always(*(U32*)p[i][k] == i && *((U32*)p[i][k] + 1) == k) ;
2160					FREE_MEM(sPool, p[i][k]) ;
2161					total_allocated_size -= min_size + k * stride ;
2162					p[i][k] = NULL ;
2163				}
2164			}
2165		}
2166	}
2167
2168	//output pool allocation statistics
2169	if(output_statistics)
2170	{
2171	}
2172
2173	//release all memory allocations
2174	for(i = 0 ; i < times; i++)
2175	{
2176		for(j = 0 ; j < levels; j++)
2177		{
2178			if(p[i][j])
2179			{
2180				llassert_always(*(U32*)p[i][j] == i && *((U32*)p[i][j] + 1) == j) ;
2181				FREE_MEM(sPool, p[i][j]) ;
2182				total_allocated_size -= min_size + j * stride ;
2183				p[i][j] = NULL ;
2184			}
2185		}
2186	}
2187
2188	::delete[] *p ;
2189	::delete[] p ;
2190}
2191
2192void LLPrivateMemoryPoolTester::testAndTime(U32 size, U32 times)
2193{
2194	LLTimer timer ;
2195
2196	llinfos << " -**********************- " << llendl ;
2197	llinfos << "test size: " << size << " test times: " << times << llendl ;
2198
2199	timer.reset() ;
2200	char** p = new char*[times] ;
2201		
2202	//using the customized memory pool
2203	//allocation
2204	for(U32 i = 0 ; i < times; i++)
2205	{
2206		p[i] = ALLOCATE_MEM(sPool, size) ;
2207		if(!p[i])
2208		{
2209			llerrs << "allocation failed" << llendl ;
2210		}
2211	}
2212	//de-allocation
2213	for(U32 i = 0 ; i < times; i++)
2214	{
2215		FREE_MEM(sPool, p[i]) ;
2216		p[i] = NULL ;
2217	}
2218	llinfos << "time spent using customized memory pool: " << timer.getElapsedTimeF32() << llendl ;
2219
2220	timer.reset() ;
2221
2222	//using the standard allocator/de-allocator:
2223	//allocation
2224	for(U32 i = 0 ; i < times; i++)
2225	{
2226		p[i] = ::new char[size] ;
2227		if(!p[i])
2228		{
2229			llerrs << "allocation failed" << llendl ;
2230		}
2231	}
2232	//de-all…

Large files files are truncated, but you can click here to view the full file