PageRenderTime 54ms CodeModel.GetById 25ms RepoModel.GetById 0ms app.codeStats 0ms

/branches/refactoring/sources/3rd party/nvtt/nvcore/Containers.h

#
C++ Header | 1059 lines | 681 code | 176 blank | 202 comment | 120 complexity | d5d2f8c7984bedd9fd8cfe445307a3e4 MD5 | raw file
Possible License(s): BSD-3-Clause
  1. // This code is in the public domain -- castanyo@yahoo.es
  2. #ifndef NV_CORE_CONTAINER_H
  3. #define NV_CORE_CONTAINER_H
  4. /*
  5. These containers are based on Thatcher Ulrich <tu@tulrich.com> containers,
  6. donated to the Public Domain.
  7. I've also borrowed some ideas from the Qt toolkit, specially the cool
  8. foreach iterator.
  9. TODO
  10. Do not use memmove in insert & remove, use copy ctors instead.
  11. */
  12. // nvcore
  13. #include <nvcore/nvcore.h>
  14. #include <nvcore/Memory.h>
  15. #include <nvcore/Debug.h>
  16. #include <string.h> // memmove
  17. #include <new> // for placement new
  18. #if NV_CC_GNUC // If typeof is available:
  19. #define NV_FOREACH(i, container) \
  20. typedef typeof(container) NV_STRING_JOIN2(cont,__LINE__); \
  21. for(NV_STRING_JOIN2(cont,__LINE__)::PseudoIndex i((container).start()); !(container).isDone(i); (container).advance(i))
  22. /*
  23. #define NV_FOREACH(i, container) \
  24. for(typename typeof(container)::PseudoIndex i((container).start()); !(container).isDone(i); (container).advance(i))
  25. */
  26. #else // If typeof not available:
// Fallback iterator holder used when typeof is unavailable (e.g. MSVC):
// stores a container's PseudoIndex in a small type-erased byte buffer so
// NV_FOREACH can declare the loop variable without naming the container type.
struct PseudoIndexWrapper {
    template <typename T>
    PseudoIndexWrapper(const T & container) {
        // The container's PseudoIndex must fit in the inline buffer below.
        nvStaticCheck(sizeof(typename T::PseudoIndex) <= sizeof(memory));
        // Construct the index in place, starting at the container's begin.
        new (memory) typename T::PseudoIndex(container.start());
    }
    // PseudoIndex cannot have a dtor! (it is never destroyed here, only
    // reinterpreted in place from the raw buffer)
    template <typename T> typename T::PseudoIndex & operator()(const T * container) {
        return *reinterpret_cast<typename T::PseudoIndex *>(memory);
    }
    template <typename T> const typename T::PseudoIndex & operator()(const T * container) const {
        return *reinterpret_cast<const typename T::PseudoIndex *>(memory);
    }
    // Increase the size if we have bigger enumerators. (Array uses uint,
    // HashMap uses int -- both fit in 4 bytes.)
    uint8 memory[4];
};
  42. #define NV_FOREACH(i, container) \
  43. for(PseudoIndexWrapper i(container); !(container).isDone(i(&(container))); (container).advance(i(&(container))))
  44. #endif
  45. // Declare foreach keyword.
  46. #if !defined NV_NO_USE_KEYWORDS
  47. # define foreach NV_FOREACH
  48. #endif
  49. namespace nv
  50. {
  51. // Templates
  52. /// Return the maximum of two values.
  53. template <typename T>
  54. inline const T & max(const T & a, const T & b)
  55. {
  56. //return std::max(a, b);
  57. if( a < b ) {
  58. return b;
  59. }
  60. return a;
  61. }
  62. /// Return the minimum of two values.
  63. template <typename T>
  64. inline const T & min(const T & a, const T & b)
  65. {
  66. //return std::min(a, b);
  67. if( b < a ) {
  68. return b;
  69. }
  70. return a;
  71. }
  72. /// Clamp between two values.
  73. template <typename T>
  74. inline const T & clamp(const T & x, const T & a, const T & b)
  75. {
  76. return min(max(x, a), b);
  77. }
  78. /// Swap two values.
  79. template <typename T>
  80. inline void swap(T & a, T & b)
  81. {
  82. //return std::swap(a, b);
  83. T temp = a;
  84. a = b;
  85. b = temp;
  86. }
  87. template <typename Key> struct hash
  88. {
  89. inline uint sdbm_hash(const void * data_in, uint size, uint h = 5381)
  90. {
  91. const uint8 * data = (const uint8 *) data_in;
  92. uint i = 0;
  93. while (i < size) {
  94. h = (h << 16) + (h << 6) - h + (uint) data[i++];
  95. }
  96. return h;
  97. }
  98. uint operator()(const Key & k) {
  99. return sdbm_hash(&k, sizeof(Key));
  100. }
  101. };
// Identity hash for int keys (the value is its own hash).
template <> struct hash<int>
{
    uint operator()(int x) const { return x; }
};
// Identity hash for uint keys.
template <> struct hash<uint>
{
    uint operator()(uint x) const { return x; }
};
  110. /// Delete all the elements of a container.
  111. template <typename T>
  112. void deleteAll(T & container)
  113. {
  114. for(typename T::PseudoIndex i = container.start(); !container.isDone(i); container.advance(i))
  115. {
  116. delete container[i];
  117. }
  118. }
  119. /** Return the next power of two.
  120. * @see http://graphics.stanford.edu/~seander/bithacks.html
  121. * @warning Behaviour for 0 is undefined.
  122. * @note isPowerOfTwo(x) == true -> nextPowerOfTwo(x) == x
  123. * @note nextPowerOfTwo(x) = 2 << log2(x-1)
  124. */
  125. inline uint nextPowerOfTwo( uint x )
  126. {
  127. nvDebugCheck( x != 0 );
  128. #if 1 // On modern CPUs this is as fast as using the bsr instruction.
  129. x--;
  130. x |= x >> 1;
  131. x |= x >> 2;
  132. x |= x >> 4;
  133. x |= x >> 8;
  134. x |= x >> 16;
  135. return x+1;
  136. #else
  137. uint p = 1;
  138. while( x > p ) {
  139. p += p;
  140. }
  141. return p;
  142. #endif
  143. }
/// Return true if @a n is a power of two.
/// @note n == 0 also returns true with this test; callers that care must
///       check for zero separately (nextPowerOfTwo already asserts on it).
inline bool isPowerOfTwo( uint n )
{
    // A power of two has exactly one bit set, so n-1 flips all bits
    // below it and the AND clears everything.
    return (n & (n-1)) == 0;
}
  149. /// Simple iterator interface.
  150. template <typename T>
  151. struct Iterator
  152. {
  153. virtual void advance();
  154. virtual bool isDone();
  155. virtual T current();
  156. };
  157. /**
  158. * Replacement for std::vector that is easier to debug and provides
  159. * some nice foreach enumerators.
  160. */
  161. template<typename T>
  162. class NVCORE_CLASS Array {
  163. public:
  164. /// Ctor.
  165. Array() : m_buffer(NULL), m_size(0), m_buffer_size(0)
  166. {
  167. }
  168. /// Copy ctor.
  169. Array( const Array & a ) : m_buffer(NULL), m_size(0), m_buffer_size(0)
  170. {
  171. copy(a.m_buffer, a.m_size);
  172. }
  173. /// Ctor that initializes the vector with the given elements.
  174. Array( const T * ptr, int num ) : m_buffer(NULL), m_size(0), m_buffer_size(0)
  175. {
  176. copy(ptr, num);
  177. }
  178. /// Allocate array.
  179. explicit Array(uint capacity) : m_buffer(NULL), m_size(0), m_buffer_size(0)
  180. {
  181. allocate(capacity);
  182. }
  183. /// Dtor.
  184. ~Array()
  185. {
  186. clear();
  187. allocate(0);
  188. }
  189. /// Const and save vector access.
  190. const T & operator[]( uint index ) const
  191. {
  192. nvDebugCheck(index < m_size);
  193. return m_buffer[index];
  194. }
  195. /// Safe vector access.
  196. T & operator[] ( uint index )
  197. {
  198. nvDebugCheck(index < m_size);
  199. return m_buffer[index];
  200. }
  201. /// Get vector size.
  202. uint size() const { return m_size; }
  203. /// Get vector size.
  204. uint count() const { return m_size; }
  205. /// Get const vector pointer.
  206. const T * buffer() const { return m_buffer; }
  207. /// Get vector pointer.
  208. T * unsecureBuffer() { return m_buffer; }
  209. /// Is vector empty.
  210. bool isEmpty() const { return m_size == 0; }
  211. /// Is a null vector.
  212. bool isNull() const { return m_buffer == NULL; }
  213. /// Push an element at the end of the vector.
  214. void push_back( const T & val )
  215. {
  216. uint new_size = m_size + 1;
  217. if (new_size > m_buffer_size)
  218. {
  219. const T copy(val); // create a copy in case value is inside of this array.
  220. resize(new_size);
  221. m_buffer[new_size-1] = copy;
  222. }
  223. else
  224. {
  225. m_size = new_size;
  226. new(m_buffer+new_size-1) T(val);
  227. }
  228. }
  229. void pushBack( const T & val )
  230. {
  231. push_back(val);
  232. }
  233. void append( const T & val )
  234. {
  235. push_back(val);
  236. }
  237. /// Qt like push operator.
  238. Array<T> & operator<< ( T & t )
  239. {
  240. push_back(t);
  241. return *this;
  242. }
  243. /// Pop and return element at the end of the vector.
  244. void pop_back()
  245. {
  246. nvDebugCheck( m_size > 0 );
  247. resize( m_size - 1 );
  248. }
  249. void popBack()
  250. {
  251. pop_back();
  252. }
  253. /// Get back element.
  254. const T & back() const
  255. {
  256. nvDebugCheck( m_size > 0 );
  257. return m_buffer[m_size-1];
  258. }
  259. /// Get back element.
  260. T & back()
  261. {
  262. nvDebugCheck( m_size > 0 );
  263. return m_buffer[m_size-1];
  264. }
  265. /// Get front element.
  266. const T & front() const
  267. {
  268. nvDebugCheck( m_size > 0 );
  269. return m_buffer[0];
  270. }
  271. /// Get front element.
  272. T & front()
  273. {
  274. nvDebugCheck( m_size > 0 );
  275. return m_buffer[0];
  276. }
  277. /// Check if the given element is contained in the array.
  278. bool contains(const T & e) const
  279. {
  280. for (uint i = 0; i < m_size; i++) {
  281. if (m_buffer[i] == e) return true;
  282. }
  283. return false;
  284. }
  285. /// Remove the element at the given index. This is an expensive operation!
  286. void removeAt( uint index )
  287. {
  288. nvCheck(index >= 0 && index < m_size);
  289. if( m_size == 1 ) {
  290. clear();
  291. }
  292. else {
  293. m_buffer[index].~T();
  294. memmove( m_buffer+index, m_buffer+index+1, sizeof(T) * (m_size - 1 - index) );
  295. m_size--;
  296. }
  297. }
  298. /// Remove the first instance of the given element.
  299. void remove(const T & element)
  300. {
  301. for(PseudoIndex i = start(); !isDone(i); advance(i)) {
  302. removeAt(i);
  303. break;
  304. }
  305. }
  306. /// Insert the given element at the given index shifting all the elements up.
  307. void insertAt( uint index, const T & val = T() )
  308. {
  309. nvCheck( index >= 0 && index <= m_size );
  310. resize( m_size + 1 );
  311. if( index < m_size - 1 ) {
  312. memmove( m_buffer+index+1, m_buffer+index, sizeof(T) * (m_size - 1 - index) );
  313. }
  314. // Copy-construct into the newly opened slot.
  315. new(m_buffer+index) T(val);
  316. }
  317. /// Append the given data to our vector.
  318. void append(const Array<T> & other)
  319. {
  320. append(other.m_buffer, other.m_size);
  321. }
  322. /// Append the given data to our vector.
  323. void append(const T other[], uint count)
  324. {
  325. if( count > 0 ) {
  326. const uint old_size = m_size;
  327. resize(m_size + count);
  328. // Must use operator=() to copy elements, in case of side effects (e.g. ref-counting).
  329. for( uint i = 0; i < count; i++ ) {
  330. m_buffer[old_size + i] = other[i];
  331. }
  332. }
  333. }
  334. /// Remove the given element by replacing it with the last one.
  335. void replaceWithLast(uint index)
  336. {
  337. nvDebugCheck( index < m_size );
  338. m_buffer[index] = back();
  339. (m_buffer+m_size-1)->~T();
  340. m_size--;
  341. }
  342. /// Resize the vector preserving existing elements.
  343. void resize(uint new_size)
  344. {
  345. uint i;
  346. uint old_size = m_size;
  347. m_size = new_size;
  348. // Destruct old elements (if we're shrinking).
  349. for( i = new_size; i < old_size; i++ ) {
  350. (m_buffer+i)->~T(); // Explicit call to the destructor
  351. }
  352. if( m_size == 0 ) {
  353. //Allocate(0); // Don't shrink automatically.
  354. }
  355. else if( m_size <= m_buffer_size/* && m_size > m_buffer_size >> 1*/) {
  356. // don't compact yet.
  357. nvDebugCheck(m_buffer != NULL);
  358. }
  359. else {
  360. uint new_buffer_size;
  361. if( m_buffer_size == 0 ) {
  362. // first allocation
  363. new_buffer_size = m_size;
  364. }
  365. else {
  366. // growing
  367. new_buffer_size = m_size + (m_size >> 2);
  368. }
  369. allocate( new_buffer_size );
  370. }
  371. // Call default constructors
  372. for( i = old_size; i < new_size; i++ ) {
  373. new(m_buffer+i) T; // placement new
  374. }
  375. }
  376. /// Resize the vector preserving existing elements and initializing the
  377. /// new ones with the given value.
  378. void resize( uint new_size, const T &elem )
  379. {
  380. uint i;
  381. uint old_size = m_size;
  382. m_size = new_size;
  383. // Destruct old elements (if we're shrinking).
  384. for( i = new_size; i < old_size; i++ ) {
  385. (m_buffer+i)->~T(); // Explicit call to the destructor
  386. }
  387. if( m_size == 0 ) {
  388. //Allocate(0); // Don't shrink automatically.
  389. }
  390. else if( m_size <= m_buffer_size && m_size > m_buffer_size >> 1 ) {
  391. // don't compact yet.
  392. }
  393. else {
  394. uint new_buffer_size;
  395. if( m_buffer_size == 0 ) {
  396. // first allocation
  397. new_buffer_size = m_size;
  398. }
  399. else {
  400. // growing
  401. new_buffer_size = m_size + (m_size >> 2);
  402. }
  403. allocate( new_buffer_size );
  404. }
  405. // Call copy constructors
  406. for( i = old_size; i < new_size; i++ ) {
  407. new(m_buffer+i) T( elem ); // placement new
  408. }
  409. }
  410. /// Tighten the memory used by the container.
  411. void tighten()
  412. {
  413. // TODO Reallocate only if worth.
  414. }
  415. /// Clear the buffer.
  416. void clear()
  417. {
  418. resize(0);
  419. }
  420. /// Shrink the allocated vector.
  421. void shrink()
  422. {
  423. if( m_size < m_buffer_size ) {
  424. allocate(m_size);
  425. }
  426. }
  427. /// Preallocate space.
  428. void reserve(uint desired_size)
  429. {
  430. if( desired_size > m_buffer_size ) {
  431. allocate( desired_size );
  432. }
  433. }
  434. /// Copy memory to our vector. Resizes the vector if needed.
  435. void copy( const T * ptr, uint num )
  436. {
  437. resize( num );
  438. for(uint i = 0; i < m_size; i++) {
  439. m_buffer[i] = ptr[i];
  440. }
  441. }
  442. /// Assignment operator.
  443. void operator=( const Array<T> & a )
  444. {
  445. copy( a.m_buffer, a.m_size );
  446. }
  447. /*
  448. /// Array serialization.
  449. friend Stream & operator<< ( Stream & s, Array<T> & p )
  450. {
  451. if( s.isLoading() ) {
  452. uint size;
  453. s << size;
  454. p.resize( size );
  455. }
  456. else {
  457. s << p.m_size;
  458. }
  459. for( uint i = 0; i < p.m_size; i++ ) {
  460. s << p.m_buffer[i];
  461. }
  462. return s;
  463. }
  464. */
  465. // Array enumerator.
  466. typedef uint PseudoIndex;
  467. PseudoIndex start() const { return 0; }
  468. bool isDone(const PseudoIndex & i) const { nvDebugCheck(i <= this->m_size); return i == this->m_size; };
  469. void advance(PseudoIndex & i) const { nvDebugCheck(i <= this->m_size); i++; }
  470. #if NV_CC_MSVC
  471. T & operator[]( const PseudoIndexWrapper & i ) {
  472. return m_buffer[i(this)];
  473. }
  474. const T & operator[]( const PseudoIndexWrapper & i ) const {
  475. return m_buffer[i(this)];
  476. }
  477. #endif
  478. /// Swap the members of this vector and the given vector.
  479. friend void swap(Array<T> & a, Array<T> & b)
  480. {
  481. swap(a.m_buffer, b.m_buffer);
  482. swap(a.m_size, b.m_size);
  483. swap(a.m_buffer_size, b.m_buffer_size);
  484. }
  485. private:
  486. /// Change buffer size.
  487. void allocate( uint rsize )
  488. {
  489. m_buffer_size = rsize;
  490. // free the buffer.
  491. if( m_buffer_size == 0 ) {
  492. if( m_buffer ) {
  493. mem::free( m_buffer );
  494. m_buffer = NULL;
  495. }
  496. }
  497. // realloc the buffer
  498. else {
  499. if( m_buffer ) m_buffer = (T *) mem::realloc( m_buffer, sizeof(T) * m_buffer_size );
  500. else m_buffer = (T *) mem::malloc( sizeof(T) * m_buffer_size );
  501. }
  502. }
  503. private:
  504. T * m_buffer;
  505. uint m_size;
  506. uint m_buffer_size;
  507. };
  508. /** Thatcher Ulrich's hash table.
  509. *
  510. * Hash table, linear probing, internal chaining. One
  511. * interesting/nice thing about this implementation is that the table
  512. * itself is a flat chunk of memory containing no pointers, only
  513. * relative indices. If the key and value types of the hash contain
  514. * no pointers, then the hash can be serialized using raw IO. Could
  515. * come in handy.
  516. *
  517. * Never shrinks, unless you explicitly clear() it. Expands on
  518. * demand, though. For best results, if you know roughly how big your
  519. * table will be, default it to that size when you create it.
  520. */
  521. template<typename T, typename U, typename hash_functor = hash<T> >
  522. class NVCORE_CLASS HashMap
  523. {
  524. NV_FORBID_COPY(HashMap)
  525. public:
  526. /// Default ctor.
  527. HashMap() : entry_count(0), size_mask(-1), table(NULL) { }
  528. /// Ctor with size hint.
  529. explicit HashMap(int size_hint) : entry_count(0), size_mask(-1), table(NULL) { setCapacity(size_hint); }
  530. /// Dtor.
  531. ~HashMap() { clear(); }
  532. /// Set a new or existing value under the key, to the value.
  533. void set(const T& key, const U& value)
  534. {
  535. int index = findIndex(key);
  536. if (index >= 0)
  537. {
  538. E(index).value = value;
  539. return;
  540. }
  541. // Entry under key doesn't exist.
  542. add(key, value);
  543. }
  544. /// Add a new value to the hash table, under the specified key.
  545. void add(const T& key, const U& value)
  546. {
  547. nvCheck(findIndex(key) == -1);
  548. checkExpand();
  549. nvCheck(table != NULL);
  550. entry_count++;
  551. const uint hash_value = hash_functor()(key);
  552. const int index = hash_value & size_mask;
  553. Entry * natural_entry = &(E(index));
  554. if (natural_entry->isEmpty())
  555. {
  556. // Put the new entry in.
  557. new (natural_entry) Entry(key, value, -1, hash_value);
  558. }
  559. else
  560. {
  561. // Find a blank spot.
  562. int blank_index = index;
  563. for (;;)
  564. {
  565. blank_index = (blank_index + 1) & size_mask;
  566. if (E(blank_index).isEmpty()) break; // found it
  567. }
  568. Entry * blank_entry = &E(blank_index);
  569. if (int(natural_entry->hash_value & size_mask) == index)
  570. {
  571. // Collision. Link into this chain.
  572. // Move existing list head.
  573. new (blank_entry) Entry(*natural_entry); // placement new, copy ctor
  574. // Put the new info in the natural entry.
  575. natural_entry->key = key;
  576. natural_entry->value = value;
  577. natural_entry->next_in_chain = blank_index;
  578. natural_entry->hash_value = hash_value;
  579. }
  580. else
  581. {
  582. // Existing entry does not naturally
  583. // belong in this slot. Existing
  584. // entry must be moved.
  585. // Find natural location of collided element (i.e. root of chain)
  586. int collided_index = natural_entry->hash_value & size_mask;
  587. for (;;)
  588. {
  589. Entry * e = &E(collided_index);
  590. if (e->next_in_chain == index)
  591. {
  592. // Here's where we need to splice.
  593. new (blank_entry) Entry(*natural_entry);
  594. e->next_in_chain = blank_index;
  595. break;
  596. }
  597. collided_index = e->next_in_chain;
  598. nvCheck(collided_index >= 0 && collided_index <= size_mask);
  599. }
  600. // Put the new data in the natural entry.
  601. natural_entry->key = key;
  602. natural_entry->value = value;
  603. natural_entry->hash_value = hash_value;
  604. natural_entry->next_in_chain = -1;
  605. }
  606. }
  607. }
  608. /// Remove the first value under the specified key.
  609. bool remove(const T& key)
  610. {
  611. if (table == NULL)
  612. {
  613. return false;
  614. }
  615. int index = findIndex(key);
  616. if (index < 0)
  617. {
  618. return false;
  619. }
  620. Entry * entry = &E(index);
  621. if( entry->isEndOfChain() ) {
  622. entry->clear();
  623. }
  624. else {
  625. // Get next entry.
  626. Entry & next_entry = E(entry->next_in_chain);
  627. // Copy next entry in this place.
  628. new (entry) Entry(next_entry);
  629. next_entry.clear();
  630. }
  631. entry_count--;
  632. return true;
  633. }
  634. /// Remove all entries from the hash table.
  635. void clear()
  636. {
  637. if (table != NULL)
  638. {
  639. // Delete the entries.
  640. for (int i = 0, n = size_mask; i <= n; i++)
  641. {
  642. Entry * e = &E(i);
  643. if (e->isEmpty() == false)
  644. {
  645. e->clear();
  646. }
  647. }
  648. mem::free(table);
  649. table = NULL;
  650. entry_count = 0;
  651. size_mask = -1;
  652. }
  653. }
  654. /// Returns true if the hash is empty.
  655. bool isEmpty() const
  656. {
  657. return table == NULL || entry_count == 0;
  658. }
  659. /** Retrieve the value under the given key.
  660. *
  661. * If there's no value under the key, then return false and leave
  662. * *value alone.
  663. *
  664. * If there is a value, return true, and set *value to the entry's
  665. * value.
  666. *
  667. * If value == NULL, return true or false according to the
  668. * presence of the key, but don't touch *value.
  669. */
  670. bool get(const T& key, U* value = NULL) const
  671. {
  672. int index = findIndex(key);
  673. if (index >= 0)
  674. {
  675. if (value) {
  676. *value = E(index).value; // take care with side-effects!
  677. }
  678. return true;
  679. }
  680. return false;
  681. }
  682. /// Determine if the given key is contained in the hash.
  683. bool contains(const T & key) const
  684. {
  685. return get(key);
  686. }
  687. /// Number of entries in the hash.
  688. int size()
  689. {
  690. return entry_count;
  691. }
  692. /// Number of entries in the hash.
  693. int count()
  694. {
  695. return size();
  696. }
  697. /**
  698. * Resize the hash table to fit one more entry. Often this
  699. * doesn't involve any action.
  700. */
  701. void checkExpand()
  702. {
  703. if (table == NULL) {
  704. // Initial creation of table. Make a minimum-sized table.
  705. setRawCapacity(16);
  706. }
  707. else if (entry_count * 3 > (size_mask + 1) * 2) {
  708. // Table is more than 2/3rds full. Expand.
  709. setRawCapacity(entry_count * 2);
  710. }
  711. }
  712. /// Hint the bucket count to >= n.
  713. void resize(int n)
  714. {
  715. // Not really sure what this means in relation to
  716. // STLport's hash_map... they say they "increase the
  717. // bucket count to at least n" -- but does that mean
  718. // their real capacity after resize(n) is more like
  719. // n*2 (since they do linked-list chaining within
  720. // buckets?).
  721. setCapacity(n);
  722. }
  723. /**
  724. * Size the hash so that it can comfortably contain the given
  725. * number of elements. If the hash already contains more
  726. * elements than new_size, then this may be a no-op.
  727. */
  728. void setCapacity(int new_size)
  729. {
  730. int new_raw_size = (new_size * 3) / 2;
  731. if (new_raw_size < size()) { return; }
  732. setRawCapacity(new_raw_size);
  733. }
  734. /// Behaves much like std::pair.
  735. struct Entry
  736. {
  737. int next_in_chain; // internal chaining for collisions
  738. uint hash_value; // avoids recomputing. Worthwhile?
  739. T key;
  740. U value;
  741. Entry() : next_in_chain(-2) {}
  742. Entry(const Entry& e)
  743. : next_in_chain(e.next_in_chain), hash_value(e.hash_value), key(e.key), value(e.value)
  744. {
  745. }
  746. Entry(const T& k, const U& v, int next, int hash)
  747. : next_in_chain(next), hash_value(hash), key(k), value(v)
  748. {
  749. }
  750. bool isEmpty() const { return next_in_chain == -2; }
  751. bool isEndOfChain() const { return next_in_chain == -1; }
  752. void clear()
  753. {
  754. key.~T(); // placement delete
  755. value.~U(); // placement delete
  756. next_in_chain = -2;
  757. }
  758. };
  759. // HashMap enumerator.
  760. typedef int PseudoIndex;
  761. PseudoIndex start() const { PseudoIndex i = 0; findNext(i); return i; }
  762. bool isDone(const PseudoIndex & i) const { nvDebugCheck(i <= size_mask+1); return i == size_mask+1; };
  763. void advance(PseudoIndex & i) const { nvDebugCheck(i <= size_mask+1); i++; findNext(i); }
  764. #if NV_CC_GNUC
  765. Entry & operator[]( const PseudoIndex & i ) {
  766. return E(i);
  767. }
  768. const Entry & operator[]( const PseudoIndex & i ) const {
  769. return E(i);
  770. }
  771. #elif NV_CC_MSVC
  772. Entry & operator[]( const PseudoIndexWrapper & i ) {
  773. return E(i(this));
  774. }
  775. const Entry & operator[]( const PseudoIndexWrapper & i ) const {
  776. return E(i(this));
  777. }
  778. #endif
  779. private:
  780. // Find the index of the matching entry. If no match, then return -1.
  781. int findIndex(const T& key) const
  782. {
  783. if (table == NULL) return -1;
  784. uint hash_value = hash_functor()(key);
  785. int index = hash_value & size_mask;
  786. const Entry * e = &E(index);
  787. if (e->isEmpty()) return -1;
  788. if (int(e->hash_value & size_mask) != index) return -1; // occupied by a collider
  789. for (;;)
  790. {
  791. nvCheck((e->hash_value & size_mask) == (hash_value & size_mask));
  792. if (e->hash_value == hash_value && e->key == key)
  793. {
  794. // Found it.
  795. return index;
  796. }
  797. nvDebugCheck(! (e->key == key)); // keys are equal, but hash differs!
  798. // Keep looking through the chain.
  799. index = e->next_in_chain;
  800. if (index == -1) break; // end of chain
  801. nvCheck(index >= 0 && index <= size_mask);
  802. e = &E(index);
  803. nvCheck(e->isEmpty() == false);
  804. }
  805. return -1;
  806. }
  807. // Helpers.
  808. Entry & E(int index)
  809. {
  810. nvDebugCheck(table != NULL);
  811. nvDebugCheck(index >= 0 && index <= size_mask);
  812. return table[index];
  813. }
  814. const Entry & E(int index) const
  815. {
  816. nvDebugCheck(table != NULL);
  817. nvDebugCheck(index >= 0 && index <= size_mask);
  818. return table[index];
  819. }
  820. /**
  821. * Resize the hash table to the given size (Rehash the
  822. * contents of the current table). The arg is the number of
  823. * hash table entries, not the number of elements we should
  824. * actually contain (which will be less than this).
  825. */
  826. void setRawCapacity(int new_size)
  827. {
  828. if (new_size <= 0) {
  829. // Special case.
  830. clear();
  831. return;
  832. }
  833. // Force new_size to be a power of two.
  834. new_size = nextPowerOfTwo(new_size);
  835. HashMap<T, U, hash_functor> new_hash;
  836. new_hash.table = (Entry *) mem::malloc(sizeof(Entry) * new_size);
  837. nvDebugCheck(new_hash.table != NULL);
  838. new_hash.entry_count = 0;
  839. new_hash.size_mask = new_size - 1;
  840. for (int i = 0; i < new_size; i++)
  841. {
  842. new_hash.E(i).next_in_chain = -2; // mark empty
  843. }
  844. // Copy stuff to new_hash
  845. if (table != NULL)
  846. {
  847. for (int i = 0, n = size_mask; i <= n; i++)
  848. {
  849. Entry * e = &E(i);
  850. if (e->isEmpty() == false)
  851. {
  852. // Insert old entry into new hash.
  853. new_hash.add(e->key, e->value);
  854. e->clear(); // placement delete of old element
  855. }
  856. }
  857. // Delete our old data buffer.
  858. mem::free(table);
  859. }
  860. // Steal new_hash's data.
  861. entry_count = new_hash.entry_count;
  862. size_mask = new_hash.size_mask;
  863. table = new_hash.table;
  864. new_hash.entry_count = 0;
  865. new_hash.size_mask = -1;
  866. new_hash.table = NULL;
  867. }
  868. // Move the enumerator to the next valid element.
  869. void findNext(PseudoIndex & i) const {
  870. while (i <= size_mask && E(i).isEmpty()) {
  871. i++;
  872. }
  873. }
  874. int entry_count;
  875. int size_mask;
  876. Entry * table;
  877. };
  878. } // nv namespace
  879. #endif // NV_CORE_CONTAINER_H