/branches/1.10/ThirdParty/SmartPtr/shared_any.cpp


//+---------------------------------------------------------------------------
//
//  Copyright (C) Microsoft, 2002.
//
//  File:      shared_any.cpp
//
//  Contents:  pool allocator for reference counts
//
//  Classes:   ref_count_allocator and helpers
//
//  Functions:
//
//  Author:    Eric Niebler ( ericne@microsoft.com )
//
//----------------------------------------------------------------------------
#ifdef _MT
# ifndef _WIN32_WINNT
#  define _WIN32_WINNT 0x0403  // needed for InitializeCriticalSectionAndSpinCount
# endif
# include <windows.h>
# include <msi.h>
#endif

#include <cassert>
#include <functional>   // for std::less
#include <algorithm>    // for std::swap
#include <new>          // for std::nothrow
#include "shared_any.h"
#include "scoped_any.h"

namespace detail
{
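    // critsec: thin wrapper over a Win32 CRITICAL_SECTION, initialized with a
    // spin count so briefly contended locks spin before falling back to a
    // kernel wait. In single-threaded builds (_MT not defined) it is an empty type.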
    struct critsec
    {
    #ifdef _MT
        CRITICAL_SECTION m_cs;

        critsec()
        {
            InitializeCriticalSectionAndSpinCount( &m_cs, 4000 );
        }
        ~critsec()
        {
            DeleteCriticalSection( &m_cs );
        }
        void enter()
        {
            EnterCriticalSection( &m_cs );
        }
        void leave()
        {
            LeaveCriticalSection( &m_cs );
        }
    #endif
    };

    namespace
    {
        critsec g_critsec;
    }
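    // lock: scoped guard for a critsec. The constructor enters the critical
    // section and the destructor leaves it; copying is disabled. In
    // single-threaded builds it does nothing.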
    struct lock
    {
    #ifdef _MT
        critsec & m_cs;

        explicit lock( critsec & cs )
            : m_cs(cs)
        {
            m_cs.enter();
        }
        ~lock()
        {
            m_cs.leave();
        }
    #else
        explicit lock( critsec & )
        {
        }
    #endif

    private:
        lock( lock const & );
        lock & operator=( lock const & );
    };
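    // ref_count_block: a fixed pool of 256 reference counts. Slots that are
    // not handed out form an intrusive free list: each free slot stores the
    // index of the next free slot, and m_free_list holds the index of the
    // first one.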
    struct ref_count_block
    {
        static long const s_sizeBlock = 256;

        short m_free_list;                 // offset to start of freelist
        short m_available;                 // count of refcounts in this block that are available
        long  m_refcounts[ s_sizeBlock ];

        ref_count_block()
            : m_free_list(0), m_available(s_sizeBlock)
        {
            for( long l=0; l<s_sizeBlock; ++l )
                m_refcounts[l] = l+1;
        }

        bool empty() const // throw()
        {
            return s_sizeBlock == m_available;
        }

        bool full() const // throw()
        {
            return 0 == m_available;
        }

        long volatile *alloc( lock & )
        {
            assert( 0 != m_available );
            long *refcount = m_refcounts + m_free_list;
            m_free_list = static_cast<short>( *refcount );
            --m_available;
            return refcount;
        }

        void free( long volatile *refcount, lock & ) // throw()
        {
            assert( owns( refcount ) );
            *refcount = m_free_list;
            m_free_list = static_cast<short>( refcount - m_refcounts );
            ++m_available;
        }

        bool owns( long volatile *refcount ) const // throw()
        {
            return ! std::less<void*>()( const_cast<long*>( refcount ), const_cast<long*>( m_refcounts ) ) &&
                     std::less<void*>()( const_cast<long*>( refcount ), const_cast<long*>( m_refcounts ) + s_sizeBlock );
        }
    };
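    // node: one pool block plus links. The allocator keeps its blocks in a
    // doubly-linked list headed by m_list_blocks; constructing a node splices
    // it into the list in front of 'next'.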
    struct ref_count_allocator::node
    {
        node *m_next;
        node *m_prev;
        ref_count_block m_block;

        explicit node( node *next=0, node *prev=0 )
            : m_next(next), m_prev(prev), m_block()
        {
            if( m_next )
                m_next->m_prev = this;
            if( m_prev )
                m_prev->m_next = this;
        }
    };

    ref_count_allocator::ref_count_allocator()
        : m_list_blocks(0), m_last_alloc(0), m_last_free(0)
    {
    }

    ref_count_allocator::~ref_count_allocator()
    {
        // Just leak the blocks. It's ok, really.
        // If you need to clean up the blocks and
        // you are certain that no refcounts are
        // outstanding, you can use the finalize()
        // method to force deallocation.
    }
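    // finalize: walk the block list and delete every block. Only safe when no
    // reference counts handed out by this allocator are still in use.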
    void ref_count_allocator::finalize()
    {
        lock l( g_critsec );
        for( node *next; m_list_blocks; m_list_blocks=next )
        {
            next = m_list_blocks->m_next;
            delete m_list_blocks;
        }
        m_last_alloc = 0;
        m_last_free = 0;
    }
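    // alloc: hand out one reference-count slot. The last block that satisfied
    // an allocation is tried first; if it is full, the block list is searched,
    // and if every block is full a new block is pushed onto the front of the
    // list. Returns 0 if a new block cannot be allocated.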
    long volatile *ref_count_allocator::alloc()
    {
        lock l( g_critsec );
        if( ! m_last_alloc || m_last_alloc->m_block.full() )
        {
            for( m_last_alloc = m_list_blocks;
                 m_last_alloc && m_last_alloc->m_block.full();
                 m_last_alloc = m_last_alloc->m_next );

            if( ! m_last_alloc )
            {
                m_last_alloc = new( std::nothrow ) node( m_list_blocks );
                if( ! m_last_alloc )
                    return 0;
                m_list_blocks = m_last_alloc;
            }
        }
        return m_last_alloc->m_block.alloc( l );
    }
    long volatile *ref_count_allocator::alloc( long val )
    {
        long volatile *refcount = alloc();
        if( refcount )      // alloc() returns 0 on out-of-memory
            *refcount = val;
        return refcount;
    }
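    // free: return a slot to the block that owns it. The last block used for a
    // free is tried first, otherwise the list is searched. A block that becomes
    // completely empty is either moved to the head of the list (so the next
    // alloc finds it quickly) or, if the head is already an empty block,
    // deleted. The deletion is deferred via scoped_last_free, which is declared
    // before the lock so the block is destroyed only after the critical section
    // has been released.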
    void ref_count_allocator::free( long volatile *refcount ) // throw()
    {
        // don't rearrange the order of these locals!
        scoped_any<node*,close_delete> scoped_last_free;
        lock l( g_critsec );

        if( ! m_last_free || ! m_last_free->m_block.owns( refcount ) )
        {
            for( m_last_free = m_list_blocks;
                 m_last_free && ! m_last_free->m_block.owns( refcount );
                 m_last_free = m_last_free->m_next );
        }

        assert( m_last_free && m_last_free->m_block.owns( refcount ) );
        m_last_free->m_block.free( refcount, l );

        if( m_last_free != m_list_blocks && m_last_free->m_block.empty() )
        {
            // unlink the now-empty block from the list
            if( 0 != ( m_last_free->m_prev->m_next = m_last_free->m_next ) )
                m_last_free->m_next->m_prev = m_last_free->m_prev;

            if( ! m_list_blocks->m_block.empty() )
            {
                // move the empty block to the head of the list
                m_last_free->m_next = m_list_blocks;
                m_last_free->m_prev = 0;
                m_list_blocks->m_prev = m_last_free;
                m_list_blocks = m_last_free;
            }
            else
            {
                // this block is about to go away; don't leave the allocation hint dangling
                if( m_last_alloc == m_last_free )
                    m_last_alloc = 0;
                reset( scoped_last_free, m_last_free ); // deleted after critsec is released
            }
            m_last_free = 0;
        }
    }
    // Here is the global reference count allocator.
    ref_count_allocator ref_count_allocator::instance;

} // namespace detail
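
// Usage sketch (illustrative only; the actual shared_any code may differ).
// A reference-counted handle would grab a count from the pool when it takes
// ownership and return the slot when the last owner releases it:
//
//     long volatile *rc = detail::ref_count_allocator::instance.alloc( 1 );
//     if( rc )                                    // the pool can return 0 on OOM
//     {
//         InterlockedIncrement( rc );             // a second owner copies the handle
//         InterlockedDecrement( rc );             // first owner releases
//         if( 0 == InterlockedDecrement( rc ) )   // last owner releases
//             detail::ref_count_allocator::instance.free( rc );
//     }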