
/mordor/atomic.h

http://github.com/mozy/mordor
Possible License(s): BSD-3-Clause
#ifndef __MORDOR_ATOMIC_H__
#define __MORDOR_ATOMIC_H__

#include <boost/utility/enable_if.hpp>

#include "predef.h"

#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1 && defined(__arm__))
#include <boost/smart_ptr/detail/sp_counted_base_spin.hpp>
#endif
#if defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ == 0) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
// Declares __gnu_cxx::__exchange_and_add and _Atomic_word, used by the
// GCC 3.4 / 4.0 fallback below.
#include <bits/atomicity.h>
#endif
#ifdef OSX
#include <libkern/OSAtomic.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#endif
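
// Portable atomic primitives. Based on the platform macros from predef.h,
// atomicIncrement / atomicDecrement / atomicAdd / atomicCompareAndSwap /
// atomicSwap (and, on Windows, OS X, and modern GCC, bit-level test-and-set
// helpers) are mapped onto Win32 Interlocked* functions, OS X
// OSAtomic*Barrier calls, GCC __sync builtins, or older GCC / ARM fallbacks.
// The Atomic<T> class at the bottom wraps a value with atomic arithmetic
// operators built on these free functions, and compiler-only memory barriers
// are provided at the end.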

namespace Mordor {

#ifdef WINDOWS
template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONG), T>::type
atomicDecrement(volatile T& t)
{
    return InterlockedDecrement((volatile LONG*)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONG), T>::type
atomicIncrement(volatile T& t)
{
    return InterlockedIncrement((volatile LONG*)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONG), T>::type
atomicAdd(volatile T& t, T v)
{
    return InterlockedExchangeAdd((volatile LONG*)&t, (LONG)v) + v;
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONG), T>::type
atomicCompareAndSwap(volatile T& t, T newvalue, T comparand)
{
    return InterlockedCompareExchange((volatile LONG*)&t, (LONG)newvalue, (LONG)comparand);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONG), T>::type
atomicSwap(volatile T& t, T newvalue)
{
    return InterlockedExchange((volatile LONG*)&t, (LONG)newvalue);
}
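
// The test-and-set/test-and-clear helpers treat the target as an array of
// 32-bit words and number bits from the most significant bit of each word:
// bit 0 is the MSB of the first LONG, bit 31 its LSB, bit 32 the MSB of the
// next LONG, and so on. Both return the previous value of the bit.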
inline
bool
atomicTestAndSet(volatile void *address, int bit = 0)
{
    // InterlockedBitTestAndSet takes an LSB-based bit offset, so convert
    // from the MSB-first numbering used throughout this header.
    return !!InterlockedBitTestAndSet((volatile LONG*)address + (bit >> 5), (LONG)(31 - (bit & 31)));
}

inline
bool
atomicTestAndClear(volatile void *address, int bit = 0)
{
    return !!InterlockedBitTestAndReset((volatile LONG*)address + (bit >> 5), (LONG)(31 - (bit & 31)));
}

#ifdef X86_64
template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONGLONG), T>::type
atomicDecrement(volatile T& t)
{
    return InterlockedDecrement64((volatile LONGLONG*)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONGLONG), T>::type
atomicIncrement(volatile T& t)
{
    return InterlockedIncrement64((volatile LONGLONG*)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONGLONG), T>::type
atomicAdd(volatile T& t, T v)
{
    return InterlockedExchangeAdd64((volatile LONGLONG*)&t, (LONGLONG)v) + v;
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONGLONG), T>::type
atomicCompareAndSwap(volatile T& t, T newvalue, T comparand)
{
    return InterlockedCompareExchange64((volatile LONGLONG*)&t, (LONGLONG)newvalue, (LONGLONG)comparand);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(LONGLONG), T>::type
atomicSwap(volatile T& t, T newvalue)
{
    return InterlockedExchange64((volatile LONGLONG*)&t, (LONGLONG)newvalue);
}
#endif

#elif defined(OSX)

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int32_t), T>::type
atomicDecrement(volatile T &t)
{
    return OSAtomicDecrement32Barrier((volatile int32_t *)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int32_t), T>::type
atomicIncrement(volatile T &t)
{
    return OSAtomicIncrement32Barrier((volatile int32_t *)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int32_t), T>::type
atomicAdd(volatile T &t, T v)
{
    return OSAtomicAdd32Barrier((int32_t)v, (volatile int32_t *)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int32_t), T>::type
atomicCompareAndSwap(volatile T &t, T newvalue, T comparand)
{
    return OSAtomicCompareAndSwap32Barrier((int32_t)comparand, (int32_t)newvalue, (volatile int32_t *)&t) ? comparand : t;
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int32_t), T>::type
atomicSwap(volatile T &t, T newvalue)
{
    int32_t comparand = (int32_t)t;
    while (!OSAtomicCompareAndSwap32Barrier((int32_t)comparand, (int32_t)newvalue, (volatile int32_t *)&t))
        comparand = (int32_t)t;
    return comparand;
}
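
// The OSAtomic bit operations number bits byte-wise: bit n is
// (0x80 >> (n & 7)) of the byte at ((char *)addr + (n >> 3)), so bit 0 is
// the most significant bit of the first byte.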
inline
bool
atomicTestAndSet(volatile void *addr, int bit = 0)
{
    return OSAtomicTestAndSetBarrier((uint32_t)bit, addr);
}

inline
bool
atomicTestAndClear(volatile void *addr, int bit = 0)
{
    return OSAtomicTestAndClearBarrier((uint32_t)bit, addr);
}

#ifdef X86_64
template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int64_t), T>::type
atomicDecrement(volatile T &t)
{
    return OSAtomicDecrement64Barrier((volatile int64_t *)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int64_t), T>::type
atomicIncrement(volatile T &t)
{
    return OSAtomicIncrement64Barrier((volatile int64_t *)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int64_t), T>::type
atomicAdd(volatile T &t, T v)
{
    return OSAtomicAdd64Barrier((int64_t)v, (volatile int64_t *)&t);
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int64_t), T>::type
atomicCompareAndSwap(volatile T &t, T newvalue, T comparand)
{
    return OSAtomicCompareAndSwap64Barrier((int64_t)comparand, (int64_t)newvalue, (volatile int64_t *)&t) ? comparand : t;
}

template <class T>
typename boost::enable_if_c<sizeof(T) == sizeof(int64_t), T>::type
atomicSwap(volatile T &t, T newvalue)
{
    int64_t comparand = (int64_t)t;
    while (!OSAtomicCompareAndSwap64Barrier((int64_t)comparand, (int64_t)newvalue, (volatile int64_t *)&t))
        comparand = (int64_t)t;
    return comparand;
}
#endif

#elif ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined(__arm__))
template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(void *), T>::type
atomicDecrement(volatile T& t) { return __sync_sub_and_fetch(&t, 1); }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(void *), T>::type
atomicIncrement(volatile T& t) { return __sync_add_and_fetch(&t, 1); }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(void *), T>::type
atomicAdd(volatile T& t, T v) { return __sync_add_and_fetch(&t, v); }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(void *), T>::type
atomicCompareAndSwap(volatile T &t, T newvalue, T comparand)
{ return __sync_val_compare_and_swap((volatile T *)&t, comparand, newvalue); }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(void *), T>::type
atomicSwap(volatile T &t, T newvalue)
{ return __sync_lock_test_and_set(&t, newvalue); }
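
// The portable test-and-set/test-and-clear below are built from
// compare-and-swap retry loops. Bits are numbered from the most significant
// bit of each int-sized word, matching the Windows implementation earlier in
// this header; both helpers return the previous value of the bit.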
inline
bool
atomicTestAndSet(volatile void *address, int bit = 0)
{
    // Build the single-bit mask with an unsigned shift so it cannot
    // sign-extend, and select the int-sized word that contains the bit.
    int mask = (int)((1u << (sizeof(int) * 8 - 1)) >> (bit & (sizeof(int) * 8 - 1)));
    volatile int &target = *((volatile int *)address + bit / (sizeof(int) * 8));
    int oldvalue, newvalue;
    do {
        oldvalue = target;
        newvalue = oldvalue | mask;
    } while (atomicCompareAndSwap(target, newvalue, oldvalue) != oldvalue);
    return !!(oldvalue & mask);
}

inline
bool
atomicTestAndClear(volatile void *address, int bit = 0)
{
    int mask = (int)((1u << (sizeof(int) * 8 - 1)) >> (bit & (sizeof(int) * 8 - 1)));
    volatile int &target = *((volatile int *)address + bit / (sizeof(int) * 8));
    int oldvalue, newvalue;
    do {
        oldvalue = target;
        newvalue = oldvalue & ~mask;
    } while (atomicCompareAndSwap(target, newvalue, oldvalue) != oldvalue);
    return !!(oldvalue & mask);
}

#elif (__GNUC__ == 4 && __GNUC_MINOR__ == 0) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(_Atomic_word), T>::type
atomicDecrement(volatile T& t) { return __gnu_cxx::__exchange_and_add((_Atomic_word*)&t, -1) - 1; }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(_Atomic_word), T>::type
atomicIncrement(volatile T& t) { return __gnu_cxx::__exchange_and_add((_Atomic_word*)&t, 1) + 1; }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(_Atomic_word), T>::type
atomicAdd(volatile T& t, T v) { return __gnu_cxx::__exchange_and_add((_Atomic_word*)&t, v) + v; }

#elif (__GNUC__ == 4 && __GNUC_MINOR__ >= 1 && defined(__arm__))
template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(int), T>::type
atomicAdd(volatile T& t, T v) { return boost::detail::atomic_exchange_and_add((int *)&t, (int)v) + v; }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(int), T>::type
atomicIncrement(volatile T& t) { return atomicAdd(t, (T)1); }

template <class T>
typename boost::enable_if_c<sizeof(T) <= sizeof(int), T>::type
atomicCompareAndSwap(volatile T &t, T newvalue, T comparand)
{
    ::boost::detail::spinlock_pool<1>::scoped_lock lock((void *)&t);
    T oldvalue = t;
    if (oldvalue == comparand)
        t = newvalue;
    return oldvalue;
}
#endif
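
// Illustrative sketch (not part of the header): a typical compare-and-swap
// retry loop built on the free functions above, on platforms that provide
// atomicCompareAndSwap. `incrementUpTo` and `limit` are hypothetical names;
// the loop increments a counter but never past the given limit.
//
//     long incrementUpTo(volatile long &counter, long limit)
//     {
//         long oldvalue;
//         do {
//             oldvalue = counter;
//             if (oldvalue >= limit)
//                 return oldvalue;                         // already at the cap
//         } while (Mordor::atomicCompareAndSwap(counter, oldvalue + 1, oldvalue)
//                  != oldvalue);                           // retry if someone raced us
//         return oldvalue + 1;
//     }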

template <typename T>
class Atomic
{
public:
    Atomic(T val = 0) : m_val(val) { }

    operator T(void) const { return atomicAdd(m_val, T(0)); }

    T operator +=(T v) { return atomicAdd(m_val, v); }
    T operator -=(T v) { return atomicAdd(m_val, -v); }
    T operator ++(void) { return atomicIncrement(m_val); }
    T operator --(void) { return atomicDecrement(m_val); }
    // the postfix operators could be a little more efficient if we
    // created atomicPost(Increment,Decrement) functions, but meh
    T operator ++(int) { return atomicIncrement(m_val) - 1; }
    T operator --(int) { return atomicDecrement(m_val) + 1; }

private:
    mutable T m_val;
};
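
// Illustrative usage sketch (not part of the header), assuming the platform
// branch above supplies the arithmetic primitives for the chosen type:
//
//     Mordor::Atomic<long> counter(0);
//     ++counter;              // atomic pre-increment; yields the new value
//     counter += 5;           // atomic add
//     long seen = counter;    // atomic read via operator T()
//
// The compilerRead/Write/ReadWriteBarrier functions below only constrain
// compiler reordering; they emit no hardware memory fence.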
#ifdef _MSC_VER
inline void compilerReadWriteBarrier() { _ReadWriteBarrier(); }
inline void compilerReadBarrier() { _ReadBarrier(); }
inline void compilerWriteBarrier() { _WriteBarrier(); }
#elif defined(__GNUC__)
inline void compilerReadWriteBarrier() { __asm__ __volatile__ ("" ::: "memory"); }
inline void compilerReadBarrier() { compilerReadWriteBarrier(); }
inline void compilerWriteBarrier() { compilerReadWriteBarrier(); }
#endif

}

#endif