/External/Mysql-5.0/include/atomic/generic-msvc.h


/* Copyright (C) 2006-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef _atomic_h_cleanup_
#define _atomic_h_cleanup_ "atomic/generic-msvc.h"

/*
  We don't implement anything specific for MY_ATOMIC_MODE_DUMMY; we always use
  intrinsics.
  8- and 16-bit atomics are not implemented, but they can be added if necessary.
*/

/*
  x86 compilers (both VS2003 and VS2005) never use intrinsics, but generate
  function calls to kernel32 instead, even in the optimized build.
  We force intrinsics as described in the MSDN documentation for
  _InterlockedCompareExchange.
*/
#ifdef _M_IX86

#if (_MSC_VER >= 1500)
#include <intrin.h>
#else
C_MODE_START
/* Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics */
LONG _InterlockedExchange (LONG volatile *Target, LONG Value);
LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
LONG _InterlockedExchangeAdd (LONG volatile *Addend, LONG Value);
C_MODE_END

#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchange)
#endif

#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedCompareExchange _InterlockedCompareExchange
/*
  There is no need to do anything special for InterlockedCompareExchangePointer,
  as it is a #define to InterlockedCompareExchange. The same applies to
  InterlockedExchangePointer.
*/
#endif /* _M_IX86 */

#define MY_ATOMIC_MODE "msvc-intrinsics"
#define IL_EXCHG_ADD32(X,Y)    InterlockedExchangeAdd((volatile LONG *)(X),(Y))
#define IL_COMP_EXCHG32(X,Y,Z) InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
#define IL_COMP_EXCHGptr       InterlockedCompareExchangePointer
#define IL_EXCHG32(X,Y)        InterlockedExchange((volatile LONG *)(X),(Y))
#define IL_EXCHGptr            InterlockedExchangePointer
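
/*
  Illustrative sketch, not part of the original header: each IL_* wrapper
  returns the value the target held *before* the operation, so an atomic
  fetch-and-increment of a 32-bit counter could be built as in the
  hypothetical helper below. The name example_fetch_and_inc32 is an
  assumption made for illustration only.

    static __inline LONG example_fetch_and_inc32(volatile LONG *counter)
    {
      return IL_EXCHG_ADD32(counter, 1);   (* returns the pre-increment value *)
    }
*/
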
#define make_atomic_add_body(S) \
  v= IL_EXCHG_ADD ## S (a, v)
#define make_atomic_cas_body(S) \
  int ## S initial_cmp= *cmp; \
  int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
  if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
#define make_atomic_swap_body(S) \
  v= IL_EXCHG ## S (a, v)
#define make_atomic_load_body(S)       \
  ret= 0; /* avoid compiler warning */ \
  ret= IL_COMP_EXCHG ## S (a, ret, ret);
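
/*
  Illustrative sketch, not part of the original header: the *_body macros
  above are expanded inside wrapper functions generated elsewhere (in
  my_atomic.h), which is assumed to produce something roughly like the
  following for the 32-bit CAS. The variable names (a, cmp, set, ret) follow
  the identifiers the macro bodies rely on; the exact wrapper shape shown
  here is an assumption.

    static __inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
    {
      int ret;
      make_atomic_cas_body(32);   (* compares *a with *cmp; on mismatch stores *a into *cmp *)
      return ret;                 (* nonzero if the swap happened *)
    }
*/
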

/*
  my_yield_processor (equivalent of the x86 PAUSE instruction) should be used
  to improve performance on hyperthreaded CPUs. Intel recommends using it in
  spin loops on non-HT machines as well, to reduce power consumption (see e.g.
  http://softwarecommunity.intel.com/articles/eng/2004.htm)

  Running benchmarks for spinlocks implemented with InterlockedCompareExchange
  and YieldProcessor shows that much better performance is achieved by calling
  YieldProcessor in a loop - that is, yielding longer. On Intel boxes, setting
  the loop count in the range 200-300 brought the best results.
*/
#ifndef YIELD_LOOPS
#define YIELD_LOOPS 200
#endif

static __inline int my_yield_processor()
{
  int i;
  for (i= 0; i < YIELD_LOOPS; i++)
  {
#if (_MSC_VER <= 1310)
    /* On older compilers YieldProcessor is not available, use inline assembly */
    __asm { rep nop }
#else
    YieldProcessor();
#endif
  }
  return 1;
}

#define LF_BACKOFF my_yield_processor()
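
/*
  Illustrative sketch, not part of the original header: LF_BACKOFF is meant
  to be invoked on each failed iteration of a spin loop. A minimal spinlock
  acquire built on the wrappers above might look like this; the function name
  and the 0 = free / 1 = taken convention are assumptions for illustration.

    static __inline void example_spin_acquire(volatile LONG *lock)
    {
      while (IL_COMP_EXCHG32(lock, 1, 0) != 0)   (* returns the old value; 0 means we got it *)
        LF_BACKOFF;                              (* yield for ~YIELD_LOOPS pause instructions *)
    }
*/
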

#else /* cleanup */

#undef IL_EXCHG_ADD32
#undef IL_COMP_EXCHG32
#undef IL_COMP_EXCHGptr
#undef IL_EXCHG32
#undef IL_EXCHGptr

#endif