/vendor/gc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86.h.orig

http://github.com/feyeleanor/RubyGoLightly

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */
/* The following really assume we have a 486 or better.  Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command   */
/* line options.                                                       */
/* We should perhaps test dynamically.                                 */

#include "../all_aligned_atomic_load_store.h"

/* Real X86 implementations, except for some old WinChips, appear      */
/* to enforce ordering between memory operations, EXCEPT that a later  */
/* read can pass earlier writes, presumably due to the visible         */
/* presence of store buffers.                                          */
/* We ignore both the WinChips, and the fact that the official specs   */
/* seem to be much weaker (and arguably too weak to be usable).        */

#include "../ordered_except_wr.h"
#include "../test_and_set_t_is_char.h"
#include "../standard_ao_double_t.h"
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
  __asm__ __volatile__("mfence" : : : "memory");
}

#define AO_HAVE_nop_full

#else

/* We could use the cpuid instruction.  But that seems to be slower    */
/* than the default implementation based on test_and_set_full.  Thus   */
/* we omit that bit of misinformation here.                            */

#endif
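
/* Illustrative sketch, not part of the original header: the store-buffer */
/* reordering described above is exactly what AO_nop_full guards against. */
/* In a Dekker-style handshake, each thread's read of the other's flag    */
/* could otherwise be satisfied before its own write has drained from the */
/* store buffer, letting both threads proceed at once.  All example_*     */
/* names and the AO_EXAMPLES guard are hypothetical.                      */
#ifdef AO_EXAMPLES
static volatile AO_t example_flag0 = 0, example_flag1 = 0;

AO_INLINE void
example_enter0(void)
{
  AO_store(&example_flag0, 1);
  AO_nop_full();   /* keep the read below from passing the write above */
  while (AO_load(&example_flag1) != 0) {
    /* spin; a real implementation would back off */
  }
}
#endif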
/* As far as we can tell, the lfence and sfence instructions are not   */
/* currently needed or useful for cached memory accesses.              */

/* Really only works for 486 and later */
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_fetch_and_add_full
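
/* Usage sketch, not part of the original header: xadd returns the old  */
/* value, so a shared counter doubles as a ticket dispenser.  The names */
/* example_counter and example_take_ticket are hypothetical, and the    */
/* block is guarded so it is not compiled into the real header.         */
#ifdef AO_EXAMPLES
static volatile AO_t example_counter = 0;

AO_INLINE AO_t
example_take_ticket(void)
{
  /* Atomically increment the counter and return its previous value;   */
  /* the _full suffix means the operation also acts as a full barrier. */
  return AO_fetch_and_add_full(&example_counter, 1);
}
#endif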
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_short_fetch_and_add_full
/* Really only works for 486 and later */
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orl %1, %0" :
                        "=m" (*p) : "r" (incr), "m" (*p) : "memory");
}

#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix, since */
  /* locking is implicit whenever xchg has a memory operand.           */
  __asm__ __volatile__("xchgb %0, %1"
                       : "=q"(oldval), "=m"(*addr)
                       : "0"(0xff), "m"(*addr) : "memory");
  return (AO_TS_VAL_t)oldval;
}

#define AO_HAVE_test_and_set_full
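
/* Usage sketch, not part of the original header: a minimal spinlock on */
/* top of test-and-set.  AO_CLEAR is the matching release operation; in */
/* the real tree it is defined by atomic_ops.h, so this hypothetical    */
/* example assumes that header is visible.                              */
#ifdef AO_EXAMPLES
AO_INLINE void
example_spin_lock(volatile AO_TS_t *lock)
{
  while (AO_test_and_set_full(lock) == AO_TS_SET) {
    /* spin; a real lock would yield or back off here */
  }
}

AO_INLINE void
example_spin_unlock(volatile AO_TS_t *lock)
{
  AO_CLEAR(lock);   /* store the clear value with release semantics */
}
#endif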
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
                         AO_t old, AO_t new_val)
{
  char result;
  __asm__ __volatile__("lock; cmpxchgl %3, %0; setz %1"
                       : "=m"(*addr), "=q"(result)
                       : "m"(*addr), "r" (new_val), "a"(old) : "memory");
  return (int) result;
}

#define AO_HAVE_compare_and_swap_full
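
/* Usage sketch, not part of the original header: the standard            */
/* load/compute/retry loop built on compare-and-swap, here maintaining a  */
/* running maximum.  example_store_max is a hypothetical name.            */
#ifdef AO_EXAMPLES
AO_INLINE void
example_store_max(volatile AO_t *p, AO_t val)
{
  AO_t cur;
  do {
    cur = AO_load(p);
    if (cur >= val)
      return;        /* another thread already stored a larger value */
  } while (!AO_compare_and_swap_full(p, cur, val));
}
#endif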
/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
  /* Binding new_val1 to ebx explicitly seems to avoid a gcc error */
  /* complaining that ebx is unavailable (e.g. when it is reserved */
  /* as the PIC register).                                         */
  register AO_t nv1 asm("%ebx") = new_val1;

  __asm__ __volatile__("lock; cmpxchg8b %0; setz %1"
                       : "=m"(*addr), "=q"(result)
                       : "m"(*addr), "a" (old_val1), "d" (old_val2),
                         "b" (nv1), "c" (new_val2) : "memory");
  return (int) result;
}

#define AO_HAVE_compare_double_and_swap_double_full
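
/* Usage sketch, not part of the original header: double-width CAS lets a */
/* pointer-sized value carry a version counter, the usual defence against */
/* the ABA problem in lock-free structures.  example_update_versioned is  */
/* a hypothetical name.                                                   */
#ifdef AO_EXAMPLES
AO_INLINE int
example_update_versioned(volatile AO_double_t *cell,
                         AO_t old_ptr, AO_t old_ver, AO_t new_ptr)
{
  /* Succeeds only if both halves are unchanged; bumping the version */
  /* on every update keeps a recycled pointer value from matching.   */
  return AO_compare_double_and_swap_double_full(cell,
                                                old_ptr, old_ver,
                                                new_ptr, old_ver + 1);
}
#endif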
#include "../ao_t_is_int.h"