
/arch/arm/include/asm/spinlock.h

https://bitbucket.org/sammyz/iscream_thunderc-2.6.35-rebase
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * Data synchronisation barrier followed by SEV: makes the preceding
 * lock-release store visible, then wakes any CPUs sleeping in WFE
 * waiting for the lock.
 */
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		"sev"
	);
#elif defined(CONFIG_CPU_32v6K)
	__asm__ __volatile__ (
		"mcr	p15, 0, %0, c7, c10, 4\n"
		"sev"
		: : "r" (0)
	);
#endif
}
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
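
/*
 * Illustrative sketch only (not part of the original header): the
 * LDREX/STREX acquire loop and the barrier + plain store release above
 * correspond roughly to the portable C below.  The example_* names are
 * hypothetical, and the sketch assumes the lock word is a plain 32-bit
 * integer and that GCC/Clang __atomic builtins are available; the real
 * code uses the hand-written assembly above.
 */
static inline void example_spin_lock(arch_spinlock_t *lock)
{
	unsigned int expected = 0;

	/* claim the lock only if the word currently reads 0 (unlocked) */
	while (!__atomic_compare_exchange_n((unsigned int *)&lock->lock,
					    &expected, 1, 0,
					    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		expected = 0;	/* CAS wrote back the observed value; retry from 0 */
		cpu_relax();	/* back off while spinning, like WFE above */
	}
}

static inline void example_spin_unlock(arch_spinlock_t *lock)
{
	/* release store: analogue of smp_mb() + str + dsb_sev() above */
	__atomic_store_n((unsigned int *)&lock->lock, 0, __ATOMIC_RELEASE);
}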
/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
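
/*
 * Illustrative sketch only (not part of the original header): taking the
 * write lock amounts to a single compare-and-swap of 0 -> 0x80000000
 * (bit 31).  example_write_trylock() is a hypothetical helper assuming
 * __atomic builtins and a 32-bit lock word; the kernel uses the
 * LDREX/STREX version above.
 */
static inline int example_write_trylock(arch_rwlock_t *rw)
{
	unsigned int expected = 0;	/* no readers, no writer */

	/* succeeds only when the whole word is 0; acquire ordering on success */
	return __atomic_compare_exchange_n((unsigned int *)&rw->lock,
					   &expected, 0x80000000, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}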
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
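
/*
 * Illustrative sketch only (not part of the original header): readers keep
 * a count in bits 0-30, so the word goes negative exactly when a writer
 * holds bit 31.  Hypothetical example_* helpers using __atomic builtins,
 * mirroring the LDREX/STREXPL logic above.
 */
static inline int example_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old = __atomic_load_n((unsigned int *)&rw->lock,
					   __ATOMIC_RELAXED);

	do {
		if ((int)old < 0)	/* bit 31 set: a writer holds the lock */
			return 0;
	} while (!__atomic_compare_exchange_n((unsigned int *)&rw->lock,
					      &old, old + 1, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
	return 1;
}

static inline void example_read_unlock(arch_rwlock_t *rw)
{
	/* drop our reader count; release ordering replaces the smp_mb() */
	__atomic_fetch_sub((unsigned int *)&rw->lock, 1, __ATOMIC_RELEASE);
}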
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */