PageRenderTime 45ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h

https://github.com/mstsirkin/linux
C Header | 236 lines | 127 code | 23 blank | 86 comment | 1 complexity | 87ad0d52128200beddef8349abd4968e MD5 | raw file
  1. /*
  2. * SMP/VPE-safe functions to access "registers" (see note).
  3. *
  4. * NOTES:
  5. * - These macros use ll/sc instructions, so it is your responsibility to
  6. * ensure these are available on your platform before including this file.
  7. * - The MIPS32 spec states that ll/sc results are undefined for uncached
  8. * accesses. This means they can't be used on HW registers accessed
  9. * through kseg1. Code which requires these macros for this purpose must
  10. * front-end the registers with cached memory "registers" and have a single
  11. * thread update the actual HW registers.
  12. * - A maximum of 2k of code can be inserted between ll and sc. Every
  13. * memory accesses between the instructions will increase the chance of
  14. * sc failing and having to loop.
  15. * - When using custom_read_reg32/custom_write_reg32 only perform the
  16. * necessary logical operations on the register value in between these
  17. * two calls. All other logic should be performed before the first call.
  18. * - There is a bug on the R10000 chips which has a workaround. If you
  19. * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
  20. * to be non-zero. If you are using this header from within linux, you may
  21. * include <asm/war.h> before including this file to have this defined
  22. * appropriately for you.
  23. *
  24. * Copyright 2005-2007 PMC-Sierra, Inc.
  25. *
  26. * This program is free software; you can redistribute it and/or modify it
  27. * under the terms of the GNU General Public License as published by the
  28. * Free Software Foundation; either version 2 of the License, or (at your
  29. * option) any later version.
  30. *
  31. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  32. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  33. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
  34. * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  35. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  36. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  37. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  38. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  39. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  40. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  41. *
  42. * You should have received a copy of the GNU General Public License along
  43. * with this program; if not, write to the Free Software Foundation, Inc., 675
  44. * Mass Ave, Cambridge, MA 02139, USA.
  45. */
#ifndef __ASM_REGOPS_H__
#define __ASM_REGOPS_H__

#include <linux/types.h>

#include <asm/war.h>

/* Default to "no R10000 workaround" if <asm/war.h> did not define it. */
#ifndef R10000_LLSC_WAR
#define R10000_LLSC_WAR 0
#endif

/*
 * The R10000 ll/sc bug workaround (see header comment above) is to use
 * the branch-likely form (beqzl) when looping on a failed sc; otherwise
 * the plain beqz is used. __beqz is spliced into each asm string below.
 */
#if R10000_LLSC_WAR == 1
#define __beqz "beqzl "
#else
#define __beqz "beqz "
#endif

/* Fallback u32 definition when this header is used outside a Linux build. */
#ifndef _LINUX_TYPES_H
typedef unsigned int u32;
#endif
/*
 * set_value_reg32 - atomically set the masked bits of *addr to 'value'
 * @addr:  cached memory "register" to update (must support ll/sc; see
 *         the kseg1 note in the header comment)
 * @mask:  bits to replace
 * @value: replacement bits; callers should only set bits inside @mask
 *
 * Performs *addr = (*addr & ~mask) | value as an ll/sc read-modify-write
 * loop, retrying from label 1: until the sc succeeds, so the update is
 * SMP/VPE-safe.
 */
static inline void set_value_reg32(volatile u32 *const addr,
				   u32 const mask,
				   u32 const value)
{
	u32 temp;

	__asm__ __volatile__(
	" .set push \n"
	" .set mips3 \n"                     /* ll/sc need a MIPS III+ ISA mode */
	"1: ll %0, %1 # set_value_reg32 \n"  /* temp = *addr, begin linked sequence */
	" and %0, %2 \n"                     /* clear masked bits (%2 is ~mask) */
	" or %0, %3 \n"                      /* merge in the new value bits */
	" sc %0, %1 \n"                      /* conditional store; %0 = 0 on failure */
	" "__beqz"%0, 1b \n"                 /* sc failed -> retry whole sequence */
	" nop \n"                            /* branch delay slot */
	" .set pop \n"
	: "=&r" (temp), "=m" (*addr)
	: "ir" (~mask), "ir" (value), "m" (*addr));
}
/*
 * set_reg32 - atomically set all the masked bits of *addr to '1'
 * @addr: cached memory "register" to update (must support ll/sc)
 * @mask: bits to set
 *
 * Performs *addr |= mask as an ll/sc read-modify-write loop, retrying
 * from label 1: until the sc succeeds, so the update is SMP/VPE-safe.
 */
static inline void set_reg32(volatile u32 *const addr,
			     u32 const mask)
{
	u32 temp;

	__asm__ __volatile__(
	" .set push \n"
	" .set mips3 \n"                /* ll/sc need a MIPS III+ ISA mode */
	"1: ll %0, %1 # set_reg32 \n"   /* temp = *addr, begin linked sequence */
	" or %0, %2 \n"                 /* set the masked bits */
	" sc %0, %1 \n"                 /* conditional store; %0 = 0 on failure */
	" "__beqz"%0, 1b \n"            /* sc failed -> retry whole sequence */
	" nop \n"                       /* branch delay slot */
	" .set pop \n"
	: "=&r" (temp), "=m" (*addr)
	: "ir" (mask), "m" (*addr));
}
/*
 * clear_reg32 - atomically set all the masked bits of *addr to '0'
 * @addr: cached memory "register" to update (must support ll/sc)
 * @mask: bits to clear
 *
 * Performs *addr &= ~mask as an ll/sc read-modify-write loop, retrying
 * from label 1: until the sc succeeds, so the update is SMP/VPE-safe.
 */
static inline void clear_reg32(volatile u32 *const addr,
			       u32 const mask)
{
	u32 temp;

	__asm__ __volatile__(
	" .set push \n"
	" .set mips3 \n"                 /* ll/sc need a MIPS III+ ISA mode */
	"1: ll %0, %1 # clear_reg32 \n"  /* temp = *addr, begin linked sequence */
	" and %0, %2 \n"                 /* clear masked bits (%2 is ~mask) */
	" sc %0, %1 \n"                  /* conditional store; %0 = 0 on failure */
	" "__beqz"%0, 1b \n"             /* sc failed -> retry whole sequence */
	" nop \n"                        /* branch delay slot */
	" .set pop \n"
	: "=&r" (temp), "=m" (*addr)
	: "ir" (~mask), "m" (*addr));
}
/*
 * toggle_reg32 - atomically invert all the masked bits of *addr
 * @addr: cached memory "register" to update (must support ll/sc)
 * @mask: bits to toggle ('0' -> '1' and '1' -> '0')
 *
 * Performs *addr ^= mask as an ll/sc read-modify-write loop, retrying
 * from label 1: until the sc succeeds, so the update is SMP/VPE-safe.
 */
static inline void toggle_reg32(volatile u32 *const addr,
				u32 const mask)
{
	u32 temp;

	__asm__ __volatile__(
	" .set push \n"
	" .set mips3 \n"                  /* ll/sc need a MIPS III+ ISA mode */
	"1: ll %0, %1 # toggle_reg32 \n"  /* temp = *addr, begin linked sequence */
	" xor %0, %2 \n"                  /* flip the masked bits */
	" sc %0, %1 \n"                   /* conditional store; %0 = 0 on failure */
	" "__beqz"%0, 1b \n"              /* sc failed -> retry whole sequence */
	" nop \n"                         /* branch delay slot */
	" .set pop \n"
	: "=&r" (temp), "=m" (*addr)
	: "ir" (mask), "m" (*addr));
}
/*
 * read_reg32 - read the masked bits of *addr; all other bits return '0'
 * @addr: memory "register" to read
 * @mask: bits of interest
 *
 * Plain lw + and — no ll/sc loop is needed for a read-only access.
 * Returns (*addr & mask).
 */
static inline u32 read_reg32(volatile u32 *const addr,
			     u32 const mask)
{
	u32 temp;

	__asm__ __volatile__(
	" .set push \n"
	" .set noreorder \n"      /* keep the lw/and pair exactly as written */
	" lw %0, %1 # read \n"    /* temp = *addr */
	" and %0, %2 # mask \n"   /* keep only the requested bits */
	" .set pop \n"
	: "=&r" (temp)
	: "m" (*addr), "ir" (mask));

	return temp;
}
/*
 * blocking_read_reg32 - Read address with blocking load
 * @addr: memory "register" to read
 *
 * Uncached writes need to be read back to ensure they reach RAM.
 * The returned value must be 'used' to prevent the load from becoming a
 * non-blocking load — hence the dummy 'move %0, %0' consuming the loaded
 * value immediately after the lw.
 */
static inline u32 blocking_read_reg32(volatile u32 *const addr)
{
	u32 temp;

	__asm__ __volatile__(
	" .set push \n"
	" .set noreorder \n"       /* keep the consuming move right after the lw */
	" lw %0, %1 # read \n"     /* temp = *addr */
	" move %0, %0 # block \n"  /* consume the result so the load blocks */
	" .set pop \n"
	: "=&r" (temp)
	: "m" (*addr));

	return temp;
}
/*
 * For special strange cases only:
 *
 * If you need custom processing within a ll/sc loop, use the following macros
 * VERY CAREFULLY:
 *
 *   u32 tmp;                            <-- Define a variable to hold the data
 *
 *   custom_read_reg32(address, tmp);    <-- Reads the address and puts the
 *                                           value in the 'tmp' variable given
 *
 * From here on out, you are (basically) atomic, so don't do anything too
 * fancy!
 * Also, this code may loop if the end of this block fails to write
 * everything back safely due to the other CPU, so do NOT do anything
 * with side-effects!
 *
 *   custom_write_reg32(address, tmp);   <-- Writes back 'tmp' safely.
 */

/*
 * custom_read_reg32 - open the ll half of an ll/sc sequence.
 *
 * Emits the 'll' and the '1:' retry label; custom_write_reg32() supplies
 * the matching 'sc' and branches back to this label on failure, so the
 * two macros MUST be used as a pair within the same function.
 */
#define custom_read_reg32(address, tmp)				\
	__asm__ __volatile__(					\
	" .set push \n"						\
	" .set mips3 \n"					\
	"1: ll %0, %1 #custom_read_reg32 \n"			\
	" .set pop \n"						\
	: "=r" (tmp), "=m" (*address)				\
	: "m" (*address))
/*
 * custom_write_reg32 - close the sc half of an ll/sc sequence.
 *
 * Emits the 'sc' and, on failure, branches back to the '1:' label that
 * a preceding custom_read_reg32() in the same function emitted — the
 * whole read/modify/write block is then re-executed. 'tmp' is both
 * consumed (the value to store, via the "0" matching constraint) and
 * clobbered (sc overwrites it with the success flag).
 */
#define custom_write_reg32(address, tmp)			\
	__asm__ __volatile__(					\
	" .set push \n"						\
	" .set mips3 \n"					\
	" sc %0, %1 #custom_write_reg32 \n"			\
	" "__beqz"%0, 1b \n"					\
	" nop \n"						\
	" .set pop \n"						\
	: "=&r" (tmp), "=m" (*address)				\
	: "0" (tmp), "m" (*address))
  213. #endif /* __ASM_REGOPS_H__ */