
/arch/x86/include/asm/uaccess_64.h

https://bitbucket.org/cresqo/cm7-p500-kernel

#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	alternative_call(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
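
/*
 * Note: alternative_call() emits a call to copy_user_generic_unrolled
 * by default and lets the boot-time "alternatives" patching replace it
 * with a call to copy_user_generic_string on CPUs that advertise
 * X86_FEATURE_REP_GOOD, i.e. CPUs with fast REP string moves.  The
 * clobber list names the registers either variant may modify.
 */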

__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return n;
}

static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();

	return _copy_to_user(dst, src, size);
}
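
/*
 * Usage sketch: copy_{from,to}_user() return the number of bytes that
 * could NOT be copied, so zero means success.  A typical caller (the
 * struct and pointer names below are illustrative only, not part of
 * this header) looks like:
 *
 *	struct my_params p;
 *
 *	if (copy_from_user(&p, uptr, sizeof(p)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uptr, &p, sizeof(p)))
 *		return -EFAULT;
 */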

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
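
/*
 * Note: the final argument of __get_user_asm() is the error value
 * stored in 'ret' if that particular access faults, i.e. the number of
 * bytes still uncopied.  That is why the first mov of the 10- and
 * 16-byte cases reports the full size while the trailing mov reports
 * only its own 2 or 8 bytes.
 */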

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
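
/*
 * Note: the empty asm("":::"memory") between the two halves of the
 * 10- and 16-byte cases is a compiler barrier; it keeps the compiler
 * from reordering the second __put_user_asm() across the fault check
 * on the first, preserving the partial-copy accounting above.
 */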

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
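
/*
 * Note: since both pointers are user space here, each constant-size
 * case bounces the data through a kernel temporary ('tmp') and only
 * attempts the store once the load has succeeded.
 */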

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
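
/*
 * Note: the _inatomic variants skip might_fault() and go straight to
 * copy_user_generic(); they are meant for callers that cannot sleep
 * (e.g. running with page faults disabled) and that handle a non-zero
 * "bytes uncopied" return themselves.
 */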

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
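
/*
 * Note: the _nocache variants copy with non-temporal stores so that
 * large, touched-once buffers do not evict useful cache lines.  The
 * zerorest flag asks __copy_user_nocache() to zero the uncopied tail
 * of the destination on a fault, matching __copy_from_user()
 * semantics; the inatomic variant leaves the tail alone.
 */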

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */