
/arch/s390/kvm/gaccess.h

https://bitbucket.org/cresqo/cm7-p500-kernel
C Header | 279 lines
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
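
/*
 * Translate a guest real address into a user space pointer into the
 * guest memory area: apply low-core prefixing for the first two pages,
 * check the result against the guest memory size and add the guest
 * memory origin.  Out-of-range addresses yield an ERR_PTR(-EFAULT).
 */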
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	if (guestaddr > memsize)
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}
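
/*
 * Fetch an 8/4/2/1 byte value from guest memory.  Misaligned accesses
 * (except for the u8 variant) are a caller bug and trigger BUG_ON();
 * addresses outside guest memory return -EFAULT.
 */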
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR(uptr))
		return PTR_ERR(uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}
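
/*
 * Store an 8/4/2/1 byte value into guest memory, mirroring the
 * get_guest_u* helpers above.
 */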
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}
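
/*
 * Byte-wise slow path used when a copy crosses the boundaries of the
 * low-core/prefix pages, so each byte gets its own address translation.
 */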
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       const void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	const u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}
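
/*
 * Copy a buffer into guest memory.  Ranges that cross the low-core or
 * prefix page boundaries fall back to the byte-wise slow path; all
 * other ranges are translated once and copied with copy_to_user().
 */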
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				const void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);

slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
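
/*
 * Byte-wise slow path for reads that cross the low-core/prefix page
 * boundaries.
 */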
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}
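
/*
 * Copy a buffer from guest memory, the read counterpart of
 * copy_to_guest().
 */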
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);

slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
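
/*
 * Absolute variants: copy to/from guest absolute addresses, i.e.
 * without low-core prefixing, applying only the origin offset and the
 * guest memory size and overflow checks.
 */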
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 const void *from, unsigned long n)
{
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
}
#endif
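
For orientation, here is a minimal sketch of how a caller (for example an instruction-interception handler) might use these accessors. It is not part of the file above: the function name example_read_echo and the choice of offsets are made up for illustration, and a real caller must pass addresses with the alignment the helpers assert.

	/* Hypothetical caller: read one doubleword of guest storage and write
	 * it back eight bytes further on, showing the error convention of the
	 * accessors (0 on success, negative errno such as -EFAULT on error). */
	static int example_read_echo(struct kvm_vcpu *vcpu, unsigned long gaddr)
	{
		u64 val;
		int rc;

		rc = get_guest_u64(vcpu, gaddr, &val);	/* gaddr must be 8-byte aligned */
		if (rc)
			return rc;

		return put_guest_u64(vcpu, gaddr + 8, val);
	}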