
/drivers/staging/hv/hv.c

https://github.com/mstsirkin/kvm
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "hyperv.h"
#include "hyperv_vmbus.h"
/* The one and only */
struct hv_context hv_context = {
        .synic_initialized = false,
        .hypercall_page = NULL,
        .signal_event_param = NULL,
        .signal_event_buffer = NULL,
};
/*
 * query_hypervisor_presence
 * - Query CPUID for the presence of the Windows hypervisor
 */
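/*
 * Note: HVCPUID_VERSION_FEATURES is the standard CPUID feature leaf;
 * HV_PRESENT_BIT (bit 31 of ECX) is the architectural "hypervisor
 * present" flag that any conforming hypervisor sets.
 */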
static int query_hypervisor_presence(void)
{
        unsigned int eax;
        unsigned int ebx;
        unsigned int ecx;
        unsigned int edx;
        unsigned int op;

        eax = 0;
        ebx = 0;
        ecx = 0;
        edx = 0;
        op = HVCPUID_VERSION_FEATURES;
        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx & HV_PRESENT_BIT;
}
/*
 * query_hypervisor_info - Get version info of the Windows hypervisor
 */
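/*
 * CPUID leaf layout used below (per the Hyper-V Top-Level Functional
 * Specification): HVCPUID_VENDOR_MAXFUNCTION returns the maximum
 * hypervisor leaf in EAX, and HVCPUID_VERSION reports the host build
 * number (EAX), major/minor version (high/low 16 bits of EBX),
 * service pack (ECX) and service branch/number (EDX), matching the
 * pr_info() format string in the function body.
 */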
static int query_hypervisor_info(void)
{
        unsigned int eax;
        unsigned int ebx;
        unsigned int ecx;
        unsigned int edx;
        unsigned int max_leaf;
        unsigned int op;

        /*
         * It's assumed that this is called after confirming that Viridian
         * is present. Query id and revision.
         */
        eax = 0;
        ebx = 0;
        ecx = 0;
        edx = 0;
        op = HVCPUID_VENDOR_MAXFUNCTION;
        cpuid(op, &eax, &ebx, &ecx, &edx);

        max_leaf = eax;

        if (max_leaf >= HVCPUID_VERSION) {
                eax = 0;
                ebx = 0;
                ecx = 0;
                edx = 0;
                op = HVCPUID_VERSION;
                cpuid(op, &eax, &ebx, &ecx, &edx);
                pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
                        eax,
                        ebx >> 16,
                        ebx & 0xFFFF,
                        ecx,
                        edx >> 24,
                        edx & 0xFFFFFF);
        }
        return max_leaf;
}
/*
 * do_hypercall - Invoke the specified hypercall
 */
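/*
 * Hypercall register conventions (per the TLFS): on x86_64 the control
 * code is passed in RCX, the input parameter GPA in RDX and the output
 * parameter GPA in R8, with the status returned in RAX; R8 has no GCC
 * constraint letter, hence the separate asm statement that loads it.
 * On 32-bit x86 the same values travel as high/low register pairs:
 * EDX:EAX (control), EBX:ECX (input GPA), EDI:ESI (output GPA), and
 * the status comes back in EDX:EAX.
 */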
static u64 do_hypercall(u64 control, void *input, void *output)
{
#ifdef CONFIG_X86_64
        u64 hv_status = 0;
        u64 input_address = (input) ? virt_to_phys(input) : 0;
        u64 output_address = (output) ? virt_to_phys(output) : 0;
        volatile void *hypercall_page = hv_context.hypercall_page;

        __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
        __asm__ __volatile__("call *%3" : "=a" (hv_status) :
                             "c" (control), "d" (input_address),
                             "m" (hypercall_page));

        return hv_status;
#else
        u32 control_hi = control >> 32;
        u32 control_lo = control & 0xFFFFFFFF;
        u32 hv_status_hi = 1;
        u32 hv_status_lo = 1;
        u64 input_address = (input) ? virt_to_phys(input) : 0;
        u32 input_address_hi = input_address >> 32;
        u32 input_address_lo = input_address & 0xFFFFFFFF;
        u64 output_address = (output) ? virt_to_phys(output) : 0;
        u32 output_address_hi = output_address >> 32;
        u32 output_address_lo = output_address & 0xFFFFFFFF;
        volatile void *hypercall_page = hv_context.hypercall_page;

        __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
                              "=a"(hv_status_lo) : "d" (control_hi),
                              "a" (control_lo), "b" (input_address_hi),
                              "c" (input_address_lo), "D"(output_address_hi),
                              "S"(output_address_lo), "m" (hypercall_page));

        return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
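/*
 * Per the TLFS, the hypercall page cannot be enabled until the guest
 * has registered a nonzero guest OS id via HV_X64_MSR_GUEST_OS_ID;
 * that ordering is why HV_LINUX_GUEST_ID is written before
 * HV_X64_MSR_HYPERCALL is programmed below.
 */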
int hv_init(void)
{
        int ret = 0;
        int max_leaf;
        union hv_x64_msr_hypercall_contents hypercall_msr;
        void *virtaddr = NULL;

        memset(hv_context.synic_event_page, 0, sizeof(void *) * MAX_NUM_CPUS);
        memset(hv_context.synic_message_page, 0,
               sizeof(void *) * MAX_NUM_CPUS);

        if (!query_hypervisor_presence())
                goto cleanup;

        max_leaf = query_hypervisor_info();
        /* HvQueryHypervisorFeatures(maxLeaf); */

        /*
         * We only support running on top of Hyper-V
         */
        rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

        if (hv_context.guestid != 0)
                goto cleanup;

        /* Write our OS info */
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
        hv_context.guestid = HV_LINUX_GUEST_ID;

        /* See if the hypercall page is already set */
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

        /*
         * Allocate the hypercall page memory. The mapping must be
         * executable (PAGE_KERNEL_EXEC) because the hypervisor fills
         * the page with the hypercall trampoline that do_hypercall()
         * calls into.
         * virtaddr = osd_page_alloc(1);
         */
        virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

        if (!virtaddr)
                goto cleanup;

        hypercall_msr.enable = 1;

        hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

        /* Confirm that hypercall page did get setup. */
        hypercall_msr.as_uint64 = 0;
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

        if (!hypercall_msr.enable)
                goto cleanup;

        hv_context.hypercall_page = virtaddr;
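
        /*
         * Hypercall input must be aligned to HV_HYPERCALL_PARAM_ALIGN
         * (8 bytes); the buffer type carries padding so the event
         * parameter can be rounded up with ALIGN() to an aligned
         * address inside the kmalloc'd block.
         */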
        /* Setup the global signal event param for the signal event hypercall */
        hv_context.signal_event_buffer =
                        kmalloc(sizeof(struct hv_input_signal_event_buffer),
                                GFP_KERNEL);
        if (!hv_context.signal_event_buffer)
                goto cleanup;

        hv_context.signal_event_param =
                (struct hv_input_signal_event *)
                        (ALIGN((unsigned long)
                               hv_context.signal_event_buffer,
                               HV_HYPERCALL_PARAM_ALIGN));
        hv_context.signal_event_param->connectionid.asu32 = 0;
        hv_context.signal_event_param->connectionid.u.id =
                VMBUS_EVENT_CONNECTION_ID;
        hv_context.signal_event_param->flag_number = 0;
        hv_context.signal_event_param->rsvdz = 0;

        return ret;

cleanup:
        if (virtaddr) {
                if (hypercall_msr.enable) {
                        hypercall_msr.as_uint64 = 0;
                        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
                }

                vfree(virtaddr);
        }
        ret = -1;
        return ret;
}
/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 */
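/*
 * Note the ordering below: the hypercall page is disabled through
 * HV_X64_MSR_HYPERCALL before its backing memory is vfree'd, so the
 * hypervisor is never left pointing at a freed page.
 */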
void hv_cleanup(void)
{
        union hv_x64_msr_hypercall_contents hypercall_msr;

        kfree(hv_context.signal_event_buffer);
        hv_context.signal_event_buffer = NULL;
        hv_context.signal_event_param = NULL;

        if (hv_context.hypercall_page) {
                hypercall_msr.as_uint64 = 0;
                wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
                vfree(hv_context.hypercall_page);
                hv_context.hypercall_page = NULL;
        }
}
/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
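/*
 * As with the signal-event parameter above, the hypercall input must
 * be aligned to HV_HYPERCALL_PARAM_ALIGN; the local aligned_input
 * wrapper carries a u64 of slack (alignment8) so the message can be
 * placed at an aligned address inside an ordinary kmalloc'd block.
 */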
u16 hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct aligned_input {
                u64 alignment8;
                struct hv_input_post_message msg;
        };

        struct hv_input_post_message *aligned_msg;
        u16 status;
        unsigned long addr;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;

        addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
        if (!addr)
                return -ENOMEM;

        aligned_msg = (struct hv_input_post_message *)
                        (ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));

        aligned_msg->connectionid = connection_id;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
                & 0xFFFF;

        kfree((void *)addr);

        return status;
}
/*
 * hv_signal_event -
 * Signal an event on the specified connection using the hypervisor event IPC.
 *
 * This involves a hypercall.
 */
u16 hv_signal_event(void)
{
        u16 status;

        status = do_hypercall(HVCALL_SIGNAL_EVENT,
                              hv_context.signal_event_param,
                              NULL) & 0xFFFF;

        return status;
}
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e. x2v shim), we need to
 * retrieve the initialized message and event pages. Otherwise, we create and
 * initialize the message and event pages.
 */
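/*
 * The void *irqarg signature matches the on_each_cpu() callback
 * convention; this routine is expected to run once on each CPU with
 * interrupts disabled, which is why smp_processor_id() is safe here
 * and the page allocations use GFP_ATOMIC.
 */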
void hv_synic_init(void *irqarg)
{
        u64 version;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;

        u32 irq_vector = *((u32 *)(irqarg));
        int cpu = smp_processor_id();

        if (!hv_context.hypercall_page)
                return;

        /* Check the version */
        rdmsrl(HV_X64_MSR_SVERSION, version);

        hv_context.synic_message_page[cpu] =
                (void *)get_zeroed_page(GFP_ATOMIC);

        if (hv_context.synic_message_page[cpu] == NULL) {
                pr_err("Unable to allocate SYNIC message page\n");
                goto cleanup;
        }

        hv_context.synic_event_page[cpu] =
                (void *)get_zeroed_page(GFP_ATOMIC);

        if (hv_context.synic_event_page[cpu] == NULL) {
                pr_err("Unable to allocate SYNIC event page\n");
                goto cleanup;
        }

        /* Setup the Synic's message page */
        rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
        simp.simp_enabled = 1;
        simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
                >> PAGE_SHIFT;

        wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

        /* Setup the Synic's event page */
        rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
                >> PAGE_SHIFT;

        wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

        /* Setup the shared SINT. */
        rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.as_uint64 = 0;
        shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
        shared_sint.masked = false;
        shared_sint.auto_eoi = true;

        wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        /* Enable the global synic bit */
        rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
        sctrl.enable = 1;

        wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

        hv_context.synic_initialized = true;
        return;

cleanup:
        if (hv_context.synic_event_page[cpu])
                free_page((unsigned long)hv_context.synic_event_page[cpu]);

        if (hv_context.synic_message_page[cpu])
                free_page((unsigned long)hv_context.synic_message_page[cpu]);

        return;
}
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        int cpu = smp_processor_id();

        if (!hv_context.synic_initialized)
                return;

        rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.masked = 1;

        /* Need to correctly cleanup in the case of SMP!!! */
        /* Disable the interrupt */
        wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;

        wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

        rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;

        wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

        free_page((unsigned long)hv_context.synic_message_page[cpu]);
        free_page((unsigned long)hv_context.synic_event_page[cpu]);
}