PageRenderTime 79ms CodeModel.GetById 38ms RepoModel.GetById 0ms app.codeStats 0ms

/include/asm-generic/mshyperv.h

https://github.com/paulmckrcu/linux
C Header | 283 lines | 171 code | 39 blank | 73 comment | 14 complexity | 0cabf8da95c9377c39c0c6eab19e8ccd MD5 | raw file
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Linux-specific definitions for managing interactions with Microsoft's
  4. * Hyper-V hypervisor. The definitions in this file are architecture
  5. * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
  6. * that are specific to architecture <arch>.
  7. *
  8. * Definitions that are specified in the Hyper-V Top Level Functional
  9. * Spec (TLFS) should not go in this file, but should instead go in
  10. * hyperv-tlfs.h.
  11. *
  12. * Copyright (C) 2019, Microsoft, Inc.
  13. *
  14. * Author : Michael Kelley <mikelley@microsoft.com>
  15. */
  16. #ifndef _ASM_GENERIC_MSHYPERV_H
  17. #define _ASM_GENERIC_MSHYPERV_H
  18. #include <linux/types.h>
  19. #include <linux/atomic.h>
  20. #include <linux/bitops.h>
  21. #include <linux/cpumask.h>
  22. #include <linux/nmi.h>
  23. #include <asm/ptrace.h>
  24. #include <asm/hyperv-tlfs.h>
/*
 * Hyper-V feature/capability information reported by the hypervisor,
 * cached in the global 'ms_hyperv' declared below. Field layout must
 * not change: it mirrors data read from hypervisor CPUID leaves.
 */
struct ms_hyperv_info {
	u32 features;
	u32 priv_high;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
	u32 isolation_config_a;
	/* isolation_config_b is accessible both raw and as bit fields */
	union {
		u32 isolation_config_b;
		struct {
			u32 cvm_type : 4;
			u32 reserved1 : 1;
			u32 shared_gpa_boundary_active : 1;
			u32 shared_gpa_boundary_bits : 6;
			u32 reserved2 : 20;
		};
	};
	u64 shared_gpa_boundary;
};
/* Boot-time capability snapshot; defined once in common Hyper-V code. */
extern struct ms_hyperv_info ms_hyperv;

/*
 * Per-CPU hypercall argument pointers.
 * NOTE(review): presumably each points at a per-CPU page shared with the
 * hypervisor — confirm against the defining .c file.
 */
extern void __percpu **hyperv_pcpu_input_arg;
extern void __percpu **hyperv_pcpu_output_arg;

/* Architecture-provided hypercall entry points. */
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);

extern bool hv_isolation_type_snp(void);
  52. /* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
  53. static inline int hv_result(u64 status)
  54. {
  55. return status & HV_HYPERCALL_RESULT_MASK;
  56. }
  57. static inline bool hv_result_success(u64 status)
  58. {
  59. return hv_result(status) == HV_STATUS_SUCCESS;
  60. }
  61. static inline unsigned int hv_repcomp(u64 status)
  62. {
  63. /* Bits [43:32] of status have 'Reps completed' data. */
  64. return (status & HV_HYPERCALL_REP_COMP_MASK) >>
  65. HV_HYPERCALL_REP_COMP_OFFSET;
  66. }
/*
 * Rep hypercalls. Callers of these functions are supposed to ensure that
 * rep_count and varhead_size comply with Hyper-V hypercall definition.
 *
 * Issues the hypercall repeatedly, restarting at the first rep the
 * hypervisor has not yet completed, until either all rep_count reps are
 * done (returns the final success status) or a call fails (returns that
 * failure status immediately).
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	/* Encode variable header size and total rep count into 'control'. */
	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if (!hv_result_success(status))
			return status;

		/* How many reps the hypervisor has completed so far. */
		rep_comp = hv_repcomp(status);

		/* Next iteration restarts at the first unfinished rep. */
		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		/* Long rep sequences can run a while; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
  90. /* Generate the guest OS identifier as described in the Hyper-V TLFS */
  91. static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
  92. __u64 d_info2)
  93. {
  94. __u64 guest_id = 0;
  95. guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
  96. guest_id |= (d_info1 << 48);
  97. guest_id |= (kernel_version << 16);
  98. guest_id |= d_info2;
  99. return guest_id;
  100. }
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * In case we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_set_register(HV_REGISTER_EOM, 0);
	}
}
/* Registration hooks for VMBus, stimer0, kexec and crash callbacks. */
void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
void hv_remove_stimer0_handler(void);
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

extern int vmbus_interrupt;
extern int vmbus_irq;

extern bool hv_root_partition;
#if IS_ENABLED(CONFIG_HYPERV)

/*
 * Hypervisor's notion of virtual processor ID is different from
 * Linux' notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Setup a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

extern u64 (*hv_read_reference_counter)(void);

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

/* Common (arch-independent) init/teardown and CPU hotplug callbacks. */
int __init hv_common_init(void);
void __init hv_common_free(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);

/* Page allocation helpers; free takes the address returned by alloc. */
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);
  164. /**
  165. * hv_cpu_number_to_vp_number() - Map CPU to VP.
  166. * @cpu_number: CPU number in Linux terms
  167. *
  168. * This function returns the mapping between the Linux processor
  169. * number and the hypervisor's virtual processor number, useful
  170. * in making hypercalls and such that talk about specific
  171. * processors.
  172. *
  173. * Return: Virtual processor number in Hyper-V terms
  174. */
  175. static inline int hv_cpu_number_to_vp_number(int cpu_number)
  176. {
  177. return hv_vp_index[cpu_number];
  178. }
/*
 * Fill @vpset's sparse bank representation from @cpus, optionally
 * skipping the calling CPU. Returns the number of banks used, 0 when
 * the VP index space cannot be represented, or -1 when a CPU has no
 * valid VP mapping yet. Caller must keep preemption disabled when
 * exclude_self is used (smp_processor_id() below must be stable).
 */
static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus,
				    bool exclude_self)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
	int this_cpu = smp_processor_id();

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
	 * structs are not cleared between calls, we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		if (exclude_self && cpu == this_cpu)
			continue;
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		/* Each bank covers 64 consecutive VP numbers. */
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		/* Track the highest bank actually populated. */
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
  214. static inline int cpumask_to_vpset(struct hv_vpset *vpset,
  215. const struct cpumask *cpus)
  216. {
  217. return __cpumask_to_vpset(vpset, cpus, false);
  218. }
  219. static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
  220. const struct cpumask *cpus)
  221. {
  222. WARN_ON_ONCE(preemptible());
  223. return __cpumask_to_vpset(vpset, cpus, true);
  224. }
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
/* NOTE(review): redundant with the extern declaration earlier in this file. */
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
#else /* CONFIG_HYPERV */
/* Stubs so callers still compile when Hyper-V support is configured out. */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
	return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */

#endif /* _ASM_GENERIC_MSHYPERV_H */