PageRenderTime 36ms CodeModel.GetById 22ms RepoModel.GetById 0ms app.codeStats 1ms

/libvirt-0.9.11/src/xen/xen_hypervisor.c

#
C | 3720 lines | 2636 code | 476 blank | 608 comment | 591 complexity | f2299dc4a0741d9f9279da5608c34bc1 MD5 | raw file
Possible License(s): LGPL-2.1
  1. /*
  2. * xen_internal.c: direct access to Xen hypervisor level
  3. *
  4. * Copyright (C) 2005-2012 Red Hat, Inc.
  5. *
  6. * See COPYING.LIB for the License of this software
  7. *
  8. * Daniel Veillard <veillard@redhat.com>
  9. */
  10. #include <config.h>
  11. #include <stdio.h>
  12. #include <string.h>
  13. /* required for uint8_t, uint32_t, etc ... */
  14. #include <stdint.h>
  15. #include <sys/types.h>
  16. #include <sys/stat.h>
  17. #include <unistd.h>
  18. #include <fcntl.h>
  19. #include <sys/mman.h>
  20. #include <sys/ioctl.h>
  21. #include <limits.h>
  22. #include <stdint.h>
  23. #include <regex.h>
  24. #include <errno.h>
  25. #include <sys/utsname.h>
  26. #ifdef __sun
  27. # include <sys/systeminfo.h>
  28. # include <priv.h>
  29. # ifndef PRIV_XVM_CONTROL
  30. # define PRIV_XVM_CONTROL ((const char *)"xvm_control")
  31. # endif
  32. #endif /* __sun */
  33. /* required for dom0_getdomaininfo_t */
  34. #include <xen/dom0_ops.h>
  35. #include <xen/version.h>
  36. #ifdef HAVE_XEN_LINUX_PRIVCMD_H
  37. # include <xen/linux/privcmd.h>
  38. #else
  39. # ifdef HAVE_XEN_SYS_PRIVCMD_H
  40. # include <xen/sys/privcmd.h>
  41. # endif
  42. #endif
  43. /* required for shutdown flags */
  44. #include <xen/sched.h>
  45. #include "virterror_internal.h"
  46. #include "logging.h"
  47. #include "datatypes.h"
  48. #include "driver.h"
  49. #include "util.h"
  50. #include "xen_driver.h"
  51. #include "xen_hypervisor.h"
  52. #include "xs_internal.h"
  53. #include "stats_linux.h"
  54. #include "block_stats.h"
  55. #include "xend_internal.h"
  56. #include "buf.h"
  57. #include "capabilities.h"
  58. #include "memory.h"
  59. #include "virfile.h"
  60. #include "virnodesuspend.h"
  61. #include "virtypedparam.h"
  62. #define VIR_FROM_THIS VIR_FROM_XEN
  63. /*
  64. * so far there is 2 versions of the structures usable for doing
  65. * hypervisor calls.
  66. */
  67. /* the old one */
  68. typedef struct v0_hypercall_struct {
  69. unsigned long op;
  70. unsigned long arg[5];
  71. } v0_hypercall_t;
  72. #ifdef __linux__
  73. # define XEN_V0_IOCTL_HYPERCALL_CMD \
  74. _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))
  75. /* the new one */
  76. typedef struct v1_hypercall_struct
  77. {
  78. uint64_t op;
  79. uint64_t arg[5];
  80. } v1_hypercall_t;
  81. # define XEN_V1_IOCTL_HYPERCALL_CMD \
  82. _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
  83. typedef v1_hypercall_t hypercall_t;
  84. #elif defined(__sun)
  85. typedef privcmd_hypercall_t hypercall_t;
  86. #else
  87. # error "unsupported platform"
  88. #endif
  89. #ifndef __HYPERVISOR_sysctl
  90. # define __HYPERVISOR_sysctl 35
  91. #endif
  92. #ifndef __HYPERVISOR_domctl
  93. # define __HYPERVISOR_domctl 36
  94. #endif
  95. #ifdef WITH_RHEL5_API
  96. # define SYS_IFACE_MIN_VERS_NUMA 3
  97. #else
  98. # define SYS_IFACE_MIN_VERS_NUMA 4
  99. #endif
  100. static int xen_ioctl_hypercall_cmd = 0;
  101. static int initialized = 0;
  102. static int in_init = 0;
  103. static struct xenHypervisorVersions hv_versions = {
  104. .hv = 0,
  105. .hypervisor = 2,
  106. .sys_interface = -1,
  107. .dom_interface = -1,
  108. };
  109. static int kb_per_pages = 0;
  110. /* Regular expressions used by xenHypervisorGetCapabilities, and
  111. * compiled once by xenHypervisorInit. Note that these are POSIX.2
  112. * extended regular expressions (regex(7)).
  113. */
  114. static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
  115. static regex_t flags_hvm_rec;
  116. static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
  117. static regex_t flags_pae_rec;
  118. static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
  119. static regex_t xen_cap_rec;
  120. /*
  121. * The content of the structures for a getdomaininfolist system hypercall
  122. */
  123. #ifndef DOMFLAGS_DYING
  124. # define DOMFLAGS_DYING (1<<0) /* Domain is scheduled to die. */
  125. # define DOMFLAGS_HVM (1<<1) /* Domain is HVM */
  126. # define DOMFLAGS_SHUTDOWN (1<<2) /* The guest OS has shut down. */
  127. # define DOMFLAGS_PAUSED (1<<3) /* Currently paused by control software. */
  128. # define DOMFLAGS_BLOCKED (1<<4) /* Currently blocked pending an event. */
  129. # define DOMFLAGS_RUNNING (1<<5) /* Domain is currently running. */
  130. # define DOMFLAGS_CPUMASK 255 /* CPU to which this domain is bound. */
  131. # define DOMFLAGS_CPUSHIFT 8
  132. # define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */
  133. # define DOMFLAGS_SHUTDOWNSHIFT 16
  134. #endif
  135. /*
  136. * These flags explain why a system is in the state of "shutdown". Normally,
  137. * They are defined in xen/sched.h
  138. */
  139. #ifndef SHUTDOWN_poweroff
  140. # define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
  141. # define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
  142. # define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
  143. # define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
  144. #endif
  145. #define XEN_V0_OP_GETDOMAININFOLIST 38
  146. #define XEN_V1_OP_GETDOMAININFOLIST 38
  147. #define XEN_V2_OP_GETDOMAININFOLIST 6
  148. struct xen_v0_getdomaininfo {
  149. domid_t domain; /* the domain number */
  150. uint32_t flags; /* flags, see before */
  151. uint64_t tot_pages; /* total number of pages used */
  152. uint64_t max_pages; /* maximum number of pages allowed */
  153. unsigned long shared_info_frame; /* MFN of shared_info struct */
  154. uint64_t cpu_time; /* CPU time used */
  155. uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
  156. uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
  157. uint32_t ssidref;
  158. xen_domain_handle_t handle;
  159. };
  160. typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;
  161. struct xen_v2_getdomaininfo {
  162. domid_t domain; /* the domain number */
  163. uint32_t flags; /* flags, see before */
  164. uint64_t tot_pages; /* total number of pages used */
  165. uint64_t max_pages; /* maximum number of pages allowed */
  166. uint64_t shared_info_frame; /* MFN of shared_info struct */
  167. uint64_t cpu_time; /* CPU time used */
  168. uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
  169. uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
  170. uint32_t ssidref;
  171. xen_domain_handle_t handle;
  172. };
  173. typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;
  174. /* As of Hypervisor Call v2, DomCtl v5 we are now 8-byte aligned
  175. even on 32-bit archs when dealing with uint64_t */
  176. #define ALIGN_64 __attribute__((aligned(8)))
  177. struct xen_v2d5_getdomaininfo {
  178. domid_t domain; /* the domain number */
  179. uint32_t flags; /* flags, see before */
  180. uint64_t tot_pages ALIGN_64; /* total number of pages used */
  181. uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
  182. uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
  183. uint64_t cpu_time ALIGN_64; /* CPU time used */
  184. uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
  185. uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
  186. uint32_t ssidref;
  187. xen_domain_handle_t handle;
  188. };
  189. typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;
  190. struct xen_v2d6_getdomaininfo {
  191. domid_t domain; /* the domain number */
  192. uint32_t flags; /* flags, see before */
  193. uint64_t tot_pages ALIGN_64; /* total number of pages used */
  194. uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
  195. uint64_t shr_pages ALIGN_64; /* number of shared pages */
  196. uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
  197. uint64_t cpu_time ALIGN_64; /* CPU time used */
  198. uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
  199. uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
  200. uint32_t ssidref;
  201. xen_domain_handle_t handle;
  202. };
  203. typedef struct xen_v2d6_getdomaininfo xen_v2d6_getdomaininfo;
  204. struct xen_v2d7_getdomaininfo {
  205. domid_t domain; /* the domain number */
  206. uint32_t flags; /* flags, see before */
  207. uint64_t tot_pages ALIGN_64; /* total number of pages used */
  208. uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
  209. uint64_t shr_pages ALIGN_64; /* number of shared pages */
  210. uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
  211. uint64_t cpu_time ALIGN_64; /* CPU time used */
  212. uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
  213. uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
  214. uint32_t ssidref;
  215. xen_domain_handle_t handle;
  216. uint32_t cpupool;
  217. };
  218. typedef struct xen_v2d7_getdomaininfo xen_v2d7_getdomaininfo;
  219. struct xen_v2d8_getdomaininfo {
  220. domid_t domain; /* the domain number */
  221. uint32_t flags; /* flags, see before */
  222. uint64_t tot_pages ALIGN_64; /* total number of pages used */
  223. uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
  224. uint64_t shr_pages ALIGN_64; /* number of shared pages */
  225. uint64_t paged_pages ALIGN_64; /* number of paged pages */
  226. uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
  227. uint64_t cpu_time ALIGN_64; /* CPU time used */
  228. uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
  229. uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
  230. uint32_t ssidref;
  231. xen_domain_handle_t handle;
  232. uint32_t cpupool;
  233. };
  234. typedef struct xen_v2d8_getdomaininfo xen_v2d8_getdomaininfo;
  235. union xen_getdomaininfo {
  236. struct xen_v0_getdomaininfo v0;
  237. struct xen_v2_getdomaininfo v2;
  238. struct xen_v2d5_getdomaininfo v2d5;
  239. struct xen_v2d6_getdomaininfo v2d6;
  240. struct xen_v2d7_getdomaininfo v2d7;
  241. struct xen_v2d8_getdomaininfo v2d8;
  242. };
  243. typedef union xen_getdomaininfo xen_getdomaininfo;
  244. union xen_getdomaininfolist {
  245. struct xen_v0_getdomaininfo *v0;
  246. struct xen_v2_getdomaininfo *v2;
  247. struct xen_v2d5_getdomaininfo *v2d5;
  248. struct xen_v2d6_getdomaininfo *v2d6;
  249. struct xen_v2d7_getdomaininfo *v2d7;
  250. struct xen_v2d8_getdomaininfo *v2d8;
  251. };
  252. typedef union xen_getdomaininfolist xen_getdomaininfolist;
  253. struct xen_v2_getschedulerid {
  254. uint32_t sched_id; /* Get Scheduler ID from Xen */
  255. };
  256. typedef struct xen_v2_getschedulerid xen_v2_getschedulerid;
  257. union xen_getschedulerid {
  258. struct xen_v2_getschedulerid *v2;
  259. };
  260. typedef union xen_getschedulerid xen_getschedulerid;
  261. struct xen_v2s4_availheap {
  262. uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
  263. uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
  264. int32_t node; /* NUMA node (-1 for sum across all nodes). */
  265. uint64_t avail_bytes; /* Bytes available in the specified region. */
  266. };
  267. typedef struct xen_v2s4_availheap xen_v2s4_availheap;
  268. struct xen_v2s5_availheap {
  269. uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
  270. uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
  271. int32_t node; /* NUMA node (-1 for sum across all nodes). */
  272. uint64_t avail_bytes ALIGN_64; /* Bytes available in the specified region. */
  273. };
  274. typedef struct xen_v2s5_availheap xen_v2s5_availheap;
  275. #define XEN_GETDOMAININFOLIST_ALLOC(domlist, size) \
  276. (hv_versions.hypervisor < 2 ? \
  277. (VIR_ALLOC_N(domlist.v0, (size)) == 0) : \
  278. (hv_versions.dom_interface >= 8 ? \
  279. (VIR_ALLOC_N(domlist.v2d8, (size)) == 0) : \
  280. (hv_versions.dom_interface == 7 ? \
  281. (VIR_ALLOC_N(domlist.v2d7, (size)) == 0) : \
  282. (hv_versions.dom_interface == 6 ? \
  283. (VIR_ALLOC_N(domlist.v2d6, (size)) == 0) : \
  284. (hv_versions.dom_interface == 5 ? \
  285. (VIR_ALLOC_N(domlist.v2d5, (size)) == 0) : \
  286. (VIR_ALLOC_N(domlist.v2, (size)) == 0))))))
  287. #define XEN_GETDOMAININFOLIST_FREE(domlist) \
  288. (hv_versions.hypervisor < 2 ? \
  289. VIR_FREE(domlist.v0) : \
  290. (hv_versions.dom_interface >= 8 ? \
  291. VIR_FREE(domlist.v2d8) : \
  292. (hv_versions.dom_interface == 7 ? \
  293. VIR_FREE(domlist.v2d7) : \
  294. (hv_versions.dom_interface == 6 ? \
  295. VIR_FREE(domlist.v2d6) : \
  296. (hv_versions.dom_interface == 5 ? \
  297. VIR_FREE(domlist.v2d5) : \
  298. VIR_FREE(domlist.v2))))))
  299. #define XEN_GETDOMAININFOLIST_CLEAR(domlist, size) \
  300. (hv_versions.hypervisor < 2 ? \
  301. memset(domlist.v0, 0, sizeof(*domlist.v0) * size) : \
  302. (hv_versions.dom_interface >= 8 ? \
  303. memset(domlist.v2d8, 0, sizeof(*domlist.v2d8) * size) : \
  304. (hv_versions.dom_interface == 7 ? \
  305. memset(domlist.v2d7, 0, sizeof(*domlist.v2d7) * size) : \
  306. (hv_versions.dom_interface == 6 ? \
  307. memset(domlist.v2d6, 0, sizeof(*domlist.v2d6) * size) : \
  308. (hv_versions.dom_interface == 5 ? \
  309. memset(domlist.v2d5, 0, sizeof(*domlist.v2d5) * size) : \
  310. memset(domlist.v2, 0, sizeof(*domlist.v2) * size))))))
  311. #define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n) \
  312. (hv_versions.hypervisor < 2 ? \
  313. domlist.v0[n].domain : \
  314. (hv_versions.dom_interface >= 8 ? \
  315. domlist.v2d8[n].domain : \
  316. (hv_versions.dom_interface == 7 ? \
  317. domlist.v2d7[n].domain : \
  318. (hv_versions.dom_interface == 6 ? \
  319. domlist.v2d6[n].domain : \
  320. (hv_versions.dom_interface == 5 ? \
  321. domlist.v2d5[n].domain : \
  322. domlist.v2[n].domain)))))
  323. #define XEN_GETDOMAININFOLIST_UUID(domlist, n) \
  324. (hv_versions.hypervisor < 2 ? \
  325. domlist.v0[n].handle : \
  326. (hv_versions.dom_interface >= 8 ? \
  327. domlist.v2d8[n].handle : \
  328. (hv_versions.dom_interface == 7 ? \
  329. domlist.v2d7[n].handle : \
  330. (hv_versions.dom_interface == 6 ? \
  331. domlist.v2d6[n].handle : \
  332. (hv_versions.dom_interface == 5 ? \
  333. domlist.v2d5[n].handle : \
  334. domlist.v2[n].handle)))))
  335. #define XEN_GETDOMAININFOLIST_DATA(domlist) \
  336. (hv_versions.hypervisor < 2 ? \
  337. (void*)(domlist->v0) : \
  338. (hv_versions.dom_interface >= 8 ? \
  339. (void*)(domlist->v2d8) : \
  340. (hv_versions.dom_interface == 7 ? \
  341. (void*)(domlist->v2d7) : \
  342. (hv_versions.dom_interface == 6 ? \
  343. (void*)(domlist->v2d6) : \
  344. (hv_versions.dom_interface == 5 ? \
  345. (void*)(domlist->v2d5) : \
  346. (void*)(domlist->v2))))))
  347. #define XEN_GETDOMAININFO_SIZE \
  348. (hv_versions.hypervisor < 2 ? \
  349. sizeof(xen_v0_getdomaininfo) : \
  350. (hv_versions.dom_interface >= 8 ? \
  351. sizeof(xen_v2d8_getdomaininfo) : \
  352. (hv_versions.dom_interface == 7 ? \
  353. sizeof(xen_v2d7_getdomaininfo) : \
  354. (hv_versions.dom_interface == 6 ? \
  355. sizeof(xen_v2d6_getdomaininfo) : \
  356. (hv_versions.dom_interface == 5 ? \
  357. sizeof(xen_v2d5_getdomaininfo) : \
  358. sizeof(xen_v2_getdomaininfo))))))
  359. #define XEN_GETDOMAININFO_CLEAR(dominfo) \
  360. (hv_versions.hypervisor < 2 ? \
  361. memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) : \
  362. (hv_versions.dom_interface >= 8 ? \
  363. memset(&(dominfo.v2d8), 0, sizeof(xen_v2d8_getdomaininfo)) : \
  364. (hv_versions.dom_interface == 7 ? \
  365. memset(&(dominfo.v2d7), 0, sizeof(xen_v2d7_getdomaininfo)) : \
  366. (hv_versions.dom_interface == 6 ? \
  367. memset(&(dominfo.v2d6), 0, sizeof(xen_v2d6_getdomaininfo)) : \
  368. (hv_versions.dom_interface == 5 ? \
  369. memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo)) : \
  370. memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)))))))
  371. #define XEN_GETDOMAININFO_DOMAIN(dominfo) \
  372. (hv_versions.hypervisor < 2 ? \
  373. dominfo.v0.domain : \
  374. (hv_versions.dom_interface >= 8 ? \
  375. dominfo.v2d8.domain : \
  376. (hv_versions.dom_interface == 7 ? \
  377. dominfo.v2d7.domain : \
  378. (hv_versions.dom_interface == 6 ? \
  379. dominfo.v2d6.domain : \
  380. (hv_versions.dom_interface == 5 ? \
  381. dominfo.v2d5.domain : \
  382. dominfo.v2.domain)))))
  383. #define XEN_GETDOMAININFO_CPUTIME(dominfo) \
  384. (hv_versions.hypervisor < 2 ? \
  385. dominfo.v0.cpu_time : \
  386. (hv_versions.dom_interface >= 8 ? \
  387. dominfo.v2d8.cpu_time : \
  388. (hv_versions.dom_interface == 7 ? \
  389. dominfo.v2d7.cpu_time : \
  390. (hv_versions.dom_interface == 6 ? \
  391. dominfo.v2d6.cpu_time : \
  392. (hv_versions.dom_interface == 5 ? \
  393. dominfo.v2d5.cpu_time : \
  394. dominfo.v2.cpu_time)))))
  395. #define XEN_GETDOMAININFO_CPUCOUNT(dominfo) \
  396. (hv_versions.hypervisor < 2 ? \
  397. dominfo.v0.nr_online_vcpus : \
  398. (hv_versions.dom_interface >= 8 ? \
  399. dominfo.v2d8.nr_online_vcpus : \
  400. (hv_versions.dom_interface == 7 ? \
  401. dominfo.v2d7.nr_online_vcpus : \
  402. (hv_versions.dom_interface == 6 ? \
  403. dominfo.v2d6.nr_online_vcpus : \
  404. (hv_versions.dom_interface == 5 ? \
  405. dominfo.v2d5.nr_online_vcpus : \
  406. dominfo.v2.nr_online_vcpus)))))
  407. #define XEN_GETDOMAININFO_MAXCPUID(dominfo) \
  408. (hv_versions.hypervisor < 2 ? \
  409. dominfo.v0.max_vcpu_id : \
  410. (hv_versions.dom_interface >= 8 ? \
  411. dominfo.v2d8.max_vcpu_id : \
  412. (hv_versions.dom_interface == 7 ? \
  413. dominfo.v2d7.max_vcpu_id : \
  414. (hv_versions.dom_interface == 6 ? \
  415. dominfo.v2d6.max_vcpu_id : \
  416. (hv_versions.dom_interface == 5 ? \
  417. dominfo.v2d5.max_vcpu_id : \
  418. dominfo.v2.max_vcpu_id)))))
  419. #define XEN_GETDOMAININFO_FLAGS(dominfo) \
  420. (hv_versions.hypervisor < 2 ? \
  421. dominfo.v0.flags : \
  422. (hv_versions.dom_interface >= 8 ? \
  423. dominfo.v2d8.flags : \
  424. (hv_versions.dom_interface == 7 ? \
  425. dominfo.v2d7.flags : \
  426. (hv_versions.dom_interface == 6 ? \
  427. dominfo.v2d6.flags : \
  428. (hv_versions.dom_interface == 5 ? \
  429. dominfo.v2d5.flags : \
  430. dominfo.v2.flags)))))
  431. #define XEN_GETDOMAININFO_TOT_PAGES(dominfo) \
  432. (hv_versions.hypervisor < 2 ? \
  433. dominfo.v0.tot_pages : \
  434. (hv_versions.dom_interface >= 8 ? \
  435. dominfo.v2d8.tot_pages : \
  436. (hv_versions.dom_interface == 7 ? \
  437. dominfo.v2d7.tot_pages : \
  438. (hv_versions.dom_interface == 6 ? \
  439. dominfo.v2d6.tot_pages : \
  440. (hv_versions.dom_interface == 5 ? \
  441. dominfo.v2d5.tot_pages : \
  442. dominfo.v2.tot_pages)))))
  443. #define XEN_GETDOMAININFO_MAX_PAGES(dominfo) \
  444. (hv_versions.hypervisor < 2 ? \
  445. dominfo.v0.max_pages : \
  446. (hv_versions.dom_interface >= 8 ? \
  447. dominfo.v2d8.max_pages : \
  448. (hv_versions.dom_interface == 7 ? \
  449. dominfo.v2d7.max_pages : \
  450. (hv_versions.dom_interface == 6 ? \
  451. dominfo.v2d6.max_pages : \
  452. (hv_versions.dom_interface == 5 ? \
  453. dominfo.v2d5.max_pages : \
  454. dominfo.v2.max_pages)))))
  455. #define XEN_GETDOMAININFO_UUID(dominfo) \
  456. (hv_versions.hypervisor < 2 ? \
  457. dominfo.v0.handle : \
  458. (hv_versions.dom_interface >= 8 ? \
  459. dominfo.v2d8.handle : \
  460. (hv_versions.dom_interface == 7 ? \
  461. dominfo.v2d7.handle : \
  462. (hv_versions.dom_interface == 6 ? \
  463. dominfo.v2d6.handle : \
  464. (hv_versions.dom_interface == 5 ? \
  465. dominfo.v2d5.handle : \
  466. dominfo.v2.handle)))))
  467. static int
  468. lock_pages(void *addr, size_t len)
  469. {
  470. #ifdef __linux__
  471. return mlock(addr, len);
  472. #elif defined(__sun)
  473. return 0;
  474. #endif
  475. }
  476. static int
  477. unlock_pages(void *addr, size_t len)
  478. {
  479. #ifdef __linux__
  480. return munlock(addr, len);
  481. #elif defined(__sun)
  482. return 0;
  483. #endif
  484. }
  485. struct xen_v0_getdomaininfolistop {
  486. domid_t first_domain;
  487. uint32_t max_domains;
  488. struct xen_v0_getdomaininfo *buffer;
  489. uint32_t num_domains;
  490. };
  491. typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;
  492. struct xen_v2_getdomaininfolistop {
  493. domid_t first_domain;
  494. uint32_t max_domains;
  495. struct xen_v2_getdomaininfo *buffer;
  496. uint32_t num_domains;
  497. };
  498. typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;
  499. /* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
  500. struct xen_v2s3_getdomaininfolistop {
  501. domid_t first_domain;
  502. uint32_t max_domains;
  503. #ifdef __BIG_ENDIAN__
  504. struct {
  505. int __pad[(sizeof(long long) - sizeof(struct xen_v2d5_getdomaininfo *)) / sizeof(int)];
  506. struct xen_v2d5_getdomaininfo *v;
  507. } buffer;
  508. #else
  509. union {
  510. struct xen_v2d5_getdomaininfo *v;
  511. uint64_t pad ALIGN_64;
  512. } buffer;
  513. #endif
  514. uint32_t num_domains;
  515. };
  516. typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;
  517. struct xen_v0_domainop {
  518. domid_t domain;
  519. };
  520. typedef struct xen_v0_domainop xen_v0_domainop;
  521. /*
  522. * The information for a destroydomain system hypercall
  523. */
  524. #define XEN_V0_OP_DESTROYDOMAIN 9
  525. #define XEN_V1_OP_DESTROYDOMAIN 9
  526. #define XEN_V2_OP_DESTROYDOMAIN 2
  527. /*
  528. * The information for a pausedomain system hypercall
  529. */
  530. #define XEN_V0_OP_PAUSEDOMAIN 10
  531. #define XEN_V1_OP_PAUSEDOMAIN 10
  532. #define XEN_V2_OP_PAUSEDOMAIN 3
  533. /*
  534. * The information for an unpausedomain system hypercall
  535. */
  536. #define XEN_V0_OP_UNPAUSEDOMAIN 11
  537. #define XEN_V1_OP_UNPAUSEDOMAIN 11
  538. #define XEN_V2_OP_UNPAUSEDOMAIN 4
  539. /*
  540. * The information for a setmaxmem system hypercall
  541. */
  542. #define XEN_V0_OP_SETMAXMEM 28
  543. #define XEN_V1_OP_SETMAXMEM 28
  544. #define XEN_V2_OP_SETMAXMEM 11
  545. struct xen_v0_setmaxmem {
  546. domid_t domain;
  547. uint64_t maxmem;
  548. };
  549. typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
  550. typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;
  551. struct xen_v2_setmaxmem {
  552. uint64_t maxmem;
  553. };
  554. typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;
  555. struct xen_v2d5_setmaxmem {
  556. uint64_t maxmem ALIGN_64;
  557. };
  558. typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;
  559. /*
  560. * The information for a setmaxvcpu system hypercall
  561. */
  562. #define XEN_V0_OP_SETMAXVCPU 41
  563. #define XEN_V1_OP_SETMAXVCPU 41
  564. #define XEN_V2_OP_SETMAXVCPU 15
  565. struct xen_v0_setmaxvcpu {
  566. domid_t domain;
  567. uint32_t maxvcpu;
  568. };
  569. typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
  570. typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;
  571. struct xen_v2_setmaxvcpu {
  572. uint32_t maxvcpu;
  573. };
  574. typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;
  575. /*
  576. * The information for a setvcpumap system hypercall
  577. * Note that between 1 and 2 the limitation to 64 physical CPU was lifted
  578. * hence the difference in structures
  579. */
  580. #define XEN_V0_OP_SETVCPUMAP 20
  581. #define XEN_V1_OP_SETVCPUMAP 20
  582. #define XEN_V2_OP_SETVCPUMAP 9
  583. struct xen_v0_setvcpumap {
  584. domid_t domain;
  585. uint32_t vcpu;
  586. cpumap_t cpumap;
  587. };
  588. typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
  589. typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;
  590. struct xen_v2_cpumap {
  591. uint8_t *bitmap;
  592. uint32_t nr_cpus;
  593. };
  594. struct xen_v2_setvcpumap {
  595. uint32_t vcpu;
  596. struct xen_v2_cpumap cpumap;
  597. };
  598. typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;
  599. /* HV version 2, Dom version 5 requires 64-bit alignment */
  600. struct xen_v2d5_cpumap {
  601. #ifdef __BIG_ENDIAN__
  602. struct {
  603. int __pad[(sizeof(long long) - sizeof(uint8_t *)) / sizeof(int)];
  604. uint8_t *v;
  605. } bitmap;
  606. #else
  607. union {
  608. uint8_t *v;
  609. uint64_t pad ALIGN_64;
  610. } bitmap;
  611. #endif
  612. uint32_t nr_cpus;
  613. };
  614. struct xen_v2d5_setvcpumap {
  615. uint32_t vcpu;
  616. struct xen_v2d5_cpumap cpumap;
  617. };
  618. typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;
  619. /*
  620. * The information for a vcpuinfo system hypercall
  621. */
  622. #define XEN_V0_OP_GETVCPUINFO 43
  623. #define XEN_V1_OP_GETVCPUINFO 43
  624. #define XEN_V2_OP_GETVCPUINFO 14
  625. struct xen_v0_vcpuinfo {
  626. domid_t domain; /* owner's domain */
  627. uint32_t vcpu; /* the vcpu number */
  628. uint8_t online; /* seen as on line */
  629. uint8_t blocked; /* blocked on event */
  630. uint8_t running; /* scheduled on CPU */
  631. uint64_t cpu_time; /* nanosecond of CPU used */
  632. uint32_t cpu; /* current mapping */
  633. cpumap_t cpumap; /* deprecated in V2 */
  634. };
  635. typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
  636. typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;
  637. struct xen_v2_vcpuinfo {
  638. uint32_t vcpu; /* the vcpu number */
  639. uint8_t online; /* seen as on line */
  640. uint8_t blocked; /* blocked on event */
  641. uint8_t running; /* scheduled on CPU */
  642. uint64_t cpu_time; /* nanosecond of CPU used */
  643. uint32_t cpu; /* current mapping */
  644. };
  645. typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;
  646. struct xen_v2d5_vcpuinfo {
  647. uint32_t vcpu; /* the vcpu number */
  648. uint8_t online; /* seen as on line */
  649. uint8_t blocked; /* blocked on event */
  650. uint8_t running; /* scheduled on CPU */
  651. uint64_t cpu_time ALIGN_64; /* nanosecond of CPU used */
  652. uint32_t cpu; /* current mapping */
  653. };
  654. typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;
  655. /*
  656. * from V2 the pinning of a vcpu is read with a separate call
  657. */
  658. #define XEN_V2_OP_GETVCPUMAP 25
  659. typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
  660. typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;
  661. /*
  662. * from V2 we get the scheduler information
  663. */
  664. #define XEN_V2_OP_GETSCHEDULERID 4
  665. /*
  666. * from V2 we get the available heap information
  667. */
  668. #define XEN_V2_OP_GETAVAILHEAP 9
  669. /*
  670. * from V2 we get the scheduler parameter
  671. */
  672. #define XEN_V2_OP_SCHEDULER 16
  673. /* Scheduler types. */
  674. #define XEN_SCHEDULER_SEDF 4
  675. #define XEN_SCHEDULER_CREDIT 5
  676. /* get/set scheduler parameters */
  677. #define XEN_DOMCTL_SCHEDOP_putinfo 0
  678. #define XEN_DOMCTL_SCHEDOP_getinfo 1
  679. struct xen_v2_setschedinfo {
  680. uint32_t sched_id;
  681. uint32_t cmd;
  682. union {
  683. struct xen_domctl_sched_sedf {
  684. uint64_t period ALIGN_64;
  685. uint64_t slice ALIGN_64;
  686. uint64_t latency ALIGN_64;
  687. uint32_t extratime;
  688. uint32_t weight;
  689. } sedf;
  690. struct xen_domctl_sched_credit {
  691. uint16_t weight;
  692. uint16_t cap;
  693. } credit;
  694. } u;
  695. };
  696. typedef struct xen_v2_setschedinfo xen_v2_setschedinfo;
  697. typedef struct xen_v2_setschedinfo xen_v2_getschedinfo;
  698. /*
  699. * The hypercall operation structures also have changed on
  700. * changeset 86d26e6ec89b
  701. */
  702. /* the old structure */
  703. struct xen_op_v0 {
  704. uint32_t cmd;
  705. uint32_t interface_version;
  706. union {
  707. xen_v0_getdomaininfolistop getdomaininfolist;
  708. xen_v0_domainop domain;
  709. xen_v0_setmaxmem setmaxmem;
  710. xen_v0_setmaxvcpu setmaxvcpu;
  711. xen_v0_setvcpumap setvcpumap;
  712. xen_v0_vcpuinfo getvcpuinfo;
  713. uint8_t padding[128];
  714. } u;
  715. };
  716. typedef struct xen_op_v0 xen_op_v0;
  717. typedef struct xen_op_v0 xen_op_v1;
  718. /* the new structure for systems operations */
  719. struct xen_op_v2_sys {
  720. uint32_t cmd;
  721. uint32_t interface_version;
  722. union {
  723. xen_v2_getdomaininfolistop getdomaininfolist;
  724. xen_v2s3_getdomaininfolistop getdomaininfolists3;
  725. xen_v2_getschedulerid getschedulerid;
  726. xen_v2s4_availheap availheap;
  727. xen_v2s5_availheap availheap5;
  728. uint8_t padding[128];
  729. } u;
  730. };
  731. typedef struct xen_op_v2_sys xen_op_v2_sys;
  732. /* the new structure for domains operation */
  733. struct xen_op_v2_dom {
  734. uint32_t cmd;
  735. uint32_t interface_version;
  736. domid_t domain;
  737. union {
  738. xen_v2_setmaxmem setmaxmem;
  739. xen_v2d5_setmaxmem setmaxmemd5;
  740. xen_v2_setmaxvcpu setmaxvcpu;
  741. xen_v2_setvcpumap setvcpumap;
  742. xen_v2d5_setvcpumap setvcpumapd5;
  743. xen_v2_vcpuinfo getvcpuinfo;
  744. xen_v2d5_vcpuinfo getvcpuinfod5;
  745. xen_v2_getvcpumap getvcpumap;
  746. xen_v2d5_getvcpumap getvcpumapd5;
  747. xen_v2_setschedinfo setschedinfo;
  748. xen_v2_getschedinfo getschedinfo;
  749. uint8_t padding[128];
  750. } u;
  751. };
  752. typedef struct xen_op_v2_dom xen_op_v2_dom;
  753. #ifdef __linux__
  754. # define XEN_HYPERVISOR_SOCKET "/proc/xen/privcmd"
  755. # define HYPERVISOR_CAPABILITIES "/sys/hypervisor/properties/capabilities"
  756. #elif defined(__sun)
  757. # define XEN_HYPERVISOR_SOCKET "/dev/xen/privcmd"
  758. #else
  759. # error "unsupported platform"
  760. #endif
  761. static unsigned long long xenHypervisorGetMaxMemory(virDomainPtr domain);
/* Dispatch table wiring the unified Xen driver entry points to the
 * direct-hypercall implementations in this file.  Operations not set
 * here are presumably served by the other Xen sub-drivers whose
 * headers are included above (xend_internal.h, xs_internal.h) —
 * TODO(review): confirm against xen_driver.c. */
  762. struct xenUnifiedDriver xenHypervisorDriver = {
  763. .xenClose = xenHypervisorClose,
  764. .xenVersion = xenHypervisorGetVersion,
  765. .xenDomainSuspend = xenHypervisorPauseDomain,
  766. .xenDomainResume = xenHypervisorResumeDomain,
  767. .xenDomainDestroyFlags = xenHypervisorDestroyDomainFlags,
  768. .xenDomainGetOSType = xenHypervisorDomainGetOSType,
  769. .xenDomainGetMaxMemory = xenHypervisorGetMaxMemory,
  770. .xenDomainSetMaxMemory = xenHypervisorSetMaxMemory,
  771. .xenDomainGetInfo = xenHypervisorGetDomainInfo,
  772. .xenDomainPinVcpu = xenHypervisorPinVcpu,
  773. .xenDomainGetVcpus = xenHypervisorGetVcpus,
  774. .xenDomainGetSchedulerType = xenHypervisorGetSchedulerType,
  775. .xenDomainGetSchedulerParameters = xenHypervisorGetSchedulerParameters,
  776. .xenDomainSetSchedulerParameters = xenHypervisorSetSchedulerParameters,
  777. };
  778. #define virXenError(code, ...) \
  779. if (in_init == 0) \
  780. virReportErrorHelper(VIR_FROM_XEN, code, __FILE__, \
  781. __FUNCTION__, __LINE__, __VA_ARGS__)
  782. /**
  783. * xenHypervisorDoV0Op:
  784. * @handle: the handle to the Xen hypervisor
  785. * @op: pointer to the hypervisor operation structure
  786. *
  787. * Do a hypervisor operation though the old interface,
  788. * this leads to a hypervisor call through ioctl.
  789. *
  790. * Returns 0 in case of success and -1 in case of error.
  791. */
  792. static int
  793. xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
  794. {
  795. int ret;
  796. v0_hypercall_t hc;
  797. memset(&hc, 0, sizeof(hc));
  798. op->interface_version = hv_versions.hv << 8;
  799. hc.op = __HYPERVISOR_dom0_op;
  800. hc.arg[0] = (unsigned long) op;
  801. if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
  802. virXenError(VIR_ERR_XEN_CALL, " locking");
  803. return -1;
  804. }
  805. ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
  806. if (ret < 0) {
  807. virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
  808. xen_ioctl_hypercall_cmd);
  809. }
  810. if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
  811. virXenError(VIR_ERR_XEN_CALL, " releasing");
  812. ret = -1;
  813. }
  814. if (ret < 0)
  815. return -1;
  816. return 0;
  817. }
  818. /**
  819. * xenHypervisorDoV1Op:
  820. * @handle: the handle to the Xen hypervisor
  821. * @op: pointer to the hypervisor operation structure
  822. *
  823. * Do a hypervisor v1 operation, this leads to a hypervisor call through
  824. * ioctl.
  825. *
  826. * Returns 0 in case of success and -1 in case of error.
  827. */
  828. static int
  829. xenHypervisorDoV1Op(int handle, xen_op_v1* op)
  830. {
  831. int ret;
  832. hypercall_t hc;
  833. memset(&hc, 0, sizeof(hc));
  834. op->interface_version = DOM0_INTERFACE_VERSION;
  835. hc.op = __HYPERVISOR_dom0_op;
  836. hc.arg[0] = (unsigned long) op;
  837. if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
  838. virXenError(VIR_ERR_XEN_CALL, " locking");
  839. return -1;
  840. }
  841. ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
  842. if (ret < 0) {
  843. virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
  844. xen_ioctl_hypercall_cmd);
  845. }
  846. if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
  847. virXenError(VIR_ERR_XEN_CALL, " releasing");
  848. ret = -1;
  849. }
  850. if (ret < 0)
  851. return -1;
  852. return 0;
  853. }
  854. /**
  855. * xenHypervisorDoV2Sys:
  856. * @handle: the handle to the Xen hypervisor
  857. * @op: pointer to the hypervisor operation structure
  858. *
  859. * Do a hypervisor v2 system operation, this leads to a hypervisor
  860. * call through ioctl.
  861. *
  862. * Returns 0 in case of success and -1 in case of error.
  863. */
  864. static int
  865. xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
  866. {
  867. int ret;
  868. hypercall_t hc;
  869. memset(&hc, 0, sizeof(hc));
  870. op->interface_version = hv_versions.sys_interface;
  871. hc.op = __HYPERVISOR_sysctl;
  872. hc.arg[0] = (unsigned long) op;
  873. if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
  874. virXenError(VIR_ERR_XEN_CALL, " locking");
  875. return -1;
  876. }
  877. ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
  878. if (ret < 0) {
  879. virXenError(VIR_ERR_XEN_CALL, " sys ioctl %d",
  880. xen_ioctl_hypercall_cmd);
  881. }
  882. if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
  883. virXenError(VIR_ERR_XEN_CALL, " releasing");
  884. ret = -1;
  885. }
  886. if (ret < 0)
  887. return -1;
  888. return 0;
  889. }
  890. /**
  891. * xenHypervisorDoV2Dom:
  892. * @handle: the handle to the Xen hypervisor
  893. * @op: pointer to the hypervisor domain operation structure
  894. *
  895. * Do a hypervisor v2 domain operation, this leads to a hypervisor
  896. * call through ioctl.
  897. *
  898. * Returns 0 in case of success and -1 in case of error.
  899. */
  900. static int
  901. xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
  902. {
  903. int ret;
  904. hypercall_t hc;
  905. memset(&hc, 0, sizeof(hc));
  906. op->interface_version = hv_versions.dom_interface;
  907. hc.op = __HYPERVISOR_domctl;
  908. hc.arg[0] = (unsigned long) op;
  909. if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
  910. virXenError(VIR_ERR_XEN_CALL, " locking");
  911. return -1;
  912. }
  913. ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
  914. if (ret < 0) {
  915. virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
  916. xen_ioctl_hypercall_cmd);
  917. }
  918. if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
  919. virXenError(VIR_ERR_XEN_CALL, " releasing");
  920. ret = -1;
  921. }
  922. if (ret < 0)
  923. return -1;
  924. return 0;
  925. }
/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
 * Do a low level hypercall to list existing domains information.
 * The hypercall ABI (v0, v1 or v2) is selected from the versions
 * probed at init time; the caller's buffer is locked into memory
 * for the duration of the call since the hypervisor writes into it
 * directly.
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
virXen_getdomaininfolist(int handle, int first_domain, int maxids,
                         xen_getdomaininfolist *dominfos)
{
    int ret = -1;

    if (lock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
                   XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking");
        return -1;
    }
    if (hv_versions.hypervisor > 1) {
        /* v2 ABI; the payload layout changed again with sysctl
         * interface version 3, hence the two field variants. */
        xen_op_v2_sys op;
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;
        if (hv_versions.sys_interface < 3) {
            op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolist.max_domains = maxids;
            op.u.getdomaininfolist.buffer = dominfos->v2;
            op.u.getdomaininfolist.num_domains = maxids;
        } else {
            op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolists3.max_domains = maxids;
            op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
            op.u.getdomaininfolists3.num_domains = maxids;
        }
        ret = xenHypervisorDoV2Sys(handle, &op);
        if (ret == 0) {
            /* on success the hypervisor stores the count of domains
             * actually returned back into num_domains */
            if (hv_versions.sys_interface < 3)
                ret = op.u.getdomaininfolist.num_domains;
            else
                ret = op.u.getdomaininfolists3.num_domains;
        }
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    }
    /* unlock unconditionally; an unlock failure turns the whole call
     * into an error even if the hypercall itself succeeded */
    if (unlock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
                     XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release");
        ret = -1;
    }
    return ret;
}
  999. static int
  1000. virXen_getdomaininfo(int handle, int first_domain,
  1001. xen_getdomaininfo *dominfo) {
  1002. xen_getdomaininfolist dominfos;
  1003. if (hv_versions.hypervisor < 2) {
  1004. dominfos.v0 = &(dominfo->v0);
  1005. } else {
  1006. dominfos.v2 = &(dominfo->v2);
  1007. }
  1008. return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
  1009. }
  1010. /**
  1011. * xenHypervisorGetSchedulerType:
  1012. * @domain: pointer to the Xen Hypervisor block
  1013. * @nparams:give a number of scheduler parameters.
  1014. *
  1015. * Do a low level hypercall to get scheduler type
  1016. *
  1017. * Returns scheduler name or NULL in case of failure
  1018. */
  1019. char *
  1020. xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
  1021. {
  1022. char *schedulertype = NULL;
  1023. xenUnifiedPrivatePtr priv;
  1024. if (domain->conn == NULL) {
  1025. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  1026. _("domain or conn is NULL"));
  1027. return NULL;
  1028. }
  1029. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  1030. if (priv->handle < 0) {
  1031. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  1032. _("priv->handle invalid"));
  1033. return NULL;
  1034. }
  1035. if (domain->id < 0) {
  1036. virXenError(VIR_ERR_OPERATION_INVALID,
  1037. "%s", _("domain is not running"));
  1038. return NULL;
  1039. }
  1040. /*
  1041. * Support only hv_versions.dom_interface >=5
  1042. * (Xen3.1.0 or later)
  1043. * TODO: check on Xen 3.0.3
  1044. */
  1045. if (hv_versions.dom_interface < 5) {
  1046. virXenError(VIR_ERR_NO_XEN, "%s",
  1047. _("unsupported in dom interface < 5"));
  1048. return NULL;
  1049. }
  1050. if (hv_versions.hypervisor > 1) {
  1051. xen_op_v2_sys op;
  1052. int ret;
  1053. memset(&op, 0, sizeof(op));
  1054. op.cmd = XEN_V2_OP_GETSCHEDULERID;
  1055. ret = xenHypervisorDoV2Sys(priv->handle, &op);
  1056. if (ret < 0)
  1057. return NULL;
  1058. switch (op.u.getschedulerid.sched_id){
  1059. case XEN_SCHEDULER_SEDF:
  1060. schedulertype = strdup("sedf");
  1061. if (schedulertype == NULL)
  1062. virReportOOMError();
  1063. if (nparams)
  1064. *nparams = XEN_SCHED_SEDF_NPARAM;
  1065. break;
  1066. case XEN_SCHEDULER_CREDIT:
  1067. schedulertype = strdup("credit");
  1068. if (schedulertype == NULL)
  1069. virReportOOMError();
  1070. if (nparams)
  1071. *nparams = XEN_SCHED_CRED_NPARAM;
  1072. break;
  1073. default:
  1074. break;
  1075. }
  1076. }
  1077. return schedulertype;
  1078. }
  1079. /**
  1080. * xenHypervisorGetSchedulerParameters:
  1081. * @domain: pointer to the Xen Hypervisor block
  1082. * @params: pointer to scheduler parameters.
  1083. * This memory area should be allocated before calling.
  1084. * @nparams: this parameter must be at least as large as
  1085. * the given number of scheduler parameters.
  1086. * from xenHypervisorGetSchedulerType().
  1087. *
  1088. * Do a low level hypercall to get scheduler parameters
  1089. *
  1090. * Returns 0 or -1 in case of failure
  1091. */
  1092. int
  1093. xenHypervisorGetSchedulerParameters(virDomainPtr domain,
  1094. virTypedParameterPtr params, int *nparams)
  1095. {
  1096. xenUnifiedPrivatePtr priv;
  1097. if (domain->conn == NULL) {
  1098. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  1099. _("domain or conn is NULL"));
  1100. return -1;
  1101. }
  1102. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  1103. if (priv->handle < 0) {
  1104. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  1105. _("priv->handle invalid"));
  1106. return -1;
  1107. }
  1108. if (domain->id < 0) {
  1109. virXenError(VIR_ERR_OPERATION_INVALID,
  1110. "%s", _("domain is not running"));
  1111. return -1;
  1112. }
  1113. /*
  1114. * Support only hv_versions.dom_interface >=5
  1115. * (Xen3.1.0 or later)
  1116. * TODO: check on Xen 3.0.3
  1117. */
  1118. if (hv_versions.dom_interface < 5) {
  1119. virXenError(VIR_ERR_NO_XEN, "%s",
  1120. _("unsupported in dom interface < 5"));
  1121. return -1;
  1122. }
  1123. if (hv_versions.hypervisor > 1) {
  1124. xen_op_v2_sys op_sys;
  1125. xen_op_v2_dom op_dom;
  1126. int ret;
  1127. memset(&op_sys, 0, sizeof(op_sys));
  1128. op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
  1129. ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
  1130. if (ret < 0)
  1131. return -1;
  1132. switch (op_sys.u.getschedulerid.sched_id){
  1133. case XEN_SCHEDULER_SEDF:
  1134. if (*nparams < XEN_SCHED_SEDF_NPARAM) {
  1135. virXenError(VIR_ERR_INVALID_ARG,
  1136. "%s", _("Invalid parameter count"));
  1137. return -1;
  1138. }
  1139. /* TODO: Implement for Xen/SEDF */
  1140. TODO
  1141. return -1;
  1142. case XEN_SCHEDULER_CREDIT:
  1143. memset(&op_dom, 0, sizeof(op_dom));
  1144. op_dom.cmd = XEN_V2_OP_SCHEDULER;
  1145. op_dom.domain = (domid_t) domain->id;
  1146. op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
  1147. op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
  1148. ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
  1149. if (ret < 0)
  1150. return -1;
  1151. if (virTypedParameterAssign(&params[0],
  1152. VIR_DOMAIN_SCHEDULER_WEIGHT,
  1153. VIR_TYPED_PARAM_UINT,
  1154. op_dom.u.getschedinfo.u.credit.weight) < 0)
  1155. return -1;
  1156. if (*nparams > 1 &&
  1157. virTypedParameterAssign(&params[1],
  1158. VIR_DOMAIN_SCHEDULER_CAP,
  1159. VIR_TYPED_PARAM_UINT,
  1160. op_dom.u.getschedinfo.u.credit.cap) < 0)
  1161. return -1;
  1162. if (*nparams > XEN_SCHED_CRED_NPARAM)
  1163. *nparams = XEN_SCHED_CRED_NPARAM;
  1164. break;
  1165. default:
  1166. virXenError(VIR_ERR_INVALID_ARG,
  1167. _("Unknown scheduler %d"),
  1168. op_sys.u.getschedulerid.sched_id);
  1169. return -1;
  1170. }
  1171. }
  1172. return 0;
  1173. }
  1174. /**
  1175. * xenHypervisorSetSchedulerParameters:
  1176. * @domain: pointer to the Xen Hypervisor block
  1177. * @nparams:give a number of scheduler setting parameters .
  1178. *
  1179. * Do a low level hypercall to set scheduler parameters
  1180. *
  1181. * Returns 0 or -1 in case of failure
  1182. */
  1183. int
  1184. xenHypervisorSetSchedulerParameters(virDomainPtr domain,
  1185. virTypedParameterPtr params, int nparams)
  1186. {
  1187. int i;
  1188. unsigned int val;
  1189. xenUnifiedPrivatePtr priv;
  1190. char buf[256];
  1191. if (domain->conn == NULL) {
  1192. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  1193. _("domain or conn is NULL"));
  1194. return -1;
  1195. }
  1196. if (nparams == 0) {
  1197. /* nothing to do, exit early */
  1198. return 0;
  1199. }
  1200. if (virTypedParameterArrayValidate(params, nparams,
  1201. VIR_DOMAIN_SCHEDULER_WEIGHT,
  1202. VIR_TYPED_PARAM_UINT,
  1203. VIR_DOMAIN_SCHEDULER_CAP,
  1204. VIR_TYPED_PARAM_UINT,
  1205. NULL) < 0)
  1206. return -1;
  1207. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  1208. if (priv->handle < 0) {
  1209. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  1210. _("priv->handle invalid"));
  1211. return -1;
  1212. }
  1213. if (domain->id < 0) {
  1214. virXenError(VIR_ERR_OPERATION_INVALID,
  1215. "%s", _("domain is not running"));
  1216. return -1;
  1217. }
  1218. /*
  1219. * Support only hv_versions.dom_interface >=5
  1220. * (Xen3.1.0 or later)
  1221. * TODO: check on Xen 3.0.3
  1222. */
  1223. if (hv_versions.dom_interface < 5) {
  1224. virXenError(VIR_ERR_NO_XEN, "%s",
  1225. _("unsupported in dom interface < 5"));
  1226. return -1;
  1227. }
  1228. if (hv_versions.hypervisor > 1) {
  1229. xen_op_v2_sys op_sys;
  1230. xen_op_v2_dom op_dom;
  1231. int ret;
  1232. memset(&op_sys, 0, sizeof(op_sys));
  1233. op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
  1234. ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
  1235. if (ret == -1) return -1;
  1236. switch (op_sys.u.getschedulerid.sched_id){
  1237. case XEN_SCHEDULER_SEDF:
  1238. /* TODO: Implement for Xen/SEDF */
  1239. TODO
  1240. return -1;
  1241. case XEN_SCHEDULER_CREDIT: {
  1242. memset(&op_dom, 0, sizeof(op_dom));
  1243. op_dom.cmd = XEN_V2_OP_SCHEDULER;
  1244. op_dom.domain = (domid_t) domain->id;
  1245. op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
  1246. op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
  1247. /*
  1248. * credit scheduler parameters
  1249. * following values do not change the parameters
  1250. */
  1251. op_dom.u.getschedinfo.u.credit.weight = 0;
  1252. op_dom.u.getschedinfo.u.credit.cap = (uint16_t)~0U;
  1253. for (i = 0; i < nparams; i++) {
  1254. memset(&buf, 0, sizeof(buf));
  1255. if (STREQ(params[i].field, VIR_DOMAIN_SCHEDULER_WEIGHT)) {
  1256. val = params[i].value.ui;
  1257. if ((val < 1) || (val > USHRT_MAX)) {
  1258. virXenError(VIR_ERR_INVALID_ARG,
  1259. _("Credit scheduler weight parameter (%d) "
  1260. "is out of range (1-65535)"), val);
  1261. return -1;
  1262. }
  1263. op_dom.u.getschedinfo.u.credit.weight = val;
  1264. } else if (STREQ(params[i].field, VIR_DOMAIN_SCHEDULER_CAP)) {
  1265. val = params[i].value.ui;
  1266. if (val >= USHRT_MAX) {
  1267. virXenError(VIR_ERR_INVALID_ARG,
  1268. _("Credit scheduler cap parameter (%d) is "
  1269. "out of range (0-65534)"), val);
  1270. return -1;
  1271. }
  1272. op_dom.u.getschedinfo.u.credit.cap = val;
  1273. }
  1274. }
  1275. ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
  1276. if (ret < 0)
  1277. return -1;
  1278. break;
  1279. }
  1280. default:
  1281. virXenError(VIR_ERR_INVALID_ARG,
  1282. _("Unknown scheduler %d"),
  1283. op_sys.u.getschedulerid.sched_id);
  1284. return -1;
  1285. }
  1286. }
  1287. return 0;
  1288. }
  1289. int
  1290. xenHypervisorDomainBlockStats (virDomainPtr dom,
  1291. const char *path,
  1292. struct _virDomainBlockStats *stats)
  1293. {
  1294. #ifdef __linux__
  1295. xenUnifiedPrivatePtr priv;
  1296. int ret;
  1297. priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
  1298. xenUnifiedLock(priv);
  1299. /* Need to lock because it hits the xenstore handle :-( */
  1300. ret = xenLinuxDomainBlockStats (priv, dom, path, stats);
  1301. xenUnifiedUnlock(priv);
  1302. return ret;
  1303. #else
  1304. virXenError(VIR_ERR_OPERATION_INVALID, "%s",
  1305. _("block statistics not supported on this platform"));
  1306. return -1;
  1307. #endif
  1308. }
  1309. /* Paths have the form vif<domid>.<n> (this interface checks that
  1310. * <domid> is the real domain ID and returns an error if not).
  1311. *
  1312. * In future we may allow you to query bridge stats (virbrX or
  1313. * xenbrX), but that will probably be through a separate
  1314. * virNetwork interface, as yet not decided.
  1315. */
  1316. int
  1317. xenHypervisorDomainInterfaceStats (virDomainPtr dom,
  1318. const char *path,
  1319. struct _virDomainInterfaceStats *stats)
  1320. {
  1321. #ifdef __linux__
  1322. int rqdomid, device;
  1323. /* Verify that the vif requested is one belonging to the current
  1324. * domain.
  1325. */
  1326. if (sscanf(path, "vif%d.%d", &rqdomid, &device) != 2) {
  1327. virXenError(VIR_ERR_INVALID_ARG, "%s",
  1328. _("invalid path, should be vif<domid>.<n>."));
  1329. return -1;
  1330. }
  1331. if (rqdomid != dom->id) {
  1332. virXenError(VIR_ERR_INVALID_ARG, "%s",
  1333. _("invalid path, vif<domid> should match this domain ID"));
  1334. return -1;
  1335. }
  1336. return linuxDomainInterfaceStats(path, stats);
  1337. #else
  1338. virXenError(VIR_ERR_OPERATION_INVALID, "%s",
  1339. _("/proc/net/dev: Interface not found"));
  1340. return -1;
  1341. #endif
  1342. }
  1343. /**
  1344. * virXen_pausedomain:
  1345. * @handle: the hypervisor handle
  1346. * @id: the domain id
  1347. *
  1348. * Do a low level hypercall to pause the domain
  1349. *
  1350. * Returns 0 or -1 in case of failure
  1351. */
  1352. static int
  1353. virXen_pausedomain(int handle, int id)
  1354. {
  1355. int ret = -1;
  1356. if (hv_versions.hypervisor > 1) {
  1357. xen_op_v2_dom op;
  1358. memset(&op, 0, sizeof(op));
  1359. op.cmd = XEN_V2_OP_PAUSEDOMAIN;
  1360. op.domain = (domid_t) id;
  1361. ret = xenHypervisorDoV2Dom(handle, &op);
  1362. } else if (hv_versions.hypervisor == 1) {
  1363. xen_op_v1 op;
  1364. memset(&op, 0, sizeof(op));
  1365. op.cmd = XEN_V1_OP_PAUSEDOMAIN;
  1366. op.u.domain.domain = (domid_t) id;
  1367. ret = xenHypervisorDoV1Op(handle, &op);
  1368. } else if (hv_versions.hypervisor == 0) {
  1369. xen_op_v0 op;
  1370. memset(&op, 0, sizeof(op));
  1371. op.cmd = XEN_V0_OP_PAUSEDOMAIN;
  1372. op.u.domain.domain = (domid_t) id;
  1373. ret = xenHypervisorDoV0Op(handle, &op);
  1374. }
  1375. return ret;
  1376. }
  1377. /**
  1378. * virXen_unpausedomain:
  1379. * @handle: the hypervisor handle
  1380. * @id: the domain id
  1381. *
  1382. * Do a low level hypercall to unpause the domain
  1383. *
  1384. * Returns 0 or -1 in case of failure
  1385. */
  1386. static int
  1387. virXen_unpausedomain(int handle, int id)
  1388. {
  1389. int ret = -1;
  1390. if (hv_versions.hypervisor > 1) {
  1391. xen_op_v2_dom op;
  1392. memset(&op, 0, sizeof(op));
  1393. op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
  1394. op.domain = (domid_t) id;
  1395. ret = xenHypervisorDoV2Dom(handle, &op);
  1396. } else if (hv_versions.hypervisor == 1) {
  1397. xen_op_v1 op;
  1398. memset(&op, 0, sizeof(op));
  1399. op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
  1400. op.u.domain.domain = (domid_t) id;
  1401. ret = xenHypervisorDoV1Op(handle, &op);
  1402. } else if (hv_versions.hypervisor == 0) {
  1403. xen_op_v0 op;
  1404. memset(&op, 0, sizeof(op));
  1405. op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
  1406. op.u.domain.domain = (domid_t) id;
  1407. ret = xenHypervisorDoV0Op(handle, &op);
  1408. }
  1409. return ret;
  1410. }
  1411. /**
  1412. * virXen_destroydomain:
  1413. * @handle: the hypervisor handle
  1414. * @id: the domain id
  1415. *
  1416. * Do a low level hypercall to destroy the domain
  1417. *
  1418. * Returns 0 or -1 in case of failure
  1419. */
  1420. static int
  1421. virXen_destroydomain(int handle, int id)
  1422. {
  1423. int ret = -1;
  1424. if (hv_versions.hypervisor > 1) {
  1425. xen_op_v2_dom op;
  1426. memset(&op, 0, sizeof(op));
  1427. op.cmd = XEN_V2_OP_DESTROYDOMAIN;
  1428. op.domain = (domid_t) id;
  1429. ret = xenHypervisorDoV2Dom(handle, &op);
  1430. } else if (hv_versions.hypervisor == 1) {
  1431. xen_op_v1 op;
  1432. memset(&op, 0, sizeof(op));
  1433. op.cmd = XEN_V1_OP_DESTROYDOMAIN;
  1434. op.u.domain.domain = (domid_t) id;
  1435. ret = xenHypervisorDoV1Op(handle, &op);
  1436. } else if (hv_versions.hypervisor == 0) {
  1437. xen_op_v0 op;
  1438. memset(&op, 0, sizeof(op));
  1439. op.cmd = XEN_V0_OP_DESTROYDOMAIN;
  1440. op.u.domain.domain = (domid_t) id;
  1441. ret = xenHypervisorDoV0Op(handle, &op);
  1442. }
  1443. return ret;
  1444. }
  1445. /**
  1446. * virXen_setmaxmem:
  1447. * @handle: the hypervisor handle
  1448. * @id: the domain id
  1449. * @memory: the amount of memory in kilobytes
  1450. *
  1451. * Do a low level hypercall to change the max memory amount
  1452. *
  1453. * Returns 0 or -1 in case of failure
  1454. */
  1455. static int
  1456. virXen_setmaxmem(int handle, int id, unsigned long memory)
  1457. {
  1458. int ret = -1;
  1459. if (hv_versions.hypervisor > 1) {
  1460. xen_op_v2_dom op;
  1461. memset(&op, 0, sizeof(op));
  1462. op.cmd = XEN_V2_OP_SETMAXMEM;
  1463. op.domain = (domid_t) id;
  1464. if (hv_versions.dom_interface < 5)
  1465. op.u.setmaxmem.maxmem = memory;
  1466. else
  1467. op.u.setmaxmemd5.maxmem = memory;
  1468. ret = xenHypervisorDoV2Dom(handle, &op);
  1469. } else if (hv_versions.hypervisor == 1) {
  1470. xen_op_v1 op;
  1471. memset(&op, 0, sizeof(op));
  1472. op.cmd = XEN_V1_OP_SETMAXMEM;
  1473. op.u.setmaxmem.domain = (domid_t) id;
  1474. op.u.setmaxmem.maxmem = memory;
  1475. ret = xenHypervisorDoV1Op(handle, &op);
  1476. } else if (hv_versions.hypervisor == 0) {
  1477. xen_op_v0 op;
  1478. memset(&op, 0, sizeof(op));
  1479. op.cmd = XEN_V0_OP_SETMAXMEM;
  1480. op.u.setmaxmem.domain = (domid_t) id;
  1481. op.u.setmaxmem.maxmem = memory;
  1482. ret = xenHypervisorDoV0Op(handle, &op);
  1483. }
  1484. return ret;
  1485. }
  1486. /**
  1487. * virXen_setmaxvcpus:
  1488. * @handle: the hypervisor handle
  1489. * @id: the domain id
  1490. * @vcpus: the numbers of vcpus
  1491. *
  1492. * Do a low level hypercall to change the max vcpus amount
  1493. *
  1494. * Returns 0 or -1 in case of failure
  1495. */
  1496. static int
  1497. virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
  1498. {
  1499. int ret = -1;
  1500. if (hv_versions.hypervisor > 1) {
  1501. xen_op_v2_dom op;
  1502. memset(&op, 0, sizeof(op));
  1503. op.cmd = XEN_V2_OP_SETMAXVCPU;
  1504. op.domain = (domid_t) id;
  1505. op.u.setmaxvcpu.maxvcpu = vcpus;
  1506. ret = xenHypervisorDoV2Dom(handle, &op);
  1507. } else if (hv_versions.hypervisor == 1) {
  1508. xen_op_v1 op;
  1509. memset(&op, 0, sizeof(op));
  1510. op.cmd = XEN_V1_OP_SETMAXVCPU;
  1511. op.u.setmaxvcpu.domain = (domid_t) id;
  1512. op.u.setmaxvcpu.maxvcpu = vcpus;
  1513. ret = xenHypervisorDoV1Op(handle, &op);
  1514. } else if (hv_versions.hypervisor == 0) {
  1515. xen_op_v0 op;
  1516. memset(&op, 0, sizeof(op));
  1517. op.cmd = XEN_V0_OP_SETMAXVCPU;
  1518. op.u.setmaxvcpu.domain = (domid_t) id;
  1519. op.u.setmaxvcpu.maxvcpu = vcpus;
  1520. ret = xenHypervisorDoV0Op(handle, &op);
  1521. }
  1522. return ret;
  1523. }
  1524. /**
  1525. * virXen_setvcpumap:
  1526. * @handle: the hypervisor handle
  1527. * @id: the domain id
  1528. * @vcpu: the vcpu to map
  1529. * @cpumap: the bitmap for this vcpu
  1530. * @maplen: the size of the bitmap in bytes
  1531. *
  1532. * Do a low level hypercall to change the pinning for vcpu
  1533. *
  1534. * Returns 0 or -1 in case of failure
  1535. */
  1536. static int
  1537. virXen_setvcpumap(int handle, int id, unsigned int vcpu,
  1538. unsigned char * cpumap, int maplen)
  1539. {
  1540. int ret = -1;
  1541. unsigned char *new = NULL;
  1542. unsigned char *bitmap = NULL;
  1543. uint32_t nr_cpus;
  1544. if (hv_versions.hypervisor > 1) {
  1545. xen_op_v2_dom op;
  1546. if (lock_pages(cpumap, maplen) < 0) {
  1547. virXenError(VIR_ERR_XEN_CALL, " locking");
  1548. return -1;
  1549. }
  1550. memset(&op, 0, sizeof(op));
  1551. op.cmd = XEN_V2_OP_SETVCPUMAP;
  1552. op.domain = (domid_t) id;
  1553. /* The allocated memory to cpumap must be 'sizeof(uint64_t)' byte *
  1554. * for Xen, and also nr_cpus must be 'sizeof(uint64_t) * 8' */
  1555. if (maplen < 8) {
  1556. if (VIR_ALLOC_N(new, sizeof(uint64_t)) < 0) {
  1557. virReportOOMError();
  1558. return -1;
  1559. }
  1560. memcpy(new, cpumap, maplen);
  1561. bitmap = new;
  1562. nr_cpus = sizeof(uint64_t) * 8;
  1563. } else {
  1564. bitmap = cpumap;
  1565. nr_cpus = maplen * 8;
  1566. }
  1567. if (hv_versions.dom_interface < 5) {
  1568. op.u.setvcpumap.vcpu = vcpu;
  1569. op.u.setvcpumap.cpumap.bitmap = bitmap;
  1570. op.u.setvcpumap.cpumap.nr_cpus = nr_cpus;
  1571. } else {
  1572. op.u.setvcpumapd5.vcpu = vcpu;
  1573. op.u.setvcpumapd5.cpumap.bitmap.v = bitmap;
  1574. op.u.setvcpumapd5.cpumap.nr_cpus = nr_cpus;
  1575. }
  1576. ret = xenHypervisorDoV2Dom(handle, &op);
  1577. VIR_FREE(new);
  1578. if (unlock_pages(cpumap, maplen) < 0) {
  1579. virXenError(VIR_ERR_XEN_CALL, " release");
  1580. ret = -1;
  1581. }
  1582. } else {
  1583. cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
  1584. uint64_t *pm = &xen_cpumap;
  1585. int j;
  1586. if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
  1587. return -1;
  1588. memset(pm, 0, sizeof(cpumap_t));
  1589. for (j = 0; j < maplen; j++)
  1590. *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));
  1591. if (hv_versions.hypervisor == 1) {
  1592. xen_op_v1 op;
  1593. memset(&op, 0, sizeof(op));
  1594. op.cmd = XEN_V1_OP_SETVCPUMAP;
  1595. op.u.setvcpumap.domain = (domid_t) id;
  1596. op.u.setvcpumap.vcpu = vcpu;
  1597. op.u.setvcpumap.cpumap = xen_cpumap;
  1598. ret = xenHypervisorDoV1Op(handle, &op);
  1599. } else if (hv_versions.hypervisor == 0) {
  1600. xen_op_v0 op;
  1601. memset(&op, 0, sizeof(op));
  1602. op.cmd = XEN_V0_OP_SETVCPUMAP;
  1603. op.u.setvcpumap.domain = (domid_t) id;
  1604. op.u.setvcpumap.vcpu = vcpu;
  1605. op.u.setvcpumap.cpumap = xen_cpumap;
  1606. ret = xenHypervisorDoV0Op(handle, &op);
  1607. }
  1608. }
  1609. return ret;
  1610. }
/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: pointer to the virVcpuInfo structure to fill in
 * @cpumap: optional buffer receiving the vcpu's CPU affinity bitmap
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to retrieve the state, cpu time and
 * (optionally) the physical-CPU affinity of one vcpu.
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;
    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        /* the field layout changed with domctl interface version 5 */
        if (hv_versions.dom_interface < 5)
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);
        if (ret < 0)
            return -1;
        ipt->number = vcpu;
        if (hv_versions.dom_interface < 5) {
            if (op.u.getvcpuinfo.online) {
                /* NOTE(review): if neither running nor blocked is set,
                 * ipt->state is left untouched — presumably the caller
                 * zero-initializes it; confirm at call sites. */
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
        }
        if ((cpumap != NULL) && (maplen > 0)) {
            /* second hypercall to fetch the affinity map; the buffer is
             * written by the hypervisor directly, hence the page lock */
            if (lock_pages(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " locking");
                return -1;
            }
            memset(cpumap, 0, maplen);
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
            if (hv_versions.dom_interface < 5) {
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (unlock_pages(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " release");
                ret = -1;
            }
        }
    } else {
        int mapl = maplen;
        int cpu;

        /* old ABI: the hypervisor returns a fixed-size cpumap inline,
         * so clamp the caller's map length to it */
        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);
        if (hv_versions.hypervisor == 1) {
            xen_op_v1 op;
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return -1;
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                /* expand the inline 64-bit map into the caller's buffer */
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hv_versions.hypervisor == 0) {
            /* NOTE(review): declares xen_op_v1 on the v0 path —
             * presumably the v0 and v1 op types share one dom0_op
             * layout (see the typedefs earlier in this file); confirm. */
            xen_op_v1 op;
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return -1;
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
    }
    return ret;
}
  1741. /**
  1742. * xenHypervisorInit:
  1743. * @override_versions: pointer to optional struct xenHypervisorVersions with
  1744. * version information used instead of automatic version detection.
  1745. *
  1746. * Initialize the hypervisor layer. Try to detect the kind of interface
  1747. * used i.e. pre or post changeset 10277
  1748. *
  1749. * Returns 0 or -1 in case of failure
  1750. */
  1751. int
  1752. xenHypervisorInit(struct xenHypervisorVersions *override_versions)
  1753. {
  1754. int fd, ret, cmd, errcode;
  1755. hypercall_t hc;
  1756. v0_hypercall_t v0_hc;
  1757. xen_getdomaininfo info;
  1758. virVcpuInfoPtr ipt = NULL;
  1759. if (initialized) {
  1760. if (hv_versions.hypervisor == -1)
  1761. return -1;
  1762. return 0;
  1763. }
  1764. initialized = 1;
  1765. in_init = 1;
  1766. /* Compile regular expressions used by xenHypervisorGetCapabilities.
  1767. * Note that errors here are really internal errors since these
  1768. * regexps should never fail to compile.
  1769. */
  1770. errcode = regcomp (&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
  1771. if (errcode != 0) {
  1772. char error[100];
  1773. regerror (errcode, &flags_hvm_rec, error, sizeof(error));
  1774. regfree (&flags_hvm_rec);
  1775. virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
  1776. in_init = 0;
  1777. return -1;
  1778. }
  1779. errcode = regcomp (&flags_pae_rec, flags_pae_re, REG_EXTENDED);
  1780. if (errcode != 0) {
  1781. char error[100];
  1782. regerror (errcode, &flags_pae_rec, error, sizeof(error));
  1783. regfree (&flags_pae_rec);
  1784. regfree (&flags_hvm_rec);
  1785. virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
  1786. in_init = 0;
  1787. return -1;
  1788. }
  1789. errcode = regcomp (&xen_cap_rec, xen_cap_re, REG_EXTENDED);
  1790. if (errcode != 0) {
  1791. char error[100];
  1792. regerror (errcode, &xen_cap_rec, error, sizeof(error));
  1793. regfree (&xen_cap_rec);
  1794. regfree (&flags_pae_rec);
  1795. regfree (&flags_hvm_rec);
  1796. virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
  1797. in_init = 0;
  1798. return -1;
  1799. }
  1800. if (override_versions) {
  1801. hv_versions = *override_versions;
  1802. in_init = 0;
  1803. return 0;
  1804. }
  1805. /* Xen hypervisor version detection begins. */
  1806. ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
  1807. if (ret < 0) {
  1808. hv_versions.hypervisor = -1;
  1809. return -1;
  1810. }
  1811. fd = ret;
  1812. /*
  1813. * The size of the hypervisor call block changed July 2006
  1814. * this detect if we are using the new or old hypercall_t structure
  1815. */
  1816. hc.op = __HYPERVISOR_xen_version;
  1817. hc.arg[0] = (unsigned long) XENVER_version;
  1818. hc.arg[1] = 0;
  1819. cmd = IOCTL_PRIVCMD_HYPERCALL;
  1820. ret = ioctl(fd, cmd, (unsigned long) &hc);
  1821. if ((ret != -1) && (ret != 0)) {
  1822. VIR_DEBUG("Using new hypervisor call: %X", ret);
  1823. hv_versions.hv = ret;
  1824. xen_ioctl_hypercall_cmd = cmd;
  1825. goto detect_v2;
  1826. }
  1827. #ifndef __sun
  1828. /*
  1829. * check if the old hypercall are actually working
  1830. */
  1831. v0_hc.op = __HYPERVISOR_xen_version;
  1832. v0_hc.arg[0] = (unsigned long) XENVER_version;
  1833. v0_hc.arg[1] = 0;
  1834. cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
  1835. ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
  1836. if ((ret != -1) && (ret != 0)) {
  1837. VIR_DEBUG("Using old hypervisor call: %X", ret);
  1838. hv_versions.hv = ret;
  1839. xen_ioctl_hypercall_cmd = cmd;
  1840. hv_versions.hypervisor = 0;
  1841. goto done;
  1842. }
  1843. #endif
  1844. /*
  1845. * we failed to make any hypercall
  1846. */
  1847. hv_versions.hypervisor = -1;
  1848. virXenError(VIR_ERR_XEN_CALL, " ioctl %lu",
  1849. (unsigned long) IOCTL_PRIVCMD_HYPERCALL);
  1850. VIR_FORCE_CLOSE(fd);
  1851. in_init = 0;
  1852. return -1;
  1853. detect_v2:
  1854. /*
  1855. * The hypercalls were refactored into 3 different section in August 2006
  1856. * Try to detect if we are running a version post 3.0.2 with the new ones
  1857. * or the old ones
  1858. */
  1859. hv_versions.hypervisor = 2;
  1860. if (VIR_ALLOC(ipt) < 0) {
  1861. virReportOOMError();
  1862. return -1;
  1863. }
  1864. /* Currently consider RHEL5.0 Fedora7, xen-3.1, and xen-unstable */
  1865. hv_versions.sys_interface = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
  1866. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1867. /* RHEL 5.0 */
  1868. hv_versions.dom_interface = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
  1869. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1870. VIR_DEBUG("Using hypervisor call v2, sys ver2 dom ver3");
  1871. goto done;
  1872. }
  1873. /* Fedora 7 */
  1874. hv_versions.dom_interface = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
  1875. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1876. VIR_DEBUG("Using hypervisor call v2, sys ver2 dom ver4");
  1877. goto done;
  1878. }
  1879. }
  1880. hv_versions.sys_interface = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
  1881. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1882. /* xen-3.1 */
  1883. hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
  1884. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1885. VIR_DEBUG("Using hypervisor call v2, sys ver3 dom ver5");
  1886. goto done;
  1887. }
  1888. }
  1889. hv_versions.sys_interface = 4; /* XEN_SYSCTL_INTERFACE_VERSION */
  1890. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1891. /* Fedora 8 */
  1892. hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
  1893. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1894. VIR_DEBUG("Using hypervisor call v2, sys ver4 dom ver5");
  1895. goto done;
  1896. }
  1897. }
  1898. hv_versions.sys_interface = 6; /* XEN_SYSCTL_INTERFACE_VERSION */
  1899. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1900. /* Xen 3.2, Fedora 9 */
  1901. hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
  1902. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1903. VIR_DEBUG("Using hypervisor call v2, sys ver6 dom ver5");
  1904. goto done;
  1905. }
  1906. }
  1907. /* Xen 4.0 */
  1908. hv_versions.sys_interface = 7; /* XEN_SYSCTL_INTERFACE_VERSION */
  1909. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1910. hv_versions.dom_interface = 6; /* XEN_DOMCTL_INTERFACE_VERSION */
  1911. VIR_DEBUG("Using hypervisor call v2, sys ver7 dom ver6");
  1912. goto done;
  1913. }
  1914. /* Xen 4.1
  1915. * sysctl version 8 -> xen-unstable c/s 21118:28e5409e3fb3
  1916. * domctl version 7 -> xen-unstable c/s 21212:de94884a669c
  1917. * domctl version 8 -> xen-unstable c/s 23874:651aed73b39c
  1918. */
  1919. hv_versions.sys_interface = 8; /* XEN_SYSCTL_INTERFACE_VERSION */
  1920. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1921. hv_versions.dom_interface = 7; /* XEN_DOMCTL_INTERFACE_VERSION */
  1922. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1923. VIR_DEBUG("Using hypervisor call v2, sys ver8 dom ver7");
  1924. goto done;
  1925. }
  1926. hv_versions.dom_interface = 8; /* XEN_DOMCTL_INTERFACE_VERSION */
  1927. if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
  1928. VIR_DEBUG("Using hypervisor call v2, sys ver8 dom ver8");
  1929. goto done;
  1930. }
  1931. }
  1932. hv_versions.hypervisor = 1;
  1933. hv_versions.sys_interface = -1;
  1934. if (virXen_getdomaininfo(fd, 0, &info) == 1) {
  1935. VIR_DEBUG("Using hypervisor call v1");
  1936. goto done;
  1937. }
  1938. /*
  1939. * we failed to make the getdomaininfolist hypercall
  1940. */
  1941. VIR_DEBUG("Failed to find any Xen hypervisor method");
  1942. hv_versions.hypervisor = -1;
  1943. virXenError(VIR_ERR_XEN_CALL, " ioctl %lu",
  1944. (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
  1945. VIR_FORCE_CLOSE(fd);
  1946. in_init = 0;
  1947. VIR_FREE(ipt);
  1948. return -1;
  1949. done:
  1950. VIR_FORCE_CLOSE(fd);
  1951. in_init = 0;
  1952. VIR_FREE(ipt);
  1953. return 0;
  1954. }
  1955. /**
  1956. * xenHypervisorOpen:
  1957. * @conn: pointer to the connection block
  1958. * @name: URL for the target, NULL for local
  1959. * @flags: combination of virDrvOpenFlag(s)
  1960. *
  1961. * Connects to the Xen hypervisor.
  1962. *
  1963. * Returns 0 or -1 in case of error.
  1964. */
  1965. virDrvOpenStatus
  1966. xenHypervisorOpen(virConnectPtr conn,
  1967. virConnectAuthPtr auth ATTRIBUTE_UNUSED,
  1968. unsigned int flags)
  1969. {
  1970. int ret;
  1971. xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;
  1972. virCheckFlags(VIR_CONNECT_RO, VIR_DRV_OPEN_ERROR);
  1973. if (initialized == 0)
  1974. if (xenHypervisorInit(NULL) == -1)
  1975. return -1;
  1976. priv->handle = -1;
  1977. ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
  1978. if (ret < 0) {
  1979. virXenError(VIR_ERR_NO_XEN, "%s", XEN_HYPERVISOR_SOCKET);
  1980. return -1;
  1981. }
  1982. priv->handle = ret;
  1983. return 0;
  1984. }
  1985. /**
  1986. * xenHypervisorClose:
  1987. * @conn: pointer to the connection block
  1988. *
  1989. * Close the connection to the Xen hypervisor.
  1990. *
  1991. * Returns 0 in case of success or -1 in case of error.
  1992. */
  1993. int
  1994. xenHypervisorClose(virConnectPtr conn)
  1995. {
  1996. int ret;
  1997. xenUnifiedPrivatePtr priv;
  1998. if (conn == NULL)
  1999. return -1;
  2000. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2001. if (priv->handle < 0)
  2002. return -1;
  2003. ret = VIR_CLOSE(priv->handle);
  2004. if (ret < 0)
  2005. return -1;
  2006. return 0;
  2007. }
  2008. /**
  2009. * xenHypervisorGetVersion:
  2010. * @conn: pointer to the connection block
  2011. * @hvVer: where to store the version
  2012. *
  2013. * Call the hypervisor to extracts his own internal API version
  2014. *
  2015. * Returns 0 in case of success, -1 in case of error
  2016. */
  2017. int
  2018. xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
  2019. {
  2020. xenUnifiedPrivatePtr priv;
  2021. if (conn == NULL)
  2022. return -1;
  2023. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2024. if (priv->handle < 0 || hvVer == NULL)
  2025. return -1;
  2026. *hvVer = (hv_versions.hv >> 16) * 1000000 + (hv_versions.hv & 0xFFFF) * 1000;
  2027. return 0;
  2028. }
/* One guest ABI flavour advertised by the hypervisor capabilities. */
struct guest_arch {
    const char *model;   /* CPU model name, e.g. "i686", "x86_64", "ia64" */
    int bits;            /* guest word size: 32 or 64 */
    int hvm;             /* non-zero for fully virtualized (HVM), 0 for PV */
    int pae;             /* non-zero if the guest supports PAE */
    int nonpae;          /* non-zero if the guest supports non-PAE mode */
    int ia64_be;         /* non-zero if IA64 big-endian is supported */
};
  2037. static int xenDefaultConsoleType(const char *ostype)
  2038. {
  2039. if (STREQ(ostype, "hvm"))
  2040. return VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_SERIAL;
  2041. else
  2042. return VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_XEN;
  2043. }
/* Build a virCapsPtr describing the host's features and each supported
 * guest ABI flavour in @guest_archs.  NUMA topology is filled in via
 * xend when the sysctl interface is new enough.  Returns NULL on
 * failure (OOM, or topology lookup error). */
static virCapsPtr
xenHypervisorBuildCapabilities(virConnectPtr conn,
                               const char *hostmachine,
                               int host_pae,
                               const char *hvm_type,
                               struct guest_arch *guest_archs,
                               int nr_guest_archs) {
    virCapsPtr caps;
    int i;
    /* Cached Xen version: major in the high 16 bits, minor in the low 16. */
    int hv_major = hv_versions.hv >> 16;
    int hv_minor = hv_versions.hv & 0xFFFF;

    if ((caps = virCapabilitiesNew(hostmachine, 1, 1)) == NULL)
        goto no_memory;

    /* 00:16:3e is the Xensource OUI used for generated guest MAC addresses */
    virCapabilitiesSetMacPrefix(caps, (unsigned char[]){ 0x00, 0x16, 0x3e });

    /* Host features: hardware virt flavour ("vmx"/"svm") and PAE. */
    if (hvm_type && STRNEQ(hvm_type, "") &&
        virCapabilitiesAddHostFeature(caps, hvm_type) < 0)
        goto no_memory;
    if (host_pae &&
        virCapabilitiesAddHostFeature(caps, "pae") < 0)
        goto no_memory;

    if (virCapabilitiesAddHostMigrateTransport(caps,
                                               "xenmigr") < 0)
        goto no_memory;

    /* NUMA topology requires a sysctl interface new enough and a live
     * connection to xend. */
    if (hv_versions.sys_interface >= SYS_IFACE_MIN_VERS_NUMA && conn != NULL) {
        if (xenDaemonNodeGetTopology(conn, caps) != 0) {
            virCapabilitiesFree(caps);
            return NULL;
        }
    }

    /* One <guest> element per (model, hvm) flavour. */
    for (i = 0; i < nr_guest_archs; ++i) {
        virCapsGuestPtr guest;
        char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
        virCapsGuestMachinePtr *machines;

        if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
            goto no_memory;

        if ((guest = virCapabilitiesAddGuest(caps,
                                             guest_archs[i].hvm ? "hvm" : "xen",
                                             guest_archs[i].model,
                                             guest_archs[i].bits,
                                             (STREQ(hostmachine, "x86_64") ?
                                              "/usr/lib64/xen/bin/qemu-dm" :
                                              "/usr/lib/xen/bin/qemu-dm"),
                                             (guest_archs[i].hvm ?
                                              "/usr/lib/xen/boot/hvmloader" :
                                              NULL),
                                             1,
                                             machines)) == NULL) {
            virCapabilitiesFreeMachines(machines, 1);
            goto no_memory;
        }
        /* Ownership of machines passed to the guest on success. */
        machines = NULL;

        if (virCapabilitiesAddGuestDomain(guest,
                                          "xen",
                                          NULL,
                                          NULL,
                                          0,
                                          NULL) == NULL)
            goto no_memory;

        if (guest_archs[i].pae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "pae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].nonpae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "nonpae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].ia64_be &&
            virCapabilitiesAddGuestFeature(guest,
                                           "ia64_be",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].hvm) {
            if (virCapabilitiesAddGuestFeature(guest,
                                               "acpi",
                                               1, 1) == NULL)
                goto no_memory;

            /* In Xen 3.1.0, APIC is always on and can't be toggled */
            /* NOTE(review): this condition requires BOTH major > 3 AND
             * minor > 0 for the toggle, so e.g. 4.0 is still treated as
             * non-togglable — confirm intent against Xen version history. */
            if (virCapabilitiesAddGuestFeature(guest,
                                               "apic",
                                               1,
                                               (hv_major > 3 &&
                                                hv_minor > 0 ?
                                                0 : 1)) == NULL)
                goto no_memory;

            /* Xen 3.3.x and beyond supports enabling/disabling
             * hardware assisted paging.  Default is off.
             */
            if ((hv_major == 3 && hv_minor >= 3) || (hv_major > 3))
                if (virCapabilitiesAddGuestFeature(guest,
                                                   "hap",
                                                   0,
                                                   1) == NULL)
                    goto no_memory;

            /* Xen 3.4.x and beyond supports the Viridian (Hyper-V)
             * enlightenment interface.  Default is off.
             */
            if ((hv_major == 3 && hv_minor >= 4) || (hv_major > 3))
                if (virCapabilitiesAddGuestFeature(guest,
                                                   "viridian",
                                                   0,
                                                   1) == NULL)
                    goto no_memory;
        }
    }

    caps->defaultConsoleTargetType = xenDefaultConsoleType;

    return caps;

 no_memory:
    virCapabilitiesFree(caps);
    return NULL;
}
  2159. #ifdef __sun
  2160. static int
  2161. get_cpu_flags(virConnectPtr conn, const char **hvm, int *pae, int *longmode)
  2162. {
  2163. struct {
  2164. uint32_t r_eax, r_ebx, r_ecx, r_edx;
  2165. } regs;
  2166. char tmpbuf[20];
  2167. int ret = 0;
  2168. int fd;
  2169. /* returns -1, errno 22 if in 32-bit mode */
  2170. *longmode = (sysinfo(SI_ARCHITECTURE_64, tmpbuf, sizeof(tmpbuf)) != -1);
  2171. if ((fd = open("/dev/cpu/self/cpuid", O_RDONLY)) == -1 ||
  2172. pread(fd, &regs, sizeof(regs), 0) != sizeof(regs)) {
  2173. virReportSystemError(errno, "%s", _("could not read CPU flags"));
  2174. goto out;
  2175. }
  2176. *pae = 0;
  2177. *hvm = "";
  2178. if (STREQLEN((const char *)&regs.r_ebx, "AuthcAMDenti", 12)) {
  2179. if (pread(fd, &regs, sizeof(regs), 0x80000001) == sizeof(regs)) {
  2180. /* Read secure virtual machine bit (bit 2 of ECX feature ID) */
  2181. if ((regs.r_ecx >> 2) & 1) {
  2182. *hvm = "svm";
  2183. }
  2184. if ((regs.r_edx >> 6) & 1)
  2185. *pae = 1;
  2186. }
  2187. } else if (STREQLEN((const char *)&regs.r_ebx, "GenuntelineI", 12)) {
  2188. if (pread(fd, &regs, sizeof(regs), 0x00000001) == sizeof(regs)) {
  2189. /* Read VMXE feature bit (bit 5 of ECX feature ID) */
  2190. if ((regs.r_ecx >> 5) & 1)
  2191. *hvm = "vmx";
  2192. if ((regs.r_edx >> 6) & 1)
  2193. *pae = 1;
  2194. }
  2195. }
  2196. ret = 1;
  2197. out:
  2198. VIR_FORCE_CLOSE(fd);
  2199. return ret;
  2200. }
  2201. static virCapsPtr
  2202. xenHypervisorMakeCapabilitiesSunOS(virConnectPtr conn)
  2203. {
  2204. struct guest_arch guest_arches[32];
  2205. int i = 0;
  2206. virCapsPtr caps = NULL;
  2207. struct utsname utsname;
  2208. int pae, longmode;
  2209. const char *hvm;
  2210. if (!get_cpu_flags(conn, &hvm, &pae, &longmode))
  2211. return NULL;
  2212. /* Really, this never fails - look at the man-page. */
  2213. uname (&utsname);
  2214. guest_arches[i].model = "i686";
  2215. guest_arches[i].bits = 32;
  2216. guest_arches[i].hvm = 0;
  2217. guest_arches[i].pae = pae;
  2218. guest_arches[i].nonpae = !pae;
  2219. guest_arches[i].ia64_be = 0;
  2220. i++;
  2221. if (longmode) {
  2222. guest_arches[i].model = "x86_64";
  2223. guest_arches[i].bits = 64;
  2224. guest_arches[i].hvm = 0;
  2225. guest_arches[i].pae = 0;
  2226. guest_arches[i].nonpae = 0;
  2227. guest_arches[i].ia64_be = 0;
  2228. i++;
  2229. }
  2230. if (hvm[0] != '\0') {
  2231. guest_arches[i].model = "i686";
  2232. guest_arches[i].bits = 32;
  2233. guest_arches[i].hvm = 1;
  2234. guest_arches[i].pae = pae;
  2235. guest_arches[i].nonpae = 1;
  2236. guest_arches[i].ia64_be = 0;
  2237. i++;
  2238. if (longmode) {
  2239. guest_arches[i].model = "x86_64";
  2240. guest_arches[i].bits = 64;
  2241. guest_arches[i].hvm = 1;
  2242. guest_arches[i].pae = 0;
  2243. guest_arches[i].nonpae = 0;
  2244. guest_arches[i].ia64_be = 0;
  2245. i++;
  2246. }
  2247. }
  2248. if ((caps = xenHypervisorBuildCapabilities(conn,
  2249. utsname.machine,
  2250. pae, hvm,
  2251. guest_arches, i)) == NULL)
  2252. virReportOOMError();
  2253. return caps;
  2254. }
  2255. #endif /* __sun */
/**
 * xenHypervisorMakeCapabilitiesInternal:
 * @conn: pointer to the connection block
 * @hostmachine: host architecture name, e.g. from uname()
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Parse the two pseudo-files (either may be NULL) and build the
 * capability description for this hypervisor.
 *
 * Return the capabilities of this hypervisor, or NULL on failure.
 */
virCapsPtr
xenHypervisorMakeCapabilitiesInternal(virConnectPtr conn,
                                      const char *hostmachine,
                                      FILE *cpuinfo, FILE *capabilities)
{
    char line[1024], *str, *token;
    regmatch_t subs[4];
    char *saveptr = NULL;
    int i;

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
    struct guest_arch guest_archs[32];
    int nr_guest_archs = 0;
    virCapsPtr caps = NULL;

    memset(guest_archs, 0, sizeof(guest_archs));

    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
    if (cpuinfo) {
        while (fgets (line, sizeof(line), cpuinfo)) {
            if (regexec (&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
                && subs[0].rm_so != -1) {
                /* NOTE(review): virStrncpy failure here means truncation,
                 * yet the no_memory label reports OOM — misleading error,
                 * but preserved as-is. */
                if (virStrncpy(hvm_type,
                               &line[subs[1].rm_so],
                               subs[1].rm_eo-subs[1].rm_so,
                               sizeof(hvm_type)) == NULL)
                    goto no_memory;
            } else if (regexec (&flags_pae_rec, line, 0, NULL, 0) == 0)
                host_pae = 1;
        }
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */

    /* Expecting one line in this file - ignore any more. */
    if ((capabilities) && (fgets (line, sizeof(line), capabilities))) {
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof(guest_archs) / sizeof(guest_archs[0])
                 && (token = strtok_r (str, " ", &saveptr)) != NULL;
             str = NULL) {
            if (regexec (&xen_cap_rec, token, sizeof(subs) / sizeof(subs[0]),
                         subs, 0) == 0) {
                int hvm = STRPREFIX(&token[subs[1].rm_so], "hvm");
                const char *model;
                int bits, pae = 0, nonpae = 0, ia64_be = 0;

                /* subs[2] is the arch field, subs[3] the optional suffix. */
                if (STRPREFIX(&token[subs[2].rm_so], "x86_32")) {
                    model = "i686";
                    bits = 32;
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "p"))
                        pae = 1;
                    else
                        nonpae = 1;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "x86_64")) {
                    model = "x86_64";
                    bits = 64;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "ia64")) {
                    model = "ia64";
                    bits = 64;
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "be"))
                        ia64_be = 1;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "powerpc64")) {
                    model = "ppc64";
                    bits = 64;
                } else {
                    /* XXX surely no other Xen archs exist  */
                    continue;
                }

                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (STREQ(guest_archs[i].model, model) &&
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }

                /* Too many arch flavours - highly unlikely ! */
                if (i >= ARRAY_CARDINALITY(guest_archs))
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

                guest_archs[i].model = model;
                guest_archs[i].bits = bits;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports
                   separately capabilities so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
            }
        }
    }

    if ((caps = xenHypervisorBuildCapabilities(conn,
                                               hostmachine,
                                               host_pae,
                                               hvm_type,
                                               guest_archs,
                                               nr_guest_archs)) == NULL)
        goto no_memory;

    return caps;

 no_memory:
    virReportOOMError();
    virCapabilitiesFree(caps);
    return NULL;
}
  2402. /**
  2403. * xenHypervisorMakeCapabilities:
  2404. *
  2405. * Return the capabilities of this hypervisor.
  2406. */
  2407. virCapsPtr
  2408. xenHypervisorMakeCapabilities(virConnectPtr conn)
  2409. {
  2410. #ifdef __sun
  2411. return xenHypervisorMakeCapabilitiesSunOS(conn);
  2412. #else
  2413. virCapsPtr caps;
  2414. FILE *cpuinfo, *capabilities;
  2415. struct utsname utsname;
  2416. /* Really, this never fails - look at the man-page. */
  2417. uname (&utsname);
  2418. cpuinfo = fopen ("/proc/cpuinfo", "r");
  2419. if (cpuinfo == NULL) {
  2420. if (errno != ENOENT) {
  2421. virReportSystemError(errno,
  2422. _("cannot read file %s"),
  2423. "/proc/cpuinfo");
  2424. return NULL;
  2425. }
  2426. }
  2427. capabilities = fopen ("/sys/hypervisor/properties/capabilities", "r");
  2428. if (capabilities == NULL) {
  2429. if (errno != ENOENT) {
  2430. VIR_FORCE_FCLOSE(cpuinfo);
  2431. virReportSystemError(errno,
  2432. _("cannot read file %s"),
  2433. "/sys/hypervisor/properties/capabilities");
  2434. return NULL;
  2435. }
  2436. }
  2437. caps = xenHypervisorMakeCapabilitiesInternal(conn,
  2438. utsname.machine,
  2439. cpuinfo,
  2440. capabilities);
  2441. if (caps == NULL)
  2442. return NULL;
  2443. if (virNodeSuspendGetTargetMask(&caps->host.powerMgmt) < 0)
  2444. VIR_WARN("Failed to get host power management capabilities");
  2445. VIR_FORCE_FCLOSE(cpuinfo);
  2446. VIR_FORCE_FCLOSE(capabilities);
  2447. return caps;
  2448. #endif /* __sun */
  2449. }
  2450. /**
  2451. * xenHypervisorGetCapabilities:
  2452. * @conn: pointer to the connection block
  2453. *
  2454. * Return the capabilities of this hypervisor.
  2455. */
  2456. char *
  2457. xenHypervisorGetCapabilities (virConnectPtr conn)
  2458. {
  2459. xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;
  2460. char *xml;
  2461. if (!(xml = virCapabilitiesFormatXML(priv->caps))) {
  2462. virReportOOMError();
  2463. return NULL;
  2464. }
  2465. return xml;
  2466. }
  2467. /**
  2468. * xenHypervisorNumOfDomains:
  2469. * @conn: pointer to the connection block
  2470. *
  2471. * Provides the number of active domains.
  2472. *
  2473. * Returns the number of domain found or -1 in case of error
  2474. */
  2475. int
  2476. xenHypervisorNumOfDomains(virConnectPtr conn)
  2477. {
  2478. xen_getdomaininfolist dominfos;
  2479. int ret, nbids;
  2480. static int last_maxids = 2;
  2481. int maxids = last_maxids;
  2482. xenUnifiedPrivatePtr priv;
  2483. if (conn == NULL)
  2484. return -1;
  2485. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2486. if (priv->handle < 0)
  2487. return -1;
  2488. retry:
  2489. if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
  2490. virReportOOMError();
  2491. return -1;
  2492. }
  2493. XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
  2494. ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
  2495. XEN_GETDOMAININFOLIST_FREE(dominfos);
  2496. if (ret < 0)
  2497. return -1;
  2498. nbids = ret;
  2499. /* Can't possibly have more than 65,000 concurrent guests
  2500. * so limit how many times we try, to avoid increasing
  2501. * without bound & thus allocating all of system memory !
  2502. * XXX I'll regret this comment in a few years time ;-)
  2503. */
  2504. if (nbids == maxids) {
  2505. if (maxids < 65000) {
  2506. last_maxids *= 2;
  2507. maxids *= 2;
  2508. goto retry;
  2509. }
  2510. nbids = -1;
  2511. }
  2512. if ((nbids < 0) || (nbids > maxids))
  2513. return -1;
  2514. return nbids;
  2515. }
  2516. /**
  2517. * xenHypervisorListDomains:
  2518. * @conn: pointer to the connection block
  2519. * @ids: array to collect the list of IDs of active domains
  2520. * @maxids: size of @ids
  2521. *
  2522. * Collect the list of active domains, and store their ID in @maxids
  2523. *
  2524. * Returns the number of domain found or -1 in case of error
  2525. */
  2526. int
  2527. xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
  2528. {
  2529. xen_getdomaininfolist dominfos;
  2530. int ret, nbids, i;
  2531. xenUnifiedPrivatePtr priv;
  2532. if (conn == NULL)
  2533. return -1;
  2534. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2535. if (priv->handle < 0 ||
  2536. (ids == NULL) || (maxids < 0))
  2537. return -1;
  2538. if (maxids == 0)
  2539. return 0;
  2540. if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
  2541. virReportOOMError();
  2542. return -1;
  2543. }
  2544. XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
  2545. memset(ids, 0, maxids * sizeof(int));
  2546. ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
  2547. if (ret < 0) {
  2548. XEN_GETDOMAININFOLIST_FREE(dominfos);
  2549. return -1;
  2550. }
  2551. nbids = ret;
  2552. if ((nbids < 0) || (nbids > maxids)) {
  2553. XEN_GETDOMAININFOLIST_FREE(dominfos);
  2554. return -1;
  2555. }
  2556. for (i = 0;i < nbids;i++) {
  2557. ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
  2558. }
  2559. XEN_GETDOMAININFOLIST_FREE(dominfos);
  2560. return nbids;
  2561. }
  2562. char *
  2563. xenHypervisorDomainGetOSType (virDomainPtr dom)
  2564. {
  2565. xenUnifiedPrivatePtr priv;
  2566. xen_getdomaininfo dominfo;
  2567. char *ostype = NULL;
  2568. priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
  2569. if (priv->handle < 0) {
  2570. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  2571. _("domain shut off or invalid"));
  2572. return NULL;
  2573. }
  2574. /* HV's earlier than 3.1.0 don't include the HVM flags in guests status*/
  2575. if (hv_versions.hypervisor < 2 ||
  2576. hv_versions.dom_interface < 4) {
  2577. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  2578. _("unsupported in dom interface < 4"));
  2579. return NULL;
  2580. }
  2581. XEN_GETDOMAININFO_CLEAR(dominfo);
  2582. if (virXen_getdomaininfo(priv->handle, dom->id, &dominfo) < 0) {
  2583. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  2584. _("cannot get domain details"));
  2585. return NULL;
  2586. }
  2587. if (XEN_GETDOMAININFO_DOMAIN(dominfo) != dom->id) {
  2588. virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
  2589. _("cannot get domain details"));
  2590. return NULL;
  2591. }
  2592. if (XEN_GETDOMAININFO_FLAGS(dominfo) & DOMFLAGS_HVM)
  2593. ostype = strdup("hvm");
  2594. else
  2595. ostype = strdup("linux");
  2596. if (ostype == NULL)
  2597. virReportOOMError();
  2598. return ostype;
  2599. }
  2600. int
  2601. xenHypervisorHasDomain(virConnectPtr conn,
  2602. int id)
  2603. {
  2604. xenUnifiedPrivatePtr priv;
  2605. xen_getdomaininfo dominfo;
  2606. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2607. if (priv->handle < 0)
  2608. return 0;
  2609. XEN_GETDOMAININFO_CLEAR(dominfo);
  2610. if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
  2611. return 0;
  2612. if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
  2613. return 0;
  2614. return 1;
  2615. }
  2616. virDomainPtr
  2617. xenHypervisorLookupDomainByID(virConnectPtr conn,
  2618. int id)
  2619. {
  2620. xenUnifiedPrivatePtr priv;
  2621. xen_getdomaininfo dominfo;
  2622. virDomainPtr ret;
  2623. char *name;
  2624. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2625. if (priv->handle < 0)
  2626. return NULL;
  2627. XEN_GETDOMAININFO_CLEAR(dominfo);
  2628. if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
  2629. return NULL;
  2630. if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
  2631. return NULL;
  2632. xenUnifiedLock(priv);
  2633. name = xenStoreDomainGetName(conn, id);
  2634. xenUnifiedUnlock(priv);
  2635. if (!name)
  2636. return NULL;
  2637. ret = virGetDomain(conn, name, XEN_GETDOMAININFO_UUID(dominfo));
  2638. if (ret)
  2639. ret->id = id;
  2640. VIR_FREE(name);
  2641. return ret;
  2642. }
/**
 * xenHypervisorLookupDomainByUUID:
 * @conn: pointer to the hypervisor connection
 * @uuid: raw UUID of the domain to find (VIR_UUID_BUFLEN bytes)
 *
 * Fetch the list of running domains from the hypervisor, scan it for a
 * domain whose UUID matches @uuid, and wrap the match in a virDomainPtr.
 * The list buffer is doubled and the query retried until the whole
 * domain list fits.
 *
 * Returns the domain object, or NULL if no match was found or on error.
 */
virDomainPtr
xenHypervisorLookupDomainByUUID(virConnectPtr conn,
                                const unsigned char *uuid)
{
    xen_getdomaininfolist dominfos;
    xenUnifiedPrivatePtr priv;
    virDomainPtr ret;
    char *name;
    int maxids = 100, nids, i, id;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return NULL;

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();
        return NULL;
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    nids = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
    if (nids < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return NULL;
    }

    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nids == maxids) {
        /* Buffer came back completely full, so there may be more
         * domains than it can hold: free, double, and query again. */
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        if (maxids < 65000) {
            maxids *= 2;
            goto retry;
        }
        return NULL;
    }

    /* Linear scan for the matching UUID; id stays -1 if absent. */
    id = -1;
    for (i = 0 ; i < nids ; i++) {
        if (memcmp(XEN_GETDOMAININFOLIST_UUID(dominfos, i), uuid, VIR_UUID_BUFLEN) == 0) {
            id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
            break;
        }
    }
    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (id == -1)
        return NULL;

    /* The human-readable name comes from the xenstore, not the
     * hypervisor; xenstore access is guarded by the driver lock. */
    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);
    if (!name)
        return NULL;

    ret = virGetDomain(conn, name, uuid);
    if (ret)
        ret->id = id;
    VIR_FREE(name);
    return ret;
}
  2700. /**
  2701. * xenHypervisorGetMaxVcpus:
  2702. *
  2703. * Returns the maximum of CPU defined by Xen.
  2704. */
  2705. int
  2706. xenHypervisorGetMaxVcpus(virConnectPtr conn,
  2707. const char *type ATTRIBUTE_UNUSED)
  2708. {
  2709. xenUnifiedPrivatePtr priv;
  2710. if (conn == NULL)
  2711. return -1;
  2712. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2713. if (priv->handle < 0)
  2714. return -1;
  2715. return MAX_VIRT_CPUS;
  2716. }
  2717. /**
  2718. * xenHypervisorGetDomMaxMemory:
  2719. * @conn: connection data
  2720. * @id: domain id
  2721. *
  2722. * Retrieve the maximum amount of physical memory allocated to a
  2723. * domain.
  2724. *
  2725. * Returns the memory size in kilobytes or 0 in case of error.
  2726. */
  2727. unsigned long
  2728. xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
  2729. {
  2730. xenUnifiedPrivatePtr priv;
  2731. xen_getdomaininfo dominfo;
  2732. int ret;
  2733. if (conn == NULL)
  2734. return 0;
  2735. priv = (xenUnifiedPrivatePtr) conn->privateData;
  2736. if (priv->handle < 0)
  2737. return 0;
  2738. if (kb_per_pages == 0) {
  2739. kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
  2740. if (kb_per_pages <= 0)
  2741. kb_per_pages = 4;
  2742. }
  2743. XEN_GETDOMAININFO_CLEAR(dominfo);
  2744. ret = virXen_getdomaininfo(priv->handle, id, &dominfo);
  2745. if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
  2746. return 0;
  2747. return (unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages;
  2748. }
  2749. /**
  2750. * xenHypervisorGetMaxMemory:
  2751. * @domain: a domain object or NULL
  2752. *
  2753. * Retrieve the maximum amount of physical memory allocated to a
  2754. * domain. If domain is NULL, then this get the amount of memory reserved
  2755. * to Domain0 i.e. the domain where the application runs.
  2756. *
  2757. * Returns the memory size in kilobytes or 0 in case of error.
  2758. */
  2759. static unsigned long long ATTRIBUTE_NONNULL (1)
  2760. xenHypervisorGetMaxMemory(virDomainPtr domain)
  2761. {
  2762. xenUnifiedPrivatePtr priv;
  2763. if (domain->conn == NULL)
  2764. return 0;
  2765. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  2766. if (priv->handle < 0 || domain->id < 0)
  2767. return 0;
  2768. return xenHypervisorGetDomMaxMemory(domain->conn, domain->id);
  2769. }
/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information,
 * translating Xen's DOMFLAGS_* state bits into libvirt domain states.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    /* Lazily cache the page size in KiB; fall back to 4 KiB pages if
     * sysconf() fails or returns a nonsensical value. */
    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)
        return -1;

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);
    /* The hypercall can succeed yet return data for a different
     * domain; treat that as "domain not found". */
    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return -1;

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flags */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            /* The domain is shutdown. Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    /* Any other shutdown reason maps to plain SHUTOFF. */
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NOSTATE;
    }

    /*
     * the API brings back the cpu time in nanoseconds,
     * convert to microseconds, same thing convert to
     * kilobytes from page counts
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    /* NOTE(review): UINT_MAX is left unscaled — presumably Xen's
     * "unlimited" marker that must keep its sentinel value; confirm. */
    if(info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
    return 0;
}
  2845. /**
  2846. * xenHypervisorGetDomainInfo:
  2847. * @domain: pointer to the domain block
  2848. * @info: the place where information should be stored
  2849. *
  2850. * Do a hypervisor call to get the related set of domain information.
  2851. *
  2852. * Returns 0 in case of success, -1 in case of error.
  2853. */
  2854. int
  2855. xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
  2856. {
  2857. xenUnifiedPrivatePtr priv;
  2858. if (domain->conn == NULL)
  2859. return -1;
  2860. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  2861. if (priv->handle < 0 || info == NULL ||
  2862. (domain->id < 0))
  2863. return -1;
  2864. return xenHypervisorGetDomInfo(domain->conn, domain->id, info);
  2865. }
  2866. /**
  2867. * xenHypervisorGetDomainState:
  2868. * @domain: pointer to the domain block
  2869. * @state: returned state of the domain
  2870. * @reason: returned reason for the state
  2871. * @flags: additional flags, 0 for now
  2872. *
  2873. * Do a hypervisor call to get the related set of domain information.
  2874. *
  2875. * Returns 0 in case of success, -1 in case of error.
  2876. */
  2877. int
  2878. xenHypervisorGetDomainState(virDomainPtr domain,
  2879. int *state,
  2880. int *reason,
  2881. unsigned int flags)
  2882. {
  2883. xenUnifiedPrivatePtr priv = domain->conn->privateData;
  2884. virDomainInfo info;
  2885. virCheckFlags(0, -1);
  2886. if (domain->conn == NULL)
  2887. return -1;
  2888. if (priv->handle < 0 || domain->id < 0)
  2889. return -1;
  2890. if (xenHypervisorGetDomInfo(domain->conn, domain->id, &info) < 0)
  2891. return -1;
  2892. *state = info.state;
  2893. if (reason)
  2894. *reason = 0;
  2895. return 0;
  2896. }
/**
 * xenHypervisorNodeGetCellsFreeMemory:
 * @conn: pointer to the hypervisor connection
 * @freeMems: pointer to the array of unsigned long long
 * @startCell: index of first cell to return freeMems info on.
 * @maxCells: Maximum number of cells for which freeMems information can
 * be returned.
 *
 * This call returns the amount of free memory in one or more NUMA cells.
 * The @freeMems array must be allocated by the caller and will be filled
 * with the amount of free memory in kilobytes for each cell requested,
 * starting with startCell (in freeMems[0]), up to either
 * (startCell + maxCells), or the number of additional cells in the node,
 * whichever is smaller.
 *
 * Returns the number of entries filled in freeMems, or -1 in case of error.
 */
int
xenHypervisorNodeGetCellsFreeMemory(virConnectPtr conn, unsigned long long *freeMems,
                                    int startCell, int maxCells)
{
    xen_op_v2_sys op_sys;
    int i, j, ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL) {
        virXenError(VIR_ERR_INVALID_ARG, "%s", _("invalid argument"));
        return -1;
    }

    priv = conn->privateData;
    /* nbNodeCells is cached elsewhere in the driver; a negative value
     * means the cell count was never successfully determined. */
    if (priv->nbNodeCells < 0) {
        virXenError(VIR_ERR_XEN_CALL, "%s",
                    _("cannot determine actual number of cells"));
        return -1;
    }

    if ((maxCells < 1) || (startCell >= priv->nbNodeCells)) {
        virXenError(VIR_ERR_INVALID_ARG, "%s",
                    _("invalid argument"));
        return -1;
    }

    /*
     * Support only hv_versions.sys_interface >=4
     */
    if (hv_versions.sys_interface < SYS_IFACE_MIN_VERS_NUMA) {
        virXenError(VIR_ERR_XEN_CALL, "%s",
                    _("unsupported in sys interface < 4"));
        return -1;
    }

    if (priv->handle < 0) {
        virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
                    _("priv->handle invalid"));
        return -1;
    }

    memset(&op_sys, 0, sizeof(op_sys));
    op_sys.cmd = XEN_V2_OP_GETAVAILHEAP;

    /* One GETAVAILHEAP hypercall per cell: i walks the node cells from
     * startCell, j tracks how many freeMems slots have been filled.
     * The availheap struct layout changed at sys interface version 5,
     * hence the two access paths. */
    for (i = startCell, j = 0;(i < priv->nbNodeCells) && (j < maxCells);i++,j++) {
        if (hv_versions.sys_interface >= 5)
            op_sys.u.availheap5.node = i;
        else
            op_sys.u.availheap.node = i;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0) {
            return -1;
        }
        if (hv_versions.sys_interface >= 5)
            freeMems[j] = op_sys.u.availheap5.avail_bytes;
        else
            freeMems[j] = op_sys.u.availheap.avail_bytes;
    }
    /* j is the count of entries actually filled. */
    return j;
}
  2967. /**
  2968. * xenHypervisorPauseDomain:
  2969. * @domain: pointer to the domain block
  2970. *
  2971. * Do a hypervisor call to pause the given domain
  2972. *
  2973. * Returns 0 in case of success, -1 in case of error.
  2974. */
  2975. int
  2976. xenHypervisorPauseDomain(virDomainPtr domain)
  2977. {
  2978. int ret;
  2979. xenUnifiedPrivatePtr priv;
  2980. if (domain->conn == NULL)
  2981. return -1;
  2982. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  2983. if (priv->handle < 0 || domain->id < 0)
  2984. return -1;
  2985. ret = virXen_pausedomain(priv->handle, domain->id);
  2986. if (ret < 0)
  2987. return -1;
  2988. return 0;
  2989. }
  2990. /**
  2991. * xenHypervisorResumeDomain:
  2992. * @domain: pointer to the domain block
  2993. *
  2994. * Do a hypervisor call to resume the given domain
  2995. *
  2996. * Returns 0 in case of success, -1 in case of error.
  2997. */
  2998. int
  2999. xenHypervisorResumeDomain(virDomainPtr domain)
  3000. {
  3001. int ret;
  3002. xenUnifiedPrivatePtr priv;
  3003. if (domain->conn == NULL)
  3004. return -1;
  3005. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  3006. if (priv->handle < 0 || domain->id < 0)
  3007. return -1;
  3008. ret = virXen_unpausedomain(priv->handle, domain->id);
  3009. if (ret < 0)
  3010. return -1;
  3011. return 0;
  3012. }
  3013. /**
  3014. * xenHypervisorDestroyDomainFlags:
  3015. * @domain: pointer to the domain block
  3016. * @flags: an OR'ed set of virDomainDestroyFlagsValues
  3017. *
  3018. * Do a hypervisor call to destroy the given domain
  3019. *
  3020. * Calling this function with no @flags set (equal to zero)
  3021. * is equivalent to calling xenHypervisorDestroyDomain.
  3022. *
  3023. * Returns 0 in case of success, -1 in case of error.
  3024. */
  3025. int
  3026. xenHypervisorDestroyDomainFlags(virDomainPtr domain,
  3027. unsigned int flags)
  3028. {
  3029. int ret;
  3030. xenUnifiedPrivatePtr priv;
  3031. virCheckFlags(0, -1);
  3032. if (domain->conn == NULL)
  3033. return -1;
  3034. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  3035. if (priv->handle < 0 || domain->id < 0)
  3036. return -1;
  3037. ret = virXen_destroydomain(priv->handle, domain->id);
  3038. if (ret < 0)
  3039. return -1;
  3040. return 0;
  3041. }
  3042. /**
  3043. * xenHypervisorSetMaxMemory:
  3044. * @domain: pointer to the domain block
  3045. * @memory: the max memory size in kilobytes.
  3046. *
  3047. * Do a hypervisor call to change the maximum amount of memory used
  3048. *
  3049. * Returns 0 in case of success, -1 in case of error.
  3050. */
  3051. int
  3052. xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
  3053. {
  3054. int ret;
  3055. xenUnifiedPrivatePtr priv;
  3056. if (domain->conn == NULL)
  3057. return -1;
  3058. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  3059. if (priv->handle < 0 || domain->id < 0)
  3060. return -1;
  3061. ret = virXen_setmaxmem(priv->handle, domain->id, memory);
  3062. if (ret < 0)
  3063. return -1;
  3064. return 0;
  3065. }
  3066. /**
  3067. * xenHypervisorSetVcpus:
  3068. * @domain: pointer to domain object
  3069. * @nvcpus: the new number of virtual CPUs for this domain
  3070. *
  3071. * Dynamically change the number of virtual CPUs used by the domain.
  3072. *
  3073. * Returns 0 in case of success, -1 in case of failure.
  3074. */
  3075. int
  3076. xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
  3077. {
  3078. int ret;
  3079. xenUnifiedPrivatePtr priv;
  3080. if (domain->conn == NULL)
  3081. return -1;
  3082. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  3083. if (priv->handle < 0 || domain->id < 0 || nvcpus < 1)
  3084. return -1;
  3085. ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
  3086. if (ret < 0)
  3087. return -1;
  3088. return 0;
  3089. }
  3090. /**
  3091. * xenHypervisorPinVcpu:
  3092. * @domain: pointer to domain object
  3093. * @vcpu: virtual CPU number
  3094. * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
  3095. * @maplen: length of cpumap in bytes
  3096. *
  3097. * Dynamically change the real CPUs which can be allocated to a virtual CPU.
  3098. *
  3099. * Returns 0 in case of success, -1 in case of failure.
  3100. */
  3101. int
  3102. xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
  3103. unsigned char *cpumap, int maplen)
  3104. {
  3105. int ret;
  3106. xenUnifiedPrivatePtr priv;
  3107. if (domain->conn == NULL)
  3108. return -1;
  3109. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  3110. if (priv->handle < 0 || (domain->id < 0) ||
  3111. (cpumap == NULL) || (maplen < 1))
  3112. return -1;
  3113. ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
  3114. cpumap, maplen);
  3115. if (ret < 0)
  3116. return -1;
  3117. return 0;
  3118. }
/**
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 * If cpumaps is NULL, then no cpumap information is returned by the API.
 * It's assumed there is <maxinfo> cpumap in cpumaps array.
 * The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 * (ie: calloc(maxinfo, maplen)).
 * One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 * underlying virtualization system (Xen...).
 *
 * Extract information about virtual CPUs of domain, store it in info array
 * and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
 */
int
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
{
    xen_getdomaininfo dominfo;
    int ret;
    xenUnifiedPrivatePtr priv;
    virVcpuInfoPtr ipt;
    int nbinfo, i;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    /* The (sizeof(cpumap_t) & 7) test rejects builds where the Xen
     * cpumap type is not a multiple of 8 bytes. */
    if (priv->handle < 0 || (domain->id < 0) ||
        (info == NULL) || (maxinfo < 1) ||
        (sizeof(cpumap_t) & 7)) {
        virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
                    _("domain shut off or invalid"));
        return -1;
    }
    if ((cpumaps != NULL) && (maplen < 1)) {
        virXenError(VIR_ERR_INVALID_ARG, "%s",
                    _("invalid argument"));
        return -1;
    }

    /* first get the number of virtual CPUs in this domain */
    XEN_GETDOMAININFO_CLEAR(dominfo);
    ret = virXen_getdomaininfo(priv->handle, domain->id,
                               &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id)) {
        virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
                    _("cannot get domain details"));
        return -1;
    }
    /* NOTE(review): the +1 here suggests CPUCOUNT holds the highest
     * VCPU id rather than a count — confirm against the Xen macro. */
    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    /* Query each VCPU individually; when cpumaps was supplied, the
     * per-VCPU affinity map is written into the caller's buffer too. */
    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                                      maplen);
            if (ret < 0) {
                virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("cannot get VCPUs info"));
                return -1;
            }
        } else {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt, NULL, 0);
            if (ret < 0) {
                virXenError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("cannot get VCPUs info"));
                return -1;
            }
        }
    }
    return nbinfo;
}
  3198. /**
  3199. * xenHypervisorGetVcpuMax:
  3200. *
  3201. * Returns the maximum number of virtual CPUs supported for
  3202. * the guest VM. If the guest is inactive, this is the maximum
  3203. * of CPU defined by Xen. If the guest is running this reflect
  3204. * the maximum number of virtual CPUs the guest was booted with.
  3205. */
  3206. int
  3207. xenHypervisorGetVcpuMax(virDomainPtr domain)
  3208. {
  3209. xen_getdomaininfo dominfo;
  3210. int ret;
  3211. int maxcpu;
  3212. xenUnifiedPrivatePtr priv;
  3213. if (domain->conn == NULL)
  3214. return -1;
  3215. priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
  3216. if (priv->handle < 0)
  3217. return -1;
  3218. /* inactive domain */
  3219. if (domain->id < 0) {
  3220. maxcpu = MAX_VIRT_CPUS;
  3221. } else {
  3222. XEN_GETDOMAININFO_CLEAR(dominfo);
  3223. ret = virXen_getdomaininfo(priv->handle, domain->id,
  3224. &dominfo);
  3225. if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
  3226. return -1;
  3227. maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
  3228. }
  3229. return maxcpu;
  3230. }
/**
 * xenHavePrivilege()
 *
 * Return true if the current process should be able to connect to Xen.
 */
int
xenHavePrivilege(void)
{
#ifdef __sun
    /* Solaris: check for the xvm_control privilege in the effective set. */
    return priv_ineffect (PRIV_XVM_CONTROL);
#else
    /* Elsewhere: read access to the hypervisor device node is the test. */
    return access(XEN_HYPERVISOR_SOCKET, R_OK) == 0;
#endif
}