PageRenderTime 77ms CodeModel.GetById 21ms RepoModel.GetById 1ms app.codeStats 0ms

/pkg/cloudprovider/providers/vsphere/vsphere_util.go

https://gitlab.com/unofficial-mirrors/kubernetes
Go | 579 lines | 486 code | 45 blank | 48 comment | 150 complexity | a7400af35407b5f6b785d20fee0c84c4 MD5 | raw file
  1. /*
  2. Copyright 2017 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package vsphere
import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/mo"
	"k8s.io/api/core/v1"
	k8stypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
	"k8s.io/kubernetes/pkg/util/version"
)
const (
	// DatastoreProperty / DatastoreInfoProperty are vSphere managed-object
	// property names (presumably used by property-collector queries elsewhere
	// in this package — not referenced in this file).
	DatastoreProperty     = "datastore"
	DatastoreInfoProperty = "info"
	// Folder and VirtualMachine are vSphere managed-object type names
	// (not referenced in this file; used elsewhere in the provider).
	Folder         = "Folder"
	VirtualMachine = "VirtualMachine"
	// DummyDiskName is a non-existent disk name probed in
	// getcanonicalVolumePath to coax the datastore folder ID out of the
	// resulting error message.
	DummyDiskName = "kube-dummyDisk.vmdk"
	// UUIDPath is the sysfs file read by GetVMUUID for the VM's BIOS serial.
	UUIDPath = "/sys/class/dmi/id/product_serial"
	// UUIDPrefix is the vendor prefix GetVMUUID expects in that serial.
	UUIDPrefix = "VMware-"
	// ProviderPrefix is the scheme GetUUIDFromProviderID strips from a
	// node's Spec.ProviderID.
	ProviderPrefix = "vsphere://"
	// vSphereConfFileEnvVar names the environment variable that points
	// getVSphereConfig at the vSphere configuration file.
	vSphereConfFileEnvVar = "VSPHERE_CONF_FILE"
)
  44. // GetVSphere reads vSphere configuration from system environment and construct vSphere object
  45. func GetVSphere() (*VSphere, error) {
  46. cfg, err := getVSphereConfig()
  47. if err != nil {
  48. return nil, err
  49. }
  50. vs, err := newControllerNode(*cfg)
  51. if err != nil {
  52. return nil, err
  53. }
  54. return vs, nil
  55. }
  56. func getVSphereConfig() (*VSphereConfig, error) {
  57. confFileLocation := os.Getenv(vSphereConfFileEnvVar)
  58. if confFileLocation == "" {
  59. return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set.")
  60. }
  61. confFile, err := os.Open(confFileLocation)
  62. if err != nil {
  63. return nil, err
  64. }
  65. defer confFile.Close()
  66. cfg, err := readConfig(confFile)
  67. if err != nil {
  68. return nil, err
  69. }
  70. return &cfg, nil
  71. }
  72. func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection {
  73. vSphereConn := &vclib.VSphereConnection{
  74. Username: cfg.Global.User,
  75. Password: cfg.Global.Password,
  76. Hostname: cfg.Global.VCenterIP,
  77. Insecure: cfg.Global.InsecureFlag,
  78. RoundTripperCount: cfg.Global.RoundTripperCount,
  79. Port: cfg.Global.VCenterPort,
  80. }
  81. return vSphereConn
  82. }
// Returns the accessible datastores for the given node VM.
//
// If the initial query fails with ManagedObjectNotFound, the cached VM
// reference in the node manager is assumed stale: the node is rediscovered
// once and the query retried against the refreshed VM. Any other error is
// returned unchanged.
func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
	accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx)
	if err != nil {
		// Check if the node VM is not found which indicates that the node info in the node manager is stale.
		// If so, rediscover the node and retry.
		if vclib.IsManagedObjectNotFoundError(err) {
			glog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName)
			err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName))
			if err == nil {
				glog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName)
				// Fetch the refreshed VM reference and retry the datastore
				// query exactly once.
				nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName))
				if err != nil {
					glog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail)
					return nil, err
				}
				// Assigns the outer accessibleDatastores; err here is the
				// inner (shadowed) variable declared just above.
				accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx)
				if err != nil {
					glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
					return nil, err
				}
			} else {
				glog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail)
				return nil, err
			}
		} else {
			glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
			return nil, err
		}
	}
	return accessibleDatastores, nil
}
  115. // Get all datastores accessible for the virtual machine object.
  116. func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
  117. nodeVmDetails, err := nodeManager.GetNodeDetails()
  118. if err != nil {
  119. glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
  120. return nil, err
  121. }
  122. if len(nodeVmDetails) == 0 {
  123. msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails)
  124. glog.Error(msg)
  125. return nil, fmt.Errorf(msg)
  126. }
  127. var sharedDatastores []*vclib.DatastoreInfo
  128. for _, nodeVmDetail := range nodeVmDetails {
  129. glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName)
  130. accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager)
  131. if err != nil {
  132. if err == vclib.ErrNoVMFound {
  133. glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName)
  134. continue
  135. }
  136. return nil, err
  137. }
  138. if len(sharedDatastores) == 0 {
  139. sharedDatastores = accessibleDatastores
  140. } else {
  141. sharedDatastores = intersect(sharedDatastores, accessibleDatastores)
  142. if len(sharedDatastores) == 0 {
  143. return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster for nodeVmDetails: %+v", nodeVmDetails)
  144. }
  145. }
  146. }
  147. glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
  148. sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
  149. if err != nil {
  150. glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
  151. return nil, err
  152. }
  153. glog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores)
  154. return sharedDatastores, nil
  155. }
  156. func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo {
  157. glog.V(9).Infof("list1: %+v", list1)
  158. glog.V(9).Infof("list2: %+v", list2)
  159. var sharedDs []*vclib.DatastoreInfo
  160. for _, val1 := range list1 {
  161. // Check if val1 is found in list2
  162. for _, val2 := range list2 {
  163. // Intersection is performed based on the datastoreUrl as this uniquely identifies the datastore.
  164. if val1.Info.Url == val2.Info.Url {
  165. sharedDs = append(sharedDs, val1)
  166. break
  167. }
  168. }
  169. }
  170. return sharedDs
  171. }
  172. // getMostFreeDatastore gets the best fit compatible datastore by free space.
  173. func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (string, error) {
  174. var curMax int64
  175. curMax = -1
  176. var index int
  177. for i, dsInfo := range dsInfoList {
  178. dsFreeSpace := dsInfo.Info.GetDatastoreInfo().FreeSpace
  179. if dsFreeSpace > curMax {
  180. curMax = dsFreeSpace
  181. index = i
  182. }
  183. }
  184. return dsInfoList[index].Info.GetDatastoreInfo().Name, nil
  185. }
  186. // Returns the datastores in the given datacenter by performing lookup based on datastore URL.
  187. func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, sharedDsInfos []*vclib.DatastoreInfo) ([]*vclib.DatastoreInfo, error) {
  188. var datastores []*vclib.DatastoreInfo
  189. allDsInfoMap, err := dc.GetAllDatastores(ctx)
  190. if err != nil {
  191. return nil, err
  192. }
  193. for _, sharedDsInfo := range sharedDsInfos {
  194. dsInfo, ok := allDsInfoMap[sharedDsInfo.Info.Url]
  195. if ok {
  196. datastores = append(datastores, dsInfo)
  197. } else {
  198. glog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url)
  199. }
  200. }
  201. glog.V(9).Infof("Datastore from endpoint VC: %+v", datastores)
  202. return datastores, nil
  203. }
  204. func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, nodeManager *NodeManager) (string, error) {
  205. pbmClient, err := vclib.NewPbmClient(ctx, dc.Client())
  206. if err != nil {
  207. return "", err
  208. }
  209. storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
  210. if err != nil {
  211. glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
  212. return "", err
  213. }
  214. sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager)
  215. if err != nil {
  216. glog.Errorf("Failed to get shared datastores. err: %+v", err)
  217. return "", err
  218. }
  219. if len(sharedDs) == 0 {
  220. msg := "No shared datastores found in the endpoint virtual center"
  221. glog.Errorf(msg)
  222. return "", errors.New(msg)
  223. }
  224. compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs)
  225. if err != nil {
  226. glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
  227. sharedDs, storagePolicyID, err)
  228. return "", err
  229. }
  230. glog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
  231. datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
  232. if err != nil {
  233. glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
  234. return "", err
  235. }
  236. glog.V(4).Infof("Most free datastore : %+s", datastore)
  237. return datastore, err
  238. }
  239. func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resourcePoolPath string) (*vclib.VMOptions, error) {
  240. var vmOptions vclib.VMOptions
  241. resourcePool, err := dc.GetResourcePool(ctx, resourcePoolPath)
  242. if err != nil {
  243. return nil, err
  244. }
  245. glog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool)
  246. folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
  247. if err != nil {
  248. return nil, err
  249. }
  250. vmOptions.VMFolder = folder
  251. vmOptions.VMResourcePool = resourcePool
  252. return &vmOptions, nil
  253. }
  254. // A background routine which will be responsible for deleting stale dummy VM's.
  255. func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
  256. // Create context
  257. ctx, cancel := context.WithCancel(context.Background())
  258. defer cancel()
  259. for {
  260. time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute)
  261. vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
  262. if err != nil {
  263. glog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err)
  264. continue
  265. }
  266. dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
  267. if err != nil {
  268. glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err)
  269. continue
  270. }
  271. // Get the folder reference for global working directory where the dummy VM needs to be created.
  272. vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
  273. if err != nil {
  274. glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
  275. continue
  276. }
  277. // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
  278. defer cleanUpDummyVMLock.Lock()
  279. err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
  280. if err != nil {
  281. glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
  282. }
  283. }
  284. }
  285. // Get canonical volume path for volume Path.
  286. // Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
  287. // Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
  288. func getcanonicalVolumePath(ctx context.Context, dc *vclib.Datacenter, volumePath string) (string, error) {
  289. var folderID string
  290. var folderExists bool
  291. canonicalVolumePath := volumePath
  292. dsPathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath)
  293. if err != nil {
  294. return "", err
  295. }
  296. dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
  297. if len(dsPath) <= 1 {
  298. return canonicalVolumePath, nil
  299. }
  300. datastore := dsPathObj.Datastore
  301. dsFolder := dsPath[0]
  302. folderNameIDMap, datastoreExists := datastoreFolderIDMap[datastore]
  303. if datastoreExists {
  304. folderID, folderExists = folderNameIDMap[dsFolder]
  305. }
  306. // Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
  307. if !datastoreExists || !folderExists {
  308. if !vclib.IsValidUUID(dsFolder) {
  309. dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
  310. // Querying a non-existent dummy disk on the datastore folder.
  311. // It would fail and return an folder ID in the error message.
  312. _, err := dc.GetVirtualDiskPage83Data(ctx, dummyDiskVolPath)
  313. if err != nil {
  314. re := regexp.MustCompile("File (.*?) was not found")
  315. match := re.FindStringSubmatch(err.Error())
  316. canonicalVolumePath = match[1]
  317. }
  318. }
  319. diskPath := vclib.GetPathFromVMDiskPath(canonicalVolumePath)
  320. if diskPath == "" {
  321. return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
  322. }
  323. folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
  324. setdatastoreFolderIDMap(datastoreFolderIDMap, datastore, dsFolder, folderID)
  325. }
  326. canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
  327. return canonicalVolumePath, nil
  328. }
  329. func setdatastoreFolderIDMap(
  330. datastoreFolderIDMap map[string]map[string]string,
  331. datastore string,
  332. folderName string,
  333. folderID string) {
  334. folderNameIDMap := datastoreFolderIDMap[datastore]
  335. if folderNameIDMap == nil {
  336. folderNameIDMap = make(map[string]string)
  337. datastoreFolderIDMap[datastore] = folderNameIDMap
  338. }
  339. folderNameIDMap[folderName] = folderID
  340. }
  341. func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPath string) (string, error) {
  342. volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
  343. // Get the canonical volume path for volPath.
  344. canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath)
  345. if err != nil {
  346. glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
  347. return "", err
  348. }
  349. // Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
  350. if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
  351. canonicalVolumePath += ".vmdk"
  352. }
  353. return canonicalVolumePath, nil
  354. }
// convertVolPathsToDevicePaths removes cluster or folder path from volPaths and convert to canonicalPath
//
// NOTE: the per-node slices inside nodeVolumes are rewritten in place, so the
// returned map shares backing arrays with (and mutates) the caller's input.
func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName][]string, error) {
	vmVolumes := make(map[k8stypes.NodeName][]string)
	for nodeName, volPaths := range nodeVolumes {
		nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
		if err != nil {
			return nil, err
		}
		// Ensure a valid session exists for this node's vCenter before the
		// datacenter queries issued by convertVolPathToDevicePath below.
		_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
		if err != nil {
			return nil, err
		}
		for i, volPath := range volPaths {
			deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath)
			if err != nil {
				glog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
				return nil, err
			}
			volPaths[i] = deviceVolPath
		}
		vmVolumes[nodeName] = volPaths
	}
	return vmVolumes, nil
}
// checkDiskAttached verifies volumes are attached to the VMs which are in same vCenter and Datacenter
// Returns nodes if exist any for which VM is not found in that vCenter and Datacenter
//
// Attachment results are written into the caller-supplied `attached` map.
// When the bulk property-collector query fails with ManagedObjectNotFound and
// retry is false, each node is checked individually; nodes whose cached VM
// reference is stale are collected in nodesToRetry for the caller to handle.
func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) {
	var nodesToRetry []k8stypes.NodeName
	var vmList []*vclib.VirtualMachine
	var nodeInfo NodeInfo
	var err error
	for _, nodeName := range nodes {
		nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName)
		if err != nil {
			return nodesToRetry, err
		}
		vmList = append(vmList, nodeInfo.vm)
	}
	// Making sure session is valid
	// NOTE(review): nodeInfo here is the last node's info; this assumes every
	// node in `nodes` shares the same vCenter/datacenter (per the function
	// comment) and that `nodes` is non-empty — confirm with callers.
	_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
	if err != nil {
		return nodesToRetry, err
	}
	// If any of the nodes are not present property collector query will fail for entire operation
	vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"})
	if err != nil {
		if vclib.IsManagedObjectNotFoundError(err) && !retry {
			glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList)
			// Property Collector Query failed
			// VerifyVolumePaths per VM
			for _, nodeName := range nodes {
				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
				if err != nil {
					return nodesToRetry, err
				}
				devices, err := nodeInfo.vm.VirtualMachine.Device(ctx)
				if err != nil {
					if vclib.IsManagedObjectNotFoundError(err) {
						glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm)
						nodesToRetry = append(nodesToRetry, nodeName)
						continue
					}
					return nodesToRetry, err
				}
				glog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
				vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached)
			}
		}
		// The original property-collector error is propagated even after the
		// per-VM fallback above succeeded.
		return nodesToRetry, err
	}
	// Index the property-collector results by lowercased VM UUID.
	vmMoMap := make(map[string]mo.VirtualMachine)
	for _, vmMo := range vmMoList {
		if vmMo.Config == nil {
			glog.Errorf("Config is not available for VM: %q", vmMo.Name)
			continue
		}
		glog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid))
		vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo
	}
	glog.V(9).Infof("vmMoMap: +%v", vmMoMap)
	for _, nodeName := range nodes {
		node, err := vs.nodeManager.GetNode(nodeName)
		if err != nil {
			return nodesToRetry, err
		}
		// Match each node to its property-collector entry via node UUID.
		nodeUUID, err := GetNodeUUID(&node)
		if err != nil {
			glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
			return nodesToRetry, err
		}
		nodeUUID = strings.ToLower(nodeUUID)
		glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %s", nodeName, nodeUUID, vmMoMap)
		vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
	}
	return nodesToRetry, nil
}
  451. func (vs *VSphere) IsDummyVMPresent(vmName string) (bool, error) {
  452. isDummyVMPresent := false
  453. // Create context
  454. ctx, cancel := context.WithCancel(context.Background())
  455. defer cancel()
  456. vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
  457. if err != nil {
  458. return isDummyVMPresent, err
  459. }
  460. dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
  461. if err != nil {
  462. return isDummyVMPresent, err
  463. }
  464. vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
  465. if err != nil {
  466. return isDummyVMPresent, err
  467. }
  468. vms, err := vmFolder.GetVirtualMachines(ctx)
  469. if err != nil {
  470. return isDummyVMPresent, err
  471. }
  472. for _, vm := range vms {
  473. if vm.Name() == vmName {
  474. isDummyVMPresent = true
  475. break
  476. }
  477. }
  478. return isDummyVMPresent, nil
  479. }
  480. func GetVMUUID() (string, error) {
  481. id, err := ioutil.ReadFile(UUIDPath)
  482. if err != nil {
  483. return "", fmt.Errorf("error retrieving vm uuid: %s", err)
  484. }
  485. uuidFromFile := string(id[:])
  486. //strip leading and trailing white space and new line char
  487. uuid := strings.TrimSpace(uuidFromFile)
  488. // check the uuid starts with "VMware-"
  489. if !strings.HasPrefix(uuid, UUIDPrefix) {
  490. return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile)
  491. }
  492. // Strip the prefix and white spaces and -
  493. uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1)
  494. uuid = strings.Replace(uuid, "-", "", -1)
  495. if len(uuid) != 32 {
  496. return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile)
  497. }
  498. // need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f"
  499. uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
  500. return uuid, nil
  501. }
  502. func GetUUIDFromProviderID(providerID string) string {
  503. return strings.TrimPrefix(providerID, ProviderPrefix)
  504. }
  505. func IsUUIDSupportedNode(node *v1.Node) (bool, error) {
  506. newVersion, err := version.ParseSemantic("v1.9.4")
  507. if err != nil {
  508. glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
  509. return false, err
  510. }
  511. nodeVersion, err := version.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
  512. if err != nil {
  513. glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
  514. return false, err
  515. }
  516. if nodeVersion.LessThan(newVersion) {
  517. return true, nil
  518. }
  519. return false, nil
  520. }
  521. func GetNodeUUID(node *v1.Node) (string, error) {
  522. oldNode, err := IsUUIDSupportedNode(node)
  523. if err != nil {
  524. glog.Errorf("Failed to get node UUID for node %+v with error %v", node, err)
  525. return "", err
  526. }
  527. if oldNode {
  528. return node.Status.NodeInfo.SystemUUID, nil
  529. }
  530. return GetUUIDFromProviderID(node.Spec.ProviderID), nil
  531. }