vendor files
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD (generated, vendored, new file, 65 lines)
@@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "nodemanager.go",
        "vsphere.go",
        "vsphere_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere",
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
        "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library",
        "//pkg/controller:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/vmware/govmomi:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/gopkg.in/gcfg.v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["vsphere_test.go"],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere",
    library = ":go_default_library",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers/vsphere/vclib:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/OWNERS (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
approvers:
- abrarshivani
- baludontu
- divyenpatel
- imkin
- kerneltime
- luomiao
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go (generated, vendored, new file, 319 lines)
@@ -0,0 +1,319 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
    "fmt"
    "strings"
    "sync"

    "github.com/golang/glog"
    "golang.org/x/net/context"
    "k8s.io/api/core/v1"
    k8stypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// NodeInfo stores info about a Kubernetes node.
type NodeInfo struct {
    dataCenter *vclib.Datacenter
    vm         *vclib.VirtualMachine
    vcServer   string
}

type NodeManager struct {
    // TODO: replace map with concurrent map when k8s supports go v1.9

    // Maps the VC server to VSphereInstance
    vsphereInstanceMap map[string]*VSphereInstance
    // Maps node name to node info.
    nodeInfoMap map[string]*NodeInfo
    // Maps node name to node structure
    registeredNodes map[string]*v1.Node

    // Mutexes
    registeredNodesLock sync.RWMutex
    nodeInfoLock        sync.RWMutex
}

type NodeDetails struct {
    NodeName string
    vm       *vclib.VirtualMachine
}

// TODO: Make it configurable in vsphere.conf
const (
    POOL_SIZE  = 8
    QUEUE_SIZE = POOL_SIZE * 10
)

func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
    type VmSearch struct {
        vc         string
        datacenter *vclib.Datacenter
    }

    var mutex = &sync.Mutex{}
    var globalErrMutex = &sync.Mutex{}
    var queueChannel chan *VmSearch
    var wg sync.WaitGroup
    var globalErr *error

    queueChannel = make(chan *VmSearch, QUEUE_SIZE)
    nodeUUID := node.Status.NodeInfo.SystemUUID
    vmFound := false
    globalErr = nil

    setGlobalErr := func(err error) {
        globalErrMutex.Lock()
        globalErr = &err
        globalErrMutex.Unlock()
    }

    setVMFound := func(found bool) {
        mutex.Lock()
        vmFound = found
        mutex.Unlock()
    }

    getVMFound := func() bool {
        mutex.Lock()
        found := vmFound
        mutex.Unlock()
        return found
    }

    go func() {
        var datacenterObjs []*vclib.Datacenter
        for vc, vsi := range nm.vsphereInstanceMap {

            if getVMFound() {
                break
            }

            // Create context
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()

            err := vsi.conn.Connect(ctx)
            if err != nil {
                glog.V(4).Info("Discovering node error vc:", err)
                setGlobalErr(err)
                continue
            }

            if vsi.cfg.Datacenters == "" {
                datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn)
                if err != nil {
                    glog.V(4).Info("Discovering node error dc:", err)
                    setGlobalErr(err)
                    continue
                }
            } else {
                datacenters := strings.Split(vsi.cfg.Datacenters, ",")
                for _, dc := range datacenters {
                    dc = strings.TrimSpace(dc)
                    if dc == "" {
                        continue
                    }
                    datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc)
                    if err != nil {
                        glog.V(4).Info("Discovering node error dc:", err)
                        setGlobalErr(err)
                        continue
                    }
                    datacenterObjs = append(datacenterObjs, datacenterObj)
                }
            }

            for _, datacenterObj := range datacenterObjs {
                if getVMFound() {
                    break
                }

                glog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name())
                queueChannel <- &VmSearch{
                    vc:         vc,
                    datacenter: datacenterObj,
                }
            }
        }
        close(queueChannel)
    }()

    for i := 0; i < POOL_SIZE; i++ {
        wg.Add(1)
        go func() {
            for res := range queueChannel {
                ctx, cancel := context.WithCancel(context.Background())
                defer cancel()
                vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID)
                if err != nil {
                    glog.V(4).Infof("Error %q while looking for vm=%+v for node=%s in vc=%s and datacenter=%s",
                        err, vm, node.Name, res.vc, res.datacenter.Name())
                    if err != vclib.ErrNoVMFound {
                        setGlobalErr(err)
                    } else {
                        glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s",
                            node.Name, res.vc, res.datacenter.Name())
                    }
                    continue
                }
                if vm != nil {
                    glog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s",
                        node.Name, vm, res.vc, res.datacenter.Name())

                    nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc}
                    nm.addNodeInfo(node.ObjectMeta.Name, nodeInfo)
                    // Drain the queue so the producer can finish.
                    for range queueChannel {
                    }
                    setVMFound(true)
                    break
                }
            }
            wg.Done()
        }()
    }
    wg.Wait()
    if vmFound {
        return nil
    }
    if globalErr != nil {
        return *globalErr
    }

    glog.V(4).Infof("Discovery Node: %q vm not found", node.Name)
    return vclib.ErrNoVMFound
}

func (nm *NodeManager) RegisterNode(node *v1.Node) error {
    nm.addNode(node)
    nm.DiscoverNode(node)
    return nil
}

func (nm *NodeManager) UnRegisterNode(node *v1.Node) error {
    nm.removeNode(node)
    return nil
}

func (nm *NodeManager) RediscoverNode(nodeName k8stypes.NodeName) error {
    node, err := nm.GetNode(nodeName)

    if err != nil {
        return err
    }
    return nm.DiscoverNode(&node)
}

func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) {
    nm.registeredNodesLock.RLock()
    node := nm.registeredNodes[convertToString(nodeName)]
    nm.registeredNodesLock.RUnlock()
    if node == nil {
        return v1.Node{}, vclib.ErrNoVMFound
    }
    return *node, nil
}

func (nm *NodeManager) addNode(node *v1.Node) {
    nm.registeredNodesLock.Lock()
    nm.registeredNodes[node.ObjectMeta.Name] = node
    nm.registeredNodesLock.Unlock()
}

func (nm *NodeManager) removeNode(node *v1.Node) {
    nm.registeredNodesLock.Lock()
    delete(nm.registeredNodes, node.ObjectMeta.Name)
    nm.registeredNodesLock.Unlock()

    nm.nodeInfoLock.Lock()
    delete(nm.nodeInfoMap, node.ObjectMeta.Name)
    nm.nodeInfoLock.Unlock()
}

// GetNodeInfo returns a NodeInfo containing the datacenter, vm and vc server ip address.
// This method returns an error if it is unable to find the node in the VCs and DCs listed in vSphere.conf.
// The NodeInfo returned may not be updated to reflect the current VM location.
func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) {
    getNodeInfo := func(nodeName k8stypes.NodeName) *NodeInfo {
        nm.nodeInfoLock.RLock()
        nodeInfo := nm.nodeInfoMap[convertToString(nodeName)]
        nm.nodeInfoLock.RUnlock()
        return nodeInfo
    }
    nodeInfo := getNodeInfo(nodeName)
    if nodeInfo == nil {
        err := nm.RediscoverNode(nodeName)
        if err != nil {
            glog.V(4).Infof("error %q node info for node %q not found", err, convertToString(nodeName))
            return NodeInfo{}, err
        }
        nodeInfo = getNodeInfo(nodeName)
    }
    return *nodeInfo, nil
}

func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
    nm.nodeInfoLock.RLock()
    defer nm.nodeInfoLock.RUnlock()
    var nodeDetails []NodeDetails
    vsphereSessionRefreshMap := make(map[string]bool)

    // Create context
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    for nodeName, nodeInfo := range nm.nodeInfoMap {
        nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm})
        if vsphereSessionRefreshMap[nodeInfo.vcServer] {
            continue
        }
        vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
        if vsphereInstance == nil {
            err := fmt.Errorf("vSphereInstance for vc server %q not found while looking for vm %q", nodeInfo.vcServer, nodeInfo.vm)
            return nil, err
        }
        err := vsphereInstance.conn.Connect(ctx)
        if err != nil {
            return nil, err
        }
        vsphereSessionRefreshMap[nodeInfo.vcServer] = true
    }
    return nodeDetails, nil
}

func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) {
    nm.nodeInfoLock.Lock()
    nm.nodeInfoMap[nodeName] = nodeInfo
    nm.nodeInfoLock.Unlock()
}

func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) {
    nodeInfo, err := nm.GetNodeInfo(nodeName)
    if err != nil {
        glog.V(4).Infof("node info for node %q not found", convertToString(nodeName))
        return VSphereInstance{}, err
    }
    vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
    if vsphereInstance == nil {
        return VSphereInstance{}, fmt.Errorf("vSphereInstance for vc server %q not found while looking for node %q", nodeInfo.vcServer, convertToString(nodeName))
    }
    return *vsphereInstance, nil
}
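For context, DiscoverNode fans the VM-UUID search out over a bounded worker pool fed by a single producer goroutine; a minimal sketch of the same pattern in isolation, with nothing taken from the vendored API (all names illustrative):

package main

import (
    "fmt"
    "sync"
)

func main() {
    const poolSize = 8 // mirrors POOL_SIZE above
    queue := make(chan int, poolSize*10)

    // Producer: enqueue candidate "datacenters to search", then close.
    go func() {
        for i := 0; i < 20; i++ {
            queue <- i
        }
        close(queue)
    }()

    var wg sync.WaitGroup
    var mu sync.Mutex
    found := -1

    for i := 0; i < poolSize; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for item := range queue {
                if item == 13 { // stand-in for "VM with matching UUID found"
                    mu.Lock()
                    found = item
                    mu.Unlock()
                    // Drain the queue so the producer never blocks on send.
                    for range queue {
                    }
                    return
                }
            }
        }()
    }
    wg.Wait()
    fmt.Println("found:", found)
}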
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD (generated, vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "connection.go",
        "constants.go",
        "custom_errors.go",
        "datacenter.go",
        "datastore.go",
        "folder.go",
        "pbm.go",
        "utils.go",
        "virtualmachine.go",
        "vmoptions.go",
        "volumeoptions.go",
        "vsphere_metrics.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib",
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/github.com/vmware/govmomi:go_default_library",
        "//vendor/github.com/vmware/govmomi/find:go_default_library",
        "//vendor/github.com/vmware/govmomi/object:go_default_library",
        "//vendor/github.com/vmware/govmomi/pbm:go_default_library",
        "//vendor/github.com/vmware/govmomi/pbm/types:go_default_library",
        "//vendor/github.com/vmware/govmomi/property:go_default_library",
        "//vendor/github.com/vmware/govmomi/session:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/connection.go (generated, vendored, new file, 99 lines)
@@ -0,0 +1,99 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
    "fmt"
    neturl "net/url"
    "sync"

    "github.com/golang/glog"
    "github.com/vmware/govmomi"
    "github.com/vmware/govmomi/session"
    "github.com/vmware/govmomi/vim25"
    "golang.org/x/net/context"
)

// VSphereConnection contains information for connecting to vCenter
type VSphereConnection struct {
    GoVmomiClient     *govmomi.Client
    Username          string
    Password          string
    Hostname          string
    Port              string
    Insecure          bool
    RoundTripperCount uint
}

var (
    clientLock sync.Mutex
)

// Connect makes a connection to vCenter and sets VSphereConnection.GoVmomiClient.
// If connection.GoVmomiClient is already set, it obtains the existing user session.
// If the user session is not valid, connection.GoVmomiClient is replaced with a new client.
func (connection *VSphereConnection) Connect(ctx context.Context) error {
    var err error
    clientLock.Lock()
    defer clientLock.Unlock()

    if connection.GoVmomiClient == nil {
        connection.GoVmomiClient, err = connection.NewClient(ctx)
        if err != nil {
            glog.Errorf("Failed to create govmomi client. err: %+v", err)
            return err
        }
        return nil
    }
    m := session.NewManager(connection.GoVmomiClient.Client)
    userSession, err := m.UserSession(ctx)
    if err != nil {
        glog.Errorf("Error while obtaining user session. err: %+v", err)
        return err
    }
    if userSession != nil {
        return nil
    }
    glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
    connection.GoVmomiClient.Logout(ctx)
    connection.GoVmomiClient, err = connection.NewClient(ctx)
    if err != nil {
        glog.Errorf("Failed to create govmomi client. err: %+v", err)
        return err
    }
    return nil
}

// NewClient creates a new govmomi client for the VSphereConnection obj
func (connection *VSphereConnection) NewClient(ctx context.Context) (*govmomi.Client, error) {
    url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", connection.Hostname, connection.Port))
    if err != nil {
        glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
        return nil, err
    }
    url.User = neturl.UserPassword(connection.Username, connection.Password)
    client, err := govmomi.NewClient(ctx, url, connection.Insecure)
    if err != nil {
        glog.Errorf("Failed to create new client. err: %+v", err)
        return nil, err
    }
    if connection.RoundTripperCount == 0 {
        connection.RoundTripperCount = RoundTripperDefaultCount
    }
    client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(connection.RoundTripperCount)))
    return client, nil
}
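A minimal caller sketch for VSphereConnection; hostname, port, and credentials are placeholders, and Insecure is enabled only for illustration:

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

func main() {
    conn := &vclib.VSphereConnection{
        Hostname: "vcenter.example.com",              // placeholder vCenter host
        Port:     "443",
        Username: "administrator@vsphere.local",      // placeholder credentials
        Password: "secret",
        Insecure: true, // skips TLS verification; fine for a lab, not production
    }
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Connect is idempotent: it reuses a valid session or creates a new client.
    if err := conn.Connect(ctx); err != nil {
        fmt.Println("connect failed:", err)
        return
    }
    fmt.Println("connected; API version:", conn.GoVmomiClient.ServiceContent.About.ApiVersion)
}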
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/constants.go (generated, vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

// Volume Constants
const (
    ThinDiskType             = "thin"
    PreallocatedDiskType     = "preallocated"
    EagerZeroedThickDiskType = "eagerZeroedThick"
    ZeroedThickDiskType      = "zeroedThick"
)

// Controller Constants
const (
    SCSIControllerLimit       = 4
    SCSIControllerDeviceLimit = 15
    SCSIDeviceSlots           = 16
    SCSIReservedSlot          = 7

    SCSIControllerType        = "scsi"
    LSILogicControllerType    = "lsiLogic"
    BusLogicControllerType    = "busLogic"
    LSILogicSASControllerType = "lsiLogic-sas"
    PVSCSIControllerType      = "pvscsi"
)

// Other Constants
const (
    LogLevel                 = 4
    DatastoreProperty        = "datastore"
    ResourcePoolProperty     = "resourcePool"
    DatastoreInfoProperty    = "info"
    VirtualMachineType       = "VirtualMachine"
    RoundTripperDefaultCount = 3
    VSANDatastoreType        = "vsan"
    DummyVMPrefixName        = "vsphere-k8s"
    ActivePowerState         = "poweredOn"
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/custom_errors.go (generated, vendored, new file, 39 lines)
@@ -0,0 +1,39 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import "errors"

// Error Messages
const (
    FileAlreadyExistErrMsg     = "File requested already exist"
    NoDiskUUIDFoundErrMsg      = "No disk UUID found"
    NoDevicesFoundErrMsg       = "No devices found"
    DiskNotFoundErrMsg         = "No vSphere disk ID found"
    InvalidVolumeOptionsErrMsg = "VolumeOptions verification failed"
    NoVMFoundErrMsg            = "No VM found"
)

// Error constants
var (
    ErrFileAlreadyExist     = errors.New(FileAlreadyExistErrMsg)
    ErrNoDiskUUIDFound      = errors.New(NoDiskUUIDFoundErrMsg)
    ErrNoDevicesFound       = errors.New(NoDevicesFoundErrMsg)
    ErrNoDiskIDFound        = errors.New(DiskNotFoundErrMsg)
    ErrInvalidVolumeOptions = errors.New(InvalidVolumeOptionsErrMsg)
    ErrNoVMFound            = errors.New(NoVMFoundErrMsg)
)
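These sentinel errors are intended for direct comparison, which is how nodemanager.go distinguishes "VM not found" from real failures; a minimal sketch with a hypothetical lookup function:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// findVM is a hypothetical lookup that surfaces vclib's sentinel errors.
func findVM(name string) error {
    return vclib.ErrNoVMFound
}

func main() {
    if err := findVM("node-1"); err != nil {
        switch err {
        case vclib.ErrNoVMFound:
            // Expected for unregistered nodes; treat as "not found" rather than a failure.
            fmt.Println("VM not found, continuing")
        default:
            fmt.Println("unexpected error:", err)
        }
    }
}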
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go (generated, vendored, new file, 325 lines)
@@ -0,0 +1,325 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
    "errors"
    "fmt"
    "path/filepath"
    "strings"

    "github.com/golang/glog"
    "github.com/vmware/govmomi/find"
    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/property"
    "github.com/vmware/govmomi/vim25/mo"
    "github.com/vmware/govmomi/vim25/types"
    "golang.org/x/net/context"
)

// Datacenter extends the govmomi Datacenter object
type Datacenter struct {
    *object.Datacenter
}

// GetDatacenter returns the Datacenter object for the given datacenterPath.
// If the datacenter is located in a folder, provide the full path to the datacenter; otherwise just the datacenter name.
func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacenterPath string) (*Datacenter, error) {
    finder := find.NewFinder(connection.GoVmomiClient.Client, true)
    datacenter, err := finder.Datacenter(ctx, datacenterPath)
    if err != nil {
        glog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err)
        return nil, err
    }
    dc := Datacenter{datacenter}
    return &dc, nil
}

// GetAllDatacenter returns all the Datacenter objects
func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Datacenter, error) {
    var dc []*Datacenter
    finder := find.NewFinder(connection.GoVmomiClient.Client, true)
    datacenters, err := finder.DatacenterList(ctx, "*")
    if err != nil {
        glog.Errorf("Failed to find the datacenter. err: %+v", err)
        return nil, err
    }
    for _, datacenter := range datacenters {
        dc = append(dc, &(Datacenter{datacenter}))
    }

    return dc, nil
}

// GetVMByUUID gets the VM object from the given vmUUID
func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) {
    s := object.NewSearchIndex(dc.Client())
    vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
    svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil)
    if err != nil {
        glog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err)
        return nil, err
    }
    if svm == nil {
        glog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID)
        return nil, ErrNoVMFound
    }
    virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc}
    return &virtualMachine, nil
}

// GetVMByPath gets the VM object from the given vmPath.
// vmPath should be the full path to the VM, not just the name.
func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) {
    finder := getFinder(dc)
    vm, err := finder.VirtualMachine(ctx, vmPath)
    if err != nil {
        glog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err)
        return nil, err
    }
    virtualMachine := VirtualMachine{vm, dc}
    return &virtualMachine, nil
}

// GetAllDatastores gets the datastore URL to DatastoreInfo map for all the datastores in
// the datacenter.
func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*DatastoreInfo, error) {
    finder := getFinder(dc)
    datastores, err := finder.DatastoreList(ctx, "*")
    if err != nil {
        glog.Errorf("Failed to get all the datastores. err: %+v", err)
        return nil, err
    }
    var dsList []types.ManagedObjectReference
    for _, ds := range datastores {
        dsList = append(dsList, ds.Reference())
    }

    var dsMoList []mo.Datastore
    pc := property.DefaultCollector(dc.Client())
    properties := []string{DatastoreInfoProperty}
    err = pc.Retrieve(ctx, dsList, properties, &dsMoList)
    if err != nil {
        glog.Errorf("Failed to get Datastore managed objects from datastore objects."+
            " dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err)
        return nil, err
    }

    dsURLInfoMap := make(map[string]*DatastoreInfo)
    for _, dsMo := range dsMoList {
        dsURLInfoMap[dsMo.Info.GetDatastoreInfo().Url] = &DatastoreInfo{
            &Datastore{object.NewDatastore(dc.Client(), dsMo.Reference()),
                dc},
            dsMo.Info.GetDatastoreInfo()}
    }
    glog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap)
    return dsURLInfoMap, nil
}

// GetDatastoreByPath gets the Datastore object from the given vmDiskPath
func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) {
    datastorePathObj := new(object.DatastorePath)
    isSuccess := datastorePathObj.FromString(vmDiskPath)
    if !isSuccess {
        glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
        return nil, errors.New("Failed to parse vmDiskPath")
    }
    finder := getFinder(dc)
    ds, err := finder.Datastore(ctx, datastorePathObj.Datastore)
    if err != nil {
        glog.Errorf("Failed while searching for datastore: %s. err: %+v", datastorePathObj.Datastore, err)
        return nil, err
    }
    datastore := Datastore{ds, dc}
    return &datastore, nil
}

// GetDatastoreByName gets the Datastore object for the given datastore name
func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Datastore, error) {
    finder := getFinder(dc)
    ds, err := finder.Datastore(ctx, name)
    if err != nil {
        glog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err)
        return nil, err
    }
    datastore := Datastore{ds, dc}
    return &datastore, nil
}

// GetResourcePool gets the resource pool for the given path
func (dc *Datacenter) GetResourcePool(ctx context.Context, computePath string) (*object.ResourcePool, error) {
    finder := getFinder(dc)
    var computeResource *object.ComputeResource
    var err error
    if computePath == "" {
        computeResource, err = finder.DefaultComputeResource(ctx)
    } else {
        computeResource, err = finder.ComputeResource(ctx, computePath)
    }
    if err != nil {
        glog.Errorf("Failed to get the ResourcePool for computePath '%s'. err: %+v", computePath, err)
        return nil, err
    }
    return computeResource.ResourcePool(ctx)
}

// GetFolderByPath gets the Folder object from the given folder path.
// folderPath should be the full path to the folder.
func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) {
    finder := getFinder(dc)
    vmFolder, err := finder.Folder(ctx, folderPath)
    if err != nil {
        glog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
        return nil, err
    }
    folder := Folder{vmFolder, dc}
    return &folder, nil
}

// GetVMMoList gets the VM Managed Objects with the given properties from the VM object
func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachine, properties []string) ([]mo.VirtualMachine, error) {
    var vmMoList []mo.VirtualMachine
    var vmRefs []types.ManagedObjectReference
    if len(vmObjList) < 1 {
        glog.Errorf("VirtualMachine Object list is empty")
        return nil, fmt.Errorf("VirtualMachine Object list is empty")
    }

    for _, vmObj := range vmObjList {
        vmRefs = append(vmRefs, vmObj.Reference())
    }
    pc := property.DefaultCollector(dc.Client())
    err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList)
    if err != nil {
        glog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err)
        return nil, err
    }
    return vmMoList, nil
}

// GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) {
    if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
        diskPath += ".vmdk"
    }
    vdm := object.NewVirtualDiskManager(dc.Client())
    // Returns uuid of vmdk virtual disk
    diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter)

    if err != nil {
        glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
        return "", err
    }
    diskUUID = formatVirtualDiskUUID(diskUUID)
    return diskUUID, nil
}

// GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects
func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) {
    var dsMoList []mo.Datastore
    var dsRefs []types.ManagedObjectReference
    if len(dsObjList) < 1 {
        glog.Errorf("Datastore Object list is empty")
        return nil, fmt.Errorf("Datastore Object list is empty")
    }

    for _, dsObj := range dsObjList {
        dsRefs = append(dsRefs, dsObj.Reference())
    }
    pc := property.DefaultCollector(dc.Client())
    err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList)
    if err != nil {
        glog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err)
        return nil, err
    }
    return dsMoList, nil
}

// CheckDisksAttached checks if the disks are attached to the nodes.
// This is done by comparing the volume path with the backing.FilePath on the VM's virtual disk devices.
func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) {
    attached := make(map[string]map[string]bool)
    var vmList []*VirtualMachine
    for nodeName, volPaths := range nodeVolumes {
        for _, volPath := range volPaths {
            setNodeVolumeMap(attached, volPath, nodeName, false)
        }
        vm, err := dc.GetVMByPath(ctx, nodeName)
        if err != nil {
            if IsNotFound(err) {
                glog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths)
            }
            continue
        }
        vmList = append(vmList, vm)
    }
    if len(vmList) == 0 {
        glog.V(2).Infof("vSphere CP will assume no disks are attached to any node.")
        return attached, nil
    }
    vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"})
    if err != nil {
        // When there is an error fetching instance information
        // it is safer to return nil and let volume information not be touched.
        glog.Errorf("Failed to get VM Managed object for nodes: %+v. err: %+v", vmList, err)
        return nil, err
    }

    for _, vmMo := range vmMoList {
        if vmMo.Config == nil {
            glog.Errorf("Config is not available for VM: %q", vmMo.Name)
            continue
        }
        for nodeName, volPaths := range nodeVolumes {
            if nodeName == vmMo.Name {
                verifyVolumePathsForVM(vmMo, volPaths, attached)
            }
        }
    }
    return attached, nil
}

// verifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to the VM.
func verifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeVolumeMap map[string]map[string]bool) {
    // Verify if the volume paths are present on the VM backing virtual disk devices
    for _, volPath := range volPaths {
        vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
        for _, device := range vmDevices {
            if vmDevices.TypeName(device) == "VirtualDisk" {
                virtualDevice := device.GetVirtualDevice()
                if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
                    if backing.FileName == volPath {
                        setNodeVolumeMap(nodeVolumeMap, volPath, vmMo.Name, true)
                    }
                }
            }
        }
    }
}

func setNodeVolumeMap(
    nodeVolumeMap map[string]map[string]bool,
    volumePath string,
    nodeName string,
    check bool) {
    volumeMap := nodeVolumeMap[nodeName]
    if volumeMap == nil {
        volumeMap = make(map[string]bool)
        nodeVolumeMap[nodeName] = volumeMap
    }
    volumeMap[volumePath] = check
}
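A minimal lookup sketch built on these helpers; the datacenter name and UUID are placeholders, and the connection setup is as in the connection.go example:

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    conn := &vclib.VSphereConnection{ /* host and credentials as in the connection.go example */ }
    if err := conn.Connect(ctx); err != nil {
        fmt.Println("connect failed:", err)
        return
    }

    // Resolve a datacenter by name, then look up a node VM by its BIOS UUID.
    dc, err := vclib.GetDatacenter(ctx, conn, "datacenter-1") // placeholder datacenter name
    if err != nil {
        fmt.Println("datacenter lookup failed:", err)
        return
    }
    vm, err := dc.GetVMByUUID(ctx, "placeholder-vm-uuid")
    if err == vclib.ErrNoVMFound {
        fmt.Println("no VM with that UUID in this datacenter")
        return
    } else if err != nil {
        fmt.Println("lookup failed:", err)
        return
    }
    fmt.Println("found VM:", vm.InventoryPath)
}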
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go (generated, vendored, new file, 86 lines)
@@ -0,0 +1,86 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
    "fmt"

    "github.com/golang/glog"
    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/property"
    "github.com/vmware/govmomi/vim25/mo"
    "github.com/vmware/govmomi/vim25/soap"
    "github.com/vmware/govmomi/vim25/types"
    "golang.org/x/net/context"
)

// Datastore extends the govmomi Datastore object
type Datastore struct {
    *object.Datastore
    Datacenter *Datacenter
}

// DatastoreInfo is a structure to store the Datastore and its Info.
type DatastoreInfo struct {
    *Datastore
    Info *types.DatastoreInfo
}

func (di DatastoreInfo) String() string {
    return fmt.Sprintf("Datastore: %+v, datastore URL: %s", di.Datastore, di.Info.Url)
}

// CreateDirectory creates the directory at the location specified by directoryPath.
// If the intermediate-level folders do not exist and the parameter createParents is true, all the non-existent folders are created.
// directoryPath must be in the format "[vsanDatastore] kubevols"
func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string, createParents bool) error {
    fileManager := object.NewFileManager(ds.Client())
    err := fileManager.MakeDirectory(ctx, directoryPath, ds.Datacenter.Datacenter, createParents)
    if err != nil {
        if soap.IsSoapFault(err) {
            soapFault := soap.ToSoapFault(err)
            if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
                return ErrFileAlreadyExist
            }
        }
        return err
    }
    glog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath)
    return nil
}

// GetType returns the type of datastore
func (ds *Datastore) GetType(ctx context.Context) (string, error) {
    var dsMo mo.Datastore
    pc := property.DefaultCollector(ds.Client())
    err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo)
    if err != nil {
        glog.Errorf("Failed to retrieve datastore summary property. err: %v", err)
        return "", err
    }
    return dsMo.Summary.Type, nil
}

// IsCompatibleWithStoragePolicy returns true if the datastore is compatible with the given storage policy, else false.
// For an incompatible datastore, the fault message is also returned.
func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) {
    pbmClient, err := NewPbmClient(ctx, ds.Client())
    if err != nil {
        glog.Errorf("Failed to get new PbmClient Object. err: %v", err)
        return false, "", err
    }
    return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds)
}
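CreateDirectory's mapping of the SOAP FileAlreadyExists fault onto ErrFileAlreadyExist lets callers make directory creation idempotent; a minimal sketch, assuming a connected *vclib.Datastore (the package name and path are hypothetical):

package vspherevols // hypothetical helper package

import (
    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// EnsureVolumeDir creates the volume directory once; repeat calls are harmless
// because CreateDirectory maps the FileAlreadyExists fault to ErrFileAlreadyExist.
func EnsureVolumeDir(ctx context.Context, ds *vclib.Datastore, dirPath string) error {
    if err := ds.CreateDirectory(ctx, dirPath, false); err != nil && err != vclib.ErrFileAlreadyExist {
        return err
    }
    return nil
}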
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD (generated, vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "vdm.go",
        "virtualdisk.go",
        "vmdm.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers",
    deps = [
        "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/vmware/govmomi/object:go_default_library",
        "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
        "//vendor/golang.org/x/net/context:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go (generated, vendored, new file, 92 lines)
@@ -0,0 +1,92 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package diskmanagers

import (
    "time"

    "golang.org/x/net/context"

    "github.com/golang/glog"
    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/vim25/types"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// virtualDiskManager implements the VirtualDiskProvider interface for creating and deleting volumes using VirtualDiskManager
type virtualDiskManager struct {
    diskPath      string
    volumeOptions *vclib.VolumeOptions
}

// Create implements Disk's Create interface.
// Contains the implementation of virtualDiskManager-based provisioning.
func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) {
    if diskManager.volumeOptions.SCSIControllerType == "" {
        diskManager.volumeOptions.SCSIControllerType = vclib.LSILogicControllerType
    }
    // Map the requested disk format to its vSphere disk type
    diskFormat := vclib.DiskFormatValidType[diskManager.volumeOptions.DiskFormat]
    // Create a virtual disk manager
    vdm := object.NewVirtualDiskManager(datastore.Client())
    // Create specification for new virtual disk
    vmDiskSpec := &types.FileBackedVirtualDiskSpec{
        VirtualDiskSpec: types.VirtualDiskSpec{
            AdapterType: diskManager.volumeOptions.SCSIControllerType,
            DiskType:    diskFormat,
        },
        CapacityKb: int64(diskManager.volumeOptions.CapacityKB),
    }
    requestTime := time.Now()
    // Create virtual disk
    task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec)
    if err != nil {
        vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
        glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err)
        return "", err
    }
    taskInfo, err := task.WaitForResult(ctx, nil)
    vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
    if err != nil {
        glog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err)
        return "", err
    }
    canonicalDiskPath = taskInfo.Result.(string)
    return canonicalDiskPath, nil
}

// Delete implements Disk's Delete interface
func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error {
    // Create a virtual disk manager
    virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
    diskPath := vclib.RemoveStorageClusterORFolderNameFromVDiskPath(diskManager.diskPath)
    requestTime := time.Now()
    // Delete virtual disk
    task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter)
    if err != nil {
        glog.Errorf("Failed to delete virtual disk. err: %v", err)
        vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
        return err
    }
    err = task.Wait(ctx)
    vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
    if err != nil {
        glog.Errorf("Failed to delete virtual disk. err: %v", err)
        return err
    }
    return nil
}
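The create and delete paths above both follow govmomi's two-step task shape: start the server-side task, then wait on it. A minimal standalone sketch of that shape (the package name and disk path are hypothetical):

package diskexample // hypothetical

import (
    "github.com/vmware/govmomi/object"
    "golang.org/x/net/context"
)

func deleteDisk(ctx context.Context, vdm *object.VirtualDiskManager, dc *object.Datacenter) error {
    // Start the server-side task; an error here means the request was rejected.
    task, err := vdm.DeleteVirtualDisk(ctx, "[datastore1] kubevols/example.vmdk", dc) // placeholder path
    if err != nil {
        return err
    }
    // Wait blocks until the task completes and surfaces its fault, if any.
    return task.Wait(ctx)
}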
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go (generated, vendored, new file, 80 lines)
@@ -0,0 +1,80 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package diskmanagers

import (
    "fmt"

    "github.com/golang/glog"
    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// VirtualDisk is for the Disk Management
type VirtualDisk struct {
    DiskPath      string
    VolumeOptions *vclib.VolumeOptions
    VMOptions     *vclib.VMOptions
}

// VirtualDisk Operations Const
const (
    VirtualDiskCreateOperation = "Create"
    VirtualDiskDeleteOperation = "Delete"
)

// VirtualDiskProvider defines interfaces for creating and deleting disks
type VirtualDiskProvider interface {
    Create(ctx context.Context, datastore *vclib.Datastore) (string, error)
    Delete(ctx context.Context, datacenter *vclib.Datacenter) error
}

// getDiskManager returns a vmDiskManager or virtualDiskManager based on the given volume options
func getDiskManager(disk *VirtualDisk, diskOperation string) VirtualDiskProvider {
    var diskProvider VirtualDiskProvider
    switch diskOperation {
    case VirtualDiskDeleteOperation:
        diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions}
    case VirtualDiskCreateOperation:
        if disk.VolumeOptions.StoragePolicyName != "" || disk.VolumeOptions.VSANStorageProfileData != "" || disk.VolumeOptions.StoragePolicyID != "" {
            diskProvider = vmDiskManager{disk.DiskPath, disk.VolumeOptions, disk.VMOptions}
        } else {
            diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions}
        }
    }
    return diskProvider
}

// Create gets the appropriate disk manager and calls the respective create method
func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Datastore) (string, error) {
    if virtualDisk.VolumeOptions.DiskFormat == "" {
        virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType
    }
    if !virtualDisk.VolumeOptions.VerifyVolumeOptions() {
        glog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions)
        return "", vclib.ErrInvalidVolumeOptions
    }
    if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" {
        return "", fmt.Errorf("Storage Policy ID and Storage Policy Name are both set, please set only one parameter")
    }
    return getDiskManager(virtualDisk, VirtualDiskCreateOperation).Create(ctx, datastore)
}

// Delete gets the appropriate disk manager and calls the respective delete method
func (virtualDisk *VirtualDisk) Delete(ctx context.Context, datacenter *vclib.Datacenter) error {
    return getDiskManager(virtualDisk, VirtualDiskDeleteOperation).Delete(ctx, datacenter)
}
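Putting the dispatch together, a caller only touches the VirtualDisk facade; a minimal sketch, assuming a connected *vclib.Datastore, with placeholder path and name (no storage policy is set, so getDiskManager chooses the plain virtualDiskManager path):

package diskexample // hypothetical

import (
    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)

// createVolume provisions a 1GiB disk; DiskFormat defaults to thin inside Create.
func createVolume(ctx context.Context, ds *vclib.Datastore) (string, error) {
    disk := diskmanagers.VirtualDisk{
        DiskPath: "[datastore1] kubevols/example.vmdk", // placeholder path
        VolumeOptions: &vclib.VolumeOptions{
            CapacityKB: 1024 * 1024,
            Name:       "example-vol", // placeholder volume name
        },
    }
    // Returns the canonical disk path reported by vSphere.
    return disk.Create(ctx, ds)
}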
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go (generated, vendored, new file, 249 lines)
@@ -0,0 +1,249 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package diskmanagers

import (
    "fmt"
    "hash/fnv"
    "strings"

    "github.com/golang/glog"
    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/vim25/types"
    "golang.org/x/net/context"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

// vmDiskManager implements the VirtualDiskProvider interface for creating volumes using the Virtual Machine Reconfigure approach
type vmDiskManager struct {
    diskPath      string
    volumeOptions *vclib.VolumeOptions
    vmOptions     *vclib.VMOptions
}

// Create implements Disk's Create interface.
// Contains the implementation of VM-based provisioning to provision a disk with an SPBM policy or VSANStorageProfileData.
func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) {
    if vmdisk.volumeOptions.SCSIControllerType == "" {
        vmdisk.volumeOptions.SCSIControllerType = vclib.PVSCSIControllerType
    }
    pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client())
    if err != nil {
        glog.Errorf("Error occurred while creating new pbmClient, err: %+v", err)
        return "", err
    }

    if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" {
        vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName)
        if err != nil {
            glog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
            return "", err
        }
    }
    if vmdisk.volumeOptions.StoragePolicyID != "" {
        compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID)
        if err != nil {
            glog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
            return "", err
        }

        if !compatible {
            glog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName)
            return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage)
        }
    }

    storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{}
    // If the PBM storage policy ID is present, set the storage spec profile ID;
    // else, set the raw VSAN policy string.
    if vmdisk.volumeOptions.StoragePolicyID != "" {
        storageProfileSpec.ProfileId = vmdisk.volumeOptions.StoragePolicyID
    } else if vmdisk.volumeOptions.VSANStorageProfileData != "" {
        // Check Datastore type - VSANStorageProfileData is only applicable to vSAN Datastore
        dsType, err := datastore.GetType(ctx)
        if err != nil {
            return "", err
        }
        if dsType != vclib.VSANDatastoreType {
            glog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name())
            return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
                " The policy parameters will work only with VSAN Datastore."+
                " So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name())
        }
        storageProfileSpec.ProfileId = ""
        storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{
            ExtensionKey: "com.vmware.vim.sps",
            ObjectData:   vmdisk.volumeOptions.VSANStorageProfileData,
        }
    } else {
        glog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
        return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
    }
    var dummyVM *vclib.VirtualMachine
    // Check if the VM already exists in the folder.
    // If the VM is already present, use it; else create a new dummy VM.
    fnvHash := fnv.New32a()
    fnvHash.Write([]byte(vmdisk.volumeOptions.Name))
    dummyVMFullName := vclib.DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
    dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName)
    if err != nil {
        // Create a dummy VM
        glog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName)
        dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
        if err != nil {
            glog.Errorf("Failed to create Dummy VM. err: %v", err)
            return "", err
        }
    }

    // Reconfigure the VM to attach the disk with the VSAN policy configured
    virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
    disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions)
    if err != nil {
        glog.Errorf("Failed to create Disk Spec. err: %v", err)
        return "", err
    }
    deviceConfigSpec := &types.VirtualDeviceConfigSpec{
        Device:        disk,
        Operation:     types.VirtualDeviceConfigSpecOperationAdd,
        FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
    }

    deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec)
    virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
    fileAlreadyExist := false
    task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec)
    if err != nil {
        glog.Errorf("Failed to reconfigure VM: %q with err: %+v", dummyVMFullName, err)
        return "", err
    }
    err = task.Wait(ctx)
    if err != nil {
        fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err)
        if fileAlreadyExist {
            // Skip the error and continue to detach the disk, as the disk was already created on the datastore.
            glog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath)
        } else {
            glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
            return "", err
        }
    }
    // Detach the disk from the dummy VM.
    err = dummyVM.DetachDisk(ctx, vmdisk.diskPath)
    if err != nil {
        if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
            // Skip the error if the disk was already detached from the dummy VM but is still present on the datastore.
            glog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath)
        } else {
            glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
            return "", err
        }
    }
    // Delete the dummy VM
    err = dummyVM.DeleteVM(ctx)
    if err != nil {
        glog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
    }
    return vmdisk.diskPath, nil
}

func (vmdisk vmDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error {
    return fmt.Errorf("vmDiskManager.Delete is not supported")
}

// createDummyVM creates a dummy VM at the specified location with the given name.
func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib.Datacenter, vmName string) (*vclib.VirtualMachine, error) {
    // Create a virtual machine config spec with 1 SCSI adapter.
    virtualMachineConfigSpec := types.VirtualMachineConfigSpec{
        Name: vmName,
        Files: &types.VirtualMachineFileInfo{
            VmPathName: "[" + vmdisk.volumeOptions.Datastore + "]",
        },
        NumCPUs:  1,
        MemoryMB: 4,
        DeviceChange: []types.BaseVirtualDeviceConfigSpec{
            &types.VirtualDeviceConfigSpec{
                Operation: types.VirtualDeviceConfigSpecOperationAdd,
                Device: &types.ParaVirtualSCSIController{
                    VirtualSCSIController: types.VirtualSCSIController{
                        SharedBus: types.VirtualSCSISharingNoSharing,
                        VirtualController: types.VirtualController{
                            BusNumber: 0,
                            VirtualDevice: types.VirtualDevice{
                                Key: 1000,
                            },
                        },
                    },
                },
            },
        },
    }

    task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil)
    if err != nil {
        glog.Errorf("Failed to create VM. err: %+v", err)
        return nil, err
    }

    dummyVMTaskInfo, err := task.WaitForResult(ctx, nil)
    if err != nil {
        glog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err)
        return nil, err
    }

    vmRef := dummyVMTaskInfo.Result.(object.Reference)
    dummyVM := object.NewVirtualMachine(datacenter.Client(), vmRef.Reference())
    return &vclib.VirtualMachine{VirtualMachine: dummyVM, Datacenter: datacenter}, nil
}

// CleanUpDummyVMs deletes stale dummy VMs
func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error {
    vmList, err := folder.GetVirtualMachines(ctx)
    if err != nil {
        glog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)
        return err
    }
    if len(vmList) == 0 {
        glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
        return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
|
||||
}
|
||||
var dummyVMList []*vclib.VirtualMachine
|
||||
// Loop through VM's in the Kubernetes cluster to find dummy VM's
|
||||
for _, vm := range vmList {
|
||||
vmName, err := vm.ObjectName(ctx)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Unable to get name from VM with err: %+v", err)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) {
|
||||
vmObj := vclib.VirtualMachine{VirtualMachine: object.NewVirtualMachine(dc.Client(), vm.Reference()), Datacenter: dc}
|
||||
dummyVMList = append(dummyVMList, &vmObj)
|
||||
}
|
||||
}
|
||||
for _, vm := range dummyVMList {
|
||||
err = vm.DeleteVM(ctx)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Unable to delete dummy VM with err: %+v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isAlreadyExists(path string, err error) bool {
|
||||
errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", path)
|
||||
if errorMessage == err.Error() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
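// A minimal standalone sketch (not from the vendored source) of the naming
// scheme used above: the dummy VM name is the FNV-1a hash of the volume name,
// so repeated Create calls for the same volume resolve to the same dummy VM.
// The "vm-dummy" prefix here is an assumption standing in for vclib.DummyVMPrefixName.
//
//	package main
//
//	import (
//		"fmt"
//		"hash/fnv"
//	)
//
//	func main() {
//		fnvHash := fnv.New32a()
//		fnvHash.Write([]byte("pvc-8a46f98d")) // hypothetical volume name
//		fmt.Println("vm-dummy" + "-" + fmt.Sprint(fnvHash.Sum32()))
//	}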
46
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/folder.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"github.com/golang/glog"
	"github.com/vmware/govmomi/object"
	"golang.org/x/net/context"
)

// Folder extends the govmomi Folder object
type Folder struct {
	*object.Folder
	Datacenter *Datacenter
}

// GetVirtualMachines returns the list of VirtualMachines inside a folder.
func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) {
	vmFolders, err := folder.Children(ctx)
	if err != nil {
		glog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err)
		return nil, err
	}
	var vmObjList []*VirtualMachine
	for _, vmFolder := range vmFolders {
		if vmFolder.Reference().Type == VirtualMachineType {
			vmObj := VirtualMachine{object.NewVirtualMachine(folder.Client(), vmFolder.Reference()), folder.Datacenter}
			vmObjList = append(vmObjList, &vmObj)
		}
	}
	return vmObjList, nil
}
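// A minimal usage sketch (assumptions: ctx and a *Folder named folder are set
// up against an authenticated vCenter connection elsewhere):
//
//	vms, err := folder.GetVirtualMachines(ctx)
//	if err != nil {
//		glog.Errorf("Failed to list VMs under %s. err: %+v", folder.InventoryPath, err)
//		return
//	}
//	for _, vm := range vms {
//		name, _ := vm.ObjectName(ctx)
//		fmt.Println(name)
//	}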
169
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"fmt"

	"github.com/golang/glog"
	"github.com/vmware/govmomi/pbm"
	"golang.org/x/net/context"

	pbmtypes "github.com/vmware/govmomi/pbm/types"
	"github.com/vmware/govmomi/vim25"
)

// PbmClient extends the govmomi pbm client and provides functions to get the
// list of datastores compatible with a given storage policy.
type PbmClient struct {
	*pbm.Client
}

// NewPbmClient returns a new PBM Client object
func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) {
	pbmClient, err := pbm.NewClient(ctx, client)
	if err != nil {
		glog.Errorf("Failed to create new Pbm Client. err: %+v", err)
		return nil, err
	}
	return &PbmClient{pbmClient}, nil
}

// IsDatastoreCompatible checks if the datastore is compatible with the given storage policy ID.
// If the datastore is not compatible with the policy, a fault message with the datastore name is returned.
func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePolicyID string, datastore *Datastore) (bool, string, error) {
	faultMessage := ""
	placementHub := pbmtypes.PbmPlacementHub{
		HubType: datastore.Reference().Type,
		HubId:   datastore.Reference().Value,
	}
	hubs := []pbmtypes.PbmPlacementHub{placementHub}
	req := []pbmtypes.BasePbmPlacementRequirement{
		&pbmtypes.PbmPlacementCapabilityProfileRequirement{
			ProfileId: pbmtypes.PbmProfileId{
				UniqueId: storagePolicyID,
			},
		},
	}
	compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
	if err != nil {
		glog.Errorf("Error occurred for CheckRequirements call. err %+v", err)
		return false, "", err
	}
	if len(compatibilityResult) > 0 {
		compatibleHubs := compatibilityResult.CompatibleDatastores()
		if len(compatibleHubs) > 0 {
			return true, "", nil
		}
		dsName, err := datastore.ObjectName(ctx)
		if err != nil {
			glog.Errorf("Failed to get datastore ObjectName")
			return false, "", err
		}
		if len(compatibilityResult[0].Error) == 0 || compatibilityResult[0].Error[0].LocalizedMessage == "" {
			faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy."
		} else {
			faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy. LocalizedMessage: " + compatibilityResult[0].Error[0].LocalizedMessage + "\n"
		}
		return false, faultMessage, nil
	}
	return false, "", fmt.Errorf("compatibilityResult is nil or empty")
}

// GetCompatibleDatastores filters and returns the list of datastores compatible with the given storage policy ID.
// For non-compatible datastores, a fault message with the datastore name is also returned.
func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Datacenter, storagePolicyID string, datastores []*DatastoreInfo) ([]*DatastoreInfo, string, error) {
	var (
		dsMorNameMap                                = getDsMorNameMap(ctx, datastores)
		localizedMessagesForNotCompatibleDatastores = ""
	)
	compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores)
	if err != nil {
		glog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err)
		return nil, "", err
	}
	compatibleHubs := compatibilityResult.CompatibleDatastores()
	var compatibleDatastoreList []*DatastoreInfo
	for _, hub := range compatibleHubs {
		compatibleDatastoreList = append(compatibleDatastoreList, getDatastoreFromPlacementHub(datastores, hub))
	}
	for _, res := range compatibilityResult {
		for _, err := range res.Error {
			dsName := dsMorNameMap[res.Hub.HubId]
			localizedMessage := ""
			if err.LocalizedMessage != "" {
				localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. LocalizedMessage: " + err.LocalizedMessage + "\n"
			} else {
				localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. \n"
			}
			localizedMessagesForNotCompatibleDatastores += localizedMessage
		}
	}
	// Return an error if there are no compatible datastores.
	if len(compatibleHubs) < 1 {
		glog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID)
		return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements")
	}
	return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil
}

// GetPlacementCompatibilityResult gets the placement compatibility result based on the storage policy requirements.
func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, storagePolicyID string, datastore []*DatastoreInfo) (pbm.PlacementCompatibilityResult, error) {
	var hubs []pbmtypes.PbmPlacementHub
	for _, ds := range datastore {
		hubs = append(hubs, pbmtypes.PbmPlacementHub{
			HubType: ds.Reference().Type,
			HubId:   ds.Reference().Value,
		})
	}
	req := []pbmtypes.BasePbmPlacementRequirement{
		&pbmtypes.PbmPlacementCapabilityProfileRequirement{
			ProfileId: pbmtypes.PbmProfileId{
				UniqueId: storagePolicyID,
			},
		},
	}
	res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
	if err != nil {
		glog.Errorf("Error occurred for CheckRequirements call. err: %+v", err)
		return nil, err
	}
	return res, nil
}

// getDatastoreFromPlacementHub returns the matching datastore associated with the given pbmPlacementHub.
func getDatastoreFromPlacementHub(datastore []*DatastoreInfo, pbmPlacementHub pbmtypes.PbmPlacementHub) *DatastoreInfo {
	for _, ds := range datastore {
		if ds.Reference().Type == pbmPlacementHub.HubType && ds.Reference().Value == pbmPlacementHub.HubId {
			return ds
		}
	}
	return nil
}

// getDsMorNameMap returns a map of datastore MOR value to datastore object name.
func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[string]string {
	dsMorNameMap := make(map[string]string)
	for _, ds := range datastores {
		dsObjectName, err := ds.ObjectName(ctx)
		if err == nil {
			dsMorNameMap[ds.Reference().Value] = dsObjectName
		} else {
			glog.Errorf("Error occurred while getting datastore object name. err: %+v", err)
		}
	}
	return dsMorNameMap
}
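// A minimal usage sketch (assumptions: ctx, a connected *vim25.Client named
// vimClient, a policy ID, a *Datastore ds, a *Datacenter dc and a
// []*DatastoreInfo dsInfos are available):
//
//	pbmClient, err := NewPbmClient(ctx, vimClient)
//	if err != nil {
//		return err
//	}
//	ok, fault, err := pbmClient.IsDatastoreCompatible(ctx, policyID, ds)
//	if err == nil && !ok {
//		fmt.Println(fault) // per-datastore fault message
//	}
//	compatible, faults, err := pbmClient.GetCompatibleDatastores(ctx, dc, policyID, dsInfos)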
213
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go
generated
vendored
Normal file
@ -0,0 +1,213 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/golang/glog"
	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

// IsNotFound returns true if err is a NotFoundError or a DefaultNotFoundError.
func IsNotFound(err error) bool {
	_, ok := err.(*find.NotFoundError)
	if ok {
		return true
	}

	_, ok = err.(*find.DefaultNotFoundError)
	if ok {
		return true
	}

	return false
}

func getFinder(dc *Datacenter) *find.Finder {
	finder := find.NewFinder(dc.Client(), true)
	finder.SetDatacenter(dc.Datacenter)
	return finder
}

// formatVirtualDiskUUID removes any spaces and hyphens in the UUID.
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and the output after formatting is 4237539071f943a3a77056803bcd7baa.
func formatVirtualDiskUUID(uuid string) string {
	uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
	uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
	return strings.ToLower(uuidWithNoHyphens)
}

// getSCSIControllersOfType filters the specific type of controller device from the given list of virtual machine devices.
func getSCSIControllersOfType(vmDevices object.VirtualDeviceList, scsiType string) []*types.VirtualController {
	// get virtual scsi controllers of the passed argument type
	var scsiControllers []*types.VirtualController
	for _, device := range vmDevices {
		devType := vmDevices.Type(device)
		if devType == scsiType {
			if c, ok := device.(types.BaseVirtualController); ok {
				scsiControllers = append(scsiControllers, c.GetVirtualController())
			}
		}
	}
	return scsiControllers
}

// getAvailableSCSIController gets an available SCSI controller from the given list of controllers, i.e. one with fewer than 15 disk devices.
func getAvailableSCSIController(scsiControllers []*types.VirtualController) *types.VirtualController {
	// get a SCSI controller which has space for adding more devices
	for _, controller := range scsiControllers {
		if len(controller.Device) < SCSIControllerDeviceLimit {
			return controller
		}
	}
	return nil
}

// getNextUnitNumber gets the next available SCSI controller unit number from the given controller device list.
func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
	var takenUnitNumbers [SCSIDeviceSlots]bool
	takenUnitNumbers[SCSIReservedSlot] = true
	key := c.GetVirtualController().Key

	for _, device := range devices {
		d := device.GetVirtualDevice()
		if d.ControllerKey == key {
			if d.UnitNumber != nil {
				takenUnitNumbers[*d.UnitNumber] = true
			}
		}
	}
	for unitNumber, takenUnitNumber := range takenUnitNumbers {
		if !takenUnitNumber {
			return int32(unitNumber), nil
		}
	}
	return -1, fmt.Errorf("SCSI Controller with key=%d does not have any available slots", key)
}

// getSCSIControllers filters and returns the list of controller devices from the given list of virtual machine devices.
func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController {
	// get all virtual scsi controllers
	var scsiControllers []*types.VirtualController
	for _, device := range vmDevices {
		devType := vmDevices.Type(device)
		switch devType {
		case SCSIControllerType, strings.ToLower(LSILogicControllerType), strings.ToLower(BusLogicControllerType), PVSCSIControllerType, strings.ToLower(LSILogicSASControllerType):
			if c, ok := device.(types.BaseVirtualController); ok {
				scsiControllers = append(scsiControllers, c.GetVirtualController())
			}
		}
	}
	return scsiControllers
}

// RemoveStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath.
// For vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk.
// For vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value remains the same: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk.
func RemoveStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
	datastore := regexp.MustCompile(`\[(.*?)\]`).FindStringSubmatch(vDiskPath)[1]
	if filepath.Base(datastore) != datastore {
		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
	}
	return vDiskPath
}

// GetPathFromVMDiskPath retrieves the path from the VM disk path.
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
func GetPathFromVMDiskPath(vmDiskPath string) string {
	datastorePathObj := new(object.DatastorePath)
	isSuccess := datastorePathObj.FromString(vmDiskPath)
	if !isSuccess {
		glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
		return ""
	}
	return datastorePathObj.Path
}

// GetDatastoreFromVMDiskPath retrieves the datastore from the VM disk path.
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the datastore is vsanDatastore
func GetDatastoreFromVMDiskPath(vmDiskPath string) string {
	datastorePathObj := new(object.DatastorePath)
	isSuccess := datastorePathObj.FromString(vmDiskPath)
	if !isSuccess {
		glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
		return ""
	}
	return datastorePathObj.Datastore
}

// GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from the VM disk path.
func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
	datastorePathObj := new(object.DatastorePath)
	isSuccess := datastorePathObj.FromString(vmDiskPath)
	if !isSuccess {
		glog.Errorf("Failed to parse volPath: %s", vmDiskPath)
		return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
	}
	return datastorePathObj, nil
}

// IsValidUUID checks if the string is a valid UUID.
func IsValidUUID(uuid string) bool {
	r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
	return r.MatchString(uuid)
}

// IsManagedObjectNotFoundError returns true if the error is of type ManagedObjectNotFound.
func IsManagedObjectNotFoundError(err error) bool {
	isManagedObjectNotFoundError := false
	if soap.IsSoapFault(err) {
		_, isManagedObjectNotFoundError = soap.ToSoapFault(err).VimFault().(types.ManagedObjectNotFound)
	}
	return isManagedObjectNotFoundError
}

// VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to the VM.
func VerifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
	// Verify if the volume paths are present on the VM backing virtual disk devices
	vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
	VerifyVolumePathsForVMDevices(vmDevices, volPaths, nodeName, nodeVolumeMap)
}

// VerifyVolumePathsForVMDevices verifies if the volume paths (volPaths) are attached to the VM devices.
func VerifyVolumePathsForVMDevices(vmDevices object.VirtualDeviceList, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
	volPathsMap := make(map[string]bool)
	for _, volPath := range volPaths {
		volPathsMap[volPath] = true
	}
	// Verify if the volume paths are present on the VM backing virtual disk devices
	for _, device := range vmDevices {
		if vmDevices.TypeName(device) == "VirtualDisk" {
			virtualDevice := device.GetVirtualDevice()
			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
				if volPathsMap[backing.FileName] {
					setNodeVolumeMap(nodeVolumeMap, backing.FileName, nodeName, true)
				}
			}
		}
	}
}
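// Expected behavior of the path/UUID helpers above, per their doc comments:
//
//	RemoveStorageClusterORFolderNameFromVDiskPath("[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk")
//	// -> "[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"
//	GetPathFromVMDiskPath("[vsanDatastore] kubevols/volume.vmdk")      // -> "kubevols/volume.vmdk"
//	GetDatastoreFromVMDiskPath("[vsanDatastore] kubevols/volume.vmdk") // -> "vsanDatastore"
//	formatVirtualDiskUUID("42375390-71f9-43a3-a770-56803bcd7baa")      // -> "4237539071f943a3a77056803bcd7baa"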
402
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go
generated
vendored
Normal file
@ -0,0 +1,402 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
)

// VirtualMachine extends the govmomi VirtualMachine object
type VirtualMachine struct {
	*object.VirtualMachine
	Datacenter *Datacenter
}

// IsDiskAttached checks if the disk is attached to the VM.
func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) (bool, error) {
	device, err := vm.getVirtualDeviceByPath(ctx, diskPath)
	if err != nil {
		return false, err
	}
	if device != nil {
		return true, nil
	}
	return false, nil
}

// DeleteVM deletes the VM.
func (vm *VirtualMachine) DeleteVM(ctx context.Context) error {
	destroyTask, err := vm.Destroy(ctx)
	if err != nil {
		glog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err)
		return err
	}
	return destroyTask.Wait(ctx)
}

// AttachDisk attaches the disk at location vmDiskPath from Datastore dsObj to the virtual machine.
// Additionally, the disk can be configured with an SPBM policy if volumeOptions.StoragePolicyID is non-empty.
func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, volumeOptions *VolumeOptions) (string, error) {
	// Check if the diskControllerType is valid
	if !CheckControllerSupported(volumeOptions.SCSIControllerType) {
		return "", fmt.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
	}
	vmDiskPathCopy := vmDiskPath
	vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath)
	attached, err := vm.IsDiskAttached(ctx, vmDiskPath)
	if err != nil {
		glog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err)
		return "", err
	}
	// If the disk is already attached, return the disk UUID
	if attached {
		diskUUID, _ := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath)
		return diskUUID, nil
	}

	if volumeOptions.StoragePolicyName != "" {
		pbmClient, err := NewPbmClient(ctx, vm.Client())
		if err != nil {
			glog.Errorf("Error occurred while creating new pbmClient. err: %+v", err)
			return "", err
		}

		volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName)
		if err != nil {
			glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err)
			return "", err
		}
	}

	dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy)
	if err != nil {
		glog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err)
		return "", err
	}
	// If the disk is not attached, create a disk spec for the disk to be attached to the VM.
	disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions)
	if err != nil {
		glog.Errorf("Error occurred while creating disk spec. err: %+v", err)
		return "", err
	}
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
		return "", err
	}
	virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
	deviceConfigSpec := &types.VirtualDeviceConfigSpec{
		Device:    disk,
		Operation: types.VirtualDeviceConfigSpecOperationAdd,
	}
	// Configure the disk with the SPBM profile only if the ProfileID is not empty.
	if volumeOptions.StoragePolicyID != "" {
		profileSpec := &types.VirtualMachineDefinedProfileSpec{
			ProfileId: volumeOptions.StoragePolicyID,
		}
		deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec)
	}
	virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
	requestTime := time.Now()
	task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec)
	if err != nil {
		RecordvSphereMetric(APIAttachVolume, requestTime, err)
		glog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
		if newSCSIController != nil {
			vm.deleteController(ctx, newSCSIController, vmDevices)
		}
		return "", err
	}
	err = task.Wait(ctx)
	RecordvSphereMetric(APIAttachVolume, requestTime, err)
	if err != nil {
		glog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
		if newSCSIController != nil {
			vm.deleteController(ctx, newSCSIController, vmDevices)
		}
		return "", err
	}

	// Once the disk is attached, get the disk UUID.
	diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath)
	if err != nil {
		glog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err)
		vm.DetachDisk(ctx, vmDiskPath)
		if newSCSIController != nil {
			vm.deleteController(ctx, newSCSIController, vmDevices)
		}
		return "", err
	}
	return diskUUID, nil
}

// DetachDisk detaches the disk specified by vmDiskPath.
func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) error {
	vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath)
	device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath)
	if err != nil {
		glog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath)
		return err
	}
	if device == nil {
		glog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
		return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
	}
	// Detach the disk from the VM
	requestTime := time.Now()
	err = vm.RemoveDevice(ctx, true, device)
	RecordvSphereMetric(APIDetachVolume, requestTime, err)
	if err != nil {
		glog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err)
		return err
	}
	return nil
}

// GetResourcePool gets the resource pool for the VM.
func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) {
	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"})
	if err != nil {
		glog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err)
		return nil, err
	}
	return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil
}

// IsActive checks if the VM is active.
// Returns true if the VM is in the poweredOn state.
func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) {
	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"})
	if err != nil {
		glog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err)
		return false, err
	}
	if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState {
		return true, nil
	}

	return false, nil
}

// GetAllAccessibleDatastores gets the list of accessible datastores for the given virtual machine.
func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) {
	host, err := vm.HostSystem(ctx)
	if err != nil {
		glog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err)
		return nil, err
	}
	var hostSystemMo mo.HostSystem
	s := object.NewSearchIndex(vm.Client())
	err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
	if err != nil {
		glog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err)
		return nil, err
	}
	var dsRefList []types.ManagedObjectReference
	for _, dsRef := range hostSystemMo.Datastore {
		dsRefList = append(dsRefList, dsRef)
	}

	var dsMoList []mo.Datastore
	pc := property.DefaultCollector(vm.Client())
	properties := []string{DatastoreInfoProperty}
	err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList)
	if err != nil {
		glog.Errorf("Failed to get Datastore managed objects from datastore objects."+
			" dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err)
		return nil, err
	}
	glog.V(9).Infof("Result dsMoList: %+v", dsMoList)
	var dsObjList []*DatastoreInfo
	for _, dsMo := range dsMoList {
		dsObjList = append(dsObjList,
			&DatastoreInfo{
				&Datastore{object.NewDatastore(vm.Client(), dsMo.Reference()),
					vm.Datacenter},
				dsMo.Info.GetDatastoreInfo()})
	}
	return dsObjList, nil
}

// CreateDiskSpec creates a disk spec for the disk.
func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, dsObj *Datastore, volumeOptions *VolumeOptions) (*types.VirtualDisk, types.BaseVirtualDevice, error) {
	var newSCSIController types.BaseVirtualDevice
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		glog.Errorf("Failed to retrieve VM devices. err: %+v", err)
		return nil, nil, err
	}
	// find a SCSI controller of the requested type among the VM devices
	scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
	scsiController := getAvailableSCSIController(scsiControllersOfRequiredType)
	if scsiController == nil {
		newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType)
		if err != nil {
			glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err)
			return nil, nil, err
		}
		// Get the VM device list
		vmDevices, err := vm.Device(ctx)
		if err != nil {
			glog.Errorf("Failed to retrieve VM devices. err: %v", err)
			return nil, nil, err
		}
		// verify that the scsi controller is now present in the virtual machine
		scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
		scsiController = getAvailableSCSIController(scsiControllersOfRequiredType)
		if scsiController == nil {
			glog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
			// attempt clean up of the scsi controller
			vm.deleteController(ctx, newSCSIController, vmDevices)
			return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
		}
	}
	disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath)
	unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
	if err != nil {
		glog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err)
		return nil, nil, err
	}
	*disk.UnitNumber = unitNumber
	backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
	backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)

	if volumeOptions.CapacityKB != 0 {
		disk.CapacityInKB = int64(volumeOptions.CapacityKB)
	}
	if volumeOptions.DiskFormat != "" {
		diskFormat := DiskFormatValidType[volumeOptions.DiskFormat]
		switch diskFormat {
		case ThinDiskType:
			backing.ThinProvisioned = types.NewBool(true)
		case EagerZeroedThickDiskType:
			backing.EagerlyScrub = types.NewBool(true)
		default:
			backing.ThinProvisioned = types.NewBool(false)
		}
	}
	return disk, newSCSIController, nil
}

// GetVirtualDiskPath gets the first available virtual disk device path from the VM.
func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) {
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
		return "", err
	}
	// filter vm devices to retrieve the device for the given vmdk file identified by disk path
	for _, device := range vmDevices {
		if vmDevices.TypeName(device) == "VirtualDisk" {
			virtualDevice := device.GetVirtualDevice()
			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
				return backing.FileName, nil
			}
		}
	}
	return "", nil
}

// createAndAttachSCSIController creates and attaches the SCSI controller to the VM.
func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, diskControllerType string) (types.BaseVirtualDevice, error) {
	// Get the VM device list
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
		return nil, err
	}
	allSCSIControllers := getSCSIControllers(vmDevices)
	if len(allSCSIControllers) >= SCSIControllerLimit {
		// we reached the maximum number of controllers we can attach
		glog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
		return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
	}
	newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
	if err != nil {
		glog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err)
		return nil, err
	}
	configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
	hotAndRemove := true
	configNewSCSIController.HotAddRemove = &hotAndRemove
	configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)

	// add the scsi controller to the virtual machine
	err = vm.AddDevice(ctx, newSCSIController)
	if err != nil {
		glog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err)
		// attempt clean up of the scsi controller
		vm.deleteController(ctx, newSCSIController, vmDevices)
		return nil, err
	}
	return newSCSIController, nil
}

// getVirtualDeviceByPath gets the virtual device by path.
func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) {
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
		return nil, err
	}

	// filter vm devices to retrieve the device for the given vmdk file identified by disk path
	for _, device := range vmDevices {
		if vmDevices.TypeName(device) == "VirtualDisk" {
			virtualDevice := device.GetVirtualDevice()
			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
				if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
					glog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
					return device, nil
				}
			}
		}
	}
	return nil, nil
}

func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
	fileExt := ".vmdk"
	diskPath = strings.TrimSuffix(diskPath, fileExt)
	volPath = strings.TrimSuffix(volPath, fileExt)
	return diskPath == volPath
}

// deleteController removes the most recently added SCSI controller from the VM.
func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice types.BaseVirtualDevice, vmDevices object.VirtualDeviceList) error {
	controllerDeviceList := vmDevices.SelectByType(controllerDevice)
	if len(controllerDeviceList) < 1 {
		return ErrNoDevicesFound
	}
	device := controllerDeviceList[len(controllerDeviceList)-1]
	err := vm.RemoveDevice(ctx, true, device)
	if err != nil {
		glog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err)
		return err
	}
	return nil
}
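// A minimal attach/detach sketch (assumptions: ctx, a *VirtualMachine vm and a
// volume path on an accessible datastore exist; error handling abbreviated):
//
//	opts := &VolumeOptions{SCSIControllerType: PVSCSIControllerType}
//	diskUUID, err := vm.AttachDisk(ctx, "[vsanDatastore] kubevols/volume.vmdk", opts)
//	if err != nil {
//		return err
//	}
//	// diskUUID is the formatted page-83 identifier used to locate the device on the node.
//	defer vm.DetachDisk(ctx, "[vsanDatastore] kubevols/volume.vmdk")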
27
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/vmoptions.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"github.com/vmware/govmomi/object"
)

// VMOptions provides helper objects for provisioning volume with SPBM Policy
type VMOptions struct {
	VMFolder       *Folder
	VMResourcePool *object.ResourcePool
}
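// A construction sketch (assumptions: vmFolder and resourcePool were resolved
// elsewhere, e.g. the working-directory *Folder and vm.GetResourcePool(ctx)):
//
//	vmOptions := &VMOptions{
//		VMFolder:       vmFolder,
//		VMResourcePool: resourcePool,
//	}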
107
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"strings"

	"github.com/golang/glog"
)

// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
	CapacityKB             int
	Tags                   map[string]string
	Name                   string
	DiskFormat             string
	Datastore              string
	VSANStorageProfileData string
	StoragePolicyName      string
	StoragePolicyID        string
	SCSIControllerType     string
}

var (
	// DiskFormatValidType specifies the valid disk formats
	DiskFormatValidType = map[string]string{
		ThinDiskType: ThinDiskType,
		strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType,
		strings.ToLower(ZeroedThickDiskType):      PreallocatedDiskType,
	}
	// SCSIControllerValidType specifies the supported SCSI controllers
	SCSIControllerValidType = []string{LSILogicControllerType, LSILogicSASControllerType, PVSCSIControllerType}
)

// DiskformatValidOptions generates the valid options for DiskFormat.
func DiskformatValidOptions() string {
	validopts := ""
	for diskformat := range DiskFormatValidType {
		validopts += diskformat + ", "
	}
	validopts = strings.TrimSuffix(validopts, ", ")
	return validopts
}

// CheckDiskFormatSupported checks if the diskFormat is valid.
func CheckDiskFormatSupported(diskFormat string) bool {
	if DiskFormatValidType[diskFormat] == "" {
		glog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions())
		return false
	}
	return true
}

// SCSIControllerTypeValidOptions generates the valid options for SCSIControllerType.
func SCSIControllerTypeValidOptions() string {
	validopts := ""
	for _, controllerType := range SCSIControllerValidType {
		validopts += (controllerType + ", ")
	}
	validopts = strings.TrimSuffix(validopts, ", ")
	return validopts
}

// CheckControllerSupported checks if the given controller type is valid.
func CheckControllerSupported(ctrlType string) bool {
	for _, c := range SCSIControllerValidType {
		if ctrlType == c {
			return true
		}
	}
	glog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
	return false
}

// VerifyVolumeOptions checks if volumeOptions.SCSIControllerType is a valid controller type.
func (volumeOptions VolumeOptions) VerifyVolumeOptions() bool {
	// Validate only if SCSIControllerType is set by the user.
	// The default value is set later in virtualDiskManager.Create and vmDiskManager.Create
	if volumeOptions.SCSIControllerType != "" {
		isValid := CheckControllerSupported(volumeOptions.SCSIControllerType)
		if !isValid {
			return false
		}
	}
	// ThinDiskType is the default, so skip the validation.
	if volumeOptions.DiskFormat != ThinDiskType {
		isValid := CheckDiskFormatSupported(volumeOptions.DiskFormat)
		if !isValid {
			return false
		}
	}
	return true
}
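// A validation sketch (assumption: opts comes from user-supplied storage class
// parameters elsewhere in the provider):
//
//	opts := VolumeOptions{DiskFormat: ThinDiskType, SCSIControllerType: PVSCSIControllerType}
//	if !opts.VerifyVolumeOptions() {
//		return fmt.Errorf("invalid volume options: %+v", opts)
//	}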
133
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/vsphere_metrics.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vclib

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Cloud Provider API constants
const (
	APICreateVolume = "CreateVolume"
	APIDeleteVolume = "DeleteVolume"
	APIAttachVolume = "AttachVolume"
	APIDetachVolume = "DetachVolume"
)

// Cloud Provider Operation constants
const (
	OperationDeleteVolume                  = "DeleteVolumeOperation"
	OperationAttachVolume                  = "AttachVolumeOperation"
	OperationDetachVolume                  = "DetachVolumeOperation"
	OperationDiskIsAttached                = "DiskIsAttachedOperation"
	OperationDisksAreAttached              = "DisksAreAttachedOperation"
	OperationCreateVolume                  = "CreateVolumeOperation"
	OperationCreateVolumeWithPolicy        = "CreateVolumeWithPolicyOperation"
	OperationCreateVolumeWithRawVSANPolicy = "CreateVolumeWithRawVSANPolicyOperation"
)

// vsphereAPIMetric is for recording latency of a single API call.
var vsphereAPIMetric = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "cloudprovider_vsphere_api_request_duration_seconds",
		Help: "Latency of vsphere api call",
	},
	[]string{"request"},
)

var vsphereAPIErrorMetric = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cloudprovider_vsphere_api_request_errors",
		Help: "vsphere Api errors",
	},
	[]string{"request"},
)

// vsphereOperationMetric is for recording latency of a vSphere operation which invokes multiple APIs to get the task done.
var vsphereOperationMetric = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "cloudprovider_vsphere_operation_duration_seconds",
		Help: "Latency of vsphere operation call",
	},
	[]string{"operation"},
)

var vsphereOperationErrorMetric = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "cloudprovider_vsphere_operation_errors",
		Help: "vsphere operation errors",
	},
	[]string{"operation"},
)

// RegisterMetrics registers all the API and Operation metrics
func RegisterMetrics() {
	prometheus.MustRegister(vsphereAPIMetric)
	prometheus.MustRegister(vsphereAPIErrorMetric)
	prometheus.MustRegister(vsphereOperationMetric)
	prometheus.MustRegister(vsphereOperationErrorMetric)
}

// RecordvSphereMetric records the vSphere API and Operation metrics
func RecordvSphereMetric(actionName string, requestTime time.Time, err error) {
	switch actionName {
	case APICreateVolume, APIDeleteVolume, APIAttachVolume, APIDetachVolume:
		recordvSphereAPIMetric(actionName, requestTime, err)
	default:
		recordvSphereOperationMetric(actionName, requestTime, err)
	}
}

func recordvSphereAPIMetric(actionName string, requestTime time.Time, err error) {
	if err != nil {
		vsphereAPIErrorMetric.With(prometheus.Labels{"request": actionName}).Inc()
	} else {
		vsphereAPIMetric.With(prometheus.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime))
	}
}

func recordvSphereOperationMetric(actionName string, requestTime time.Time, err error) {
	if err != nil {
		vsphereOperationErrorMetric.With(prometheus.Labels{"operation": actionName}).Inc()
	} else {
		vsphereOperationMetric.With(prometheus.Labels{"operation": actionName}).Observe(calculateTimeTaken(requestTime))
	}
}

// RecordCreateVolumeMetric records the Create Volume metric
func RecordCreateVolumeMetric(volumeOptions *VolumeOptions, requestTime time.Time, err error) {
	var actionName string
	if volumeOptions.StoragePolicyName != "" {
		actionName = OperationCreateVolumeWithPolicy
	} else if volumeOptions.VSANStorageProfileData != "" {
		actionName = OperationCreateVolumeWithRawVSANPolicy
	} else {
		actionName = OperationCreateVolume
	}
	RecordvSphereMetric(actionName, requestTime, err)
}

func calculateTimeTaken(requestBeginTime time.Time) (timeTaken float64) {
	if !requestBeginTime.IsZero() {
		timeTaken = time.Since(requestBeginTime).Seconds()
	} else {
		timeTaken = 0
	}
	return timeTaken
}
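// The intended call pattern, as used by AttachDisk/DetachDisk above: time the
// call and report the outcome, so either the latency histogram or the error
// counter is updated (sketch; task and spec are assumed to exist):
//
//	requestTime := time.Now()
//	task, err := vm.Reconfigure(ctx, spec)
//	if err == nil {
//		err = task.Wait(ctx)
//	}
//	RecordvSphereMetric(APIAttachVolume, requestTime, err)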
1174
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go
generated
vendored
Normal file
File diff suppressed because it is too large
257
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
|
||||
)
|
||||
|
||||
func configFromEnv() (cfg VSphereConfig, ok bool) {
|
||||
var InsecureFlag bool
|
||||
var err error
|
||||
cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
|
||||
cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
|
||||
cfg.Global.User = os.Getenv("VSPHERE_USER")
|
||||
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
|
||||
cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
|
||||
cfg.Network.PublicNetwork = os.Getenv("VSPHERE_PUBLIC_NETWORK")
|
||||
cfg.Global.DefaultDatastore = os.Getenv("VSPHERE_DATASTORE")
|
||||
cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
|
||||
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
|
||||
cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
|
||||
if os.Getenv("VSPHERE_INSECURE") != "" {
|
||||
InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
|
||||
} else {
|
||||
InsecureFlag = false
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cfg.Global.InsecureFlag = InsecureFlag
|
||||
|
||||
ok = (cfg.Global.VCenterIP != "" &&
|
||||
cfg.Global.User != "")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
_, err := readConfig(nil)
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when no config is provided: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := readConfig(strings.NewReader(`
|
||||
[Global]
|
||||
server = 0.0.0.0
|
||||
port = 443
|
||||
user = user
|
||||
password = password
|
||||
insecure-flag = true
|
||||
datacenter = us-west
|
||||
vm-uuid = 1234
|
||||
vm-name = vmname
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when a valid config is provided: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Global.VCenterIP != "0.0.0.0" {
|
||||
t.Errorf("incorrect vcenter ip: %s", cfg.Global.VCenterIP)
|
||||
}
|
||||
|
||||
if cfg.Global.Datacenter != "us-west" {
|
||||
t.Errorf("incorrect datacenter: %s", cfg.Global.Datacenter)
|
||||
}
|
||||
|
||||
if cfg.Global.VMUUID != "1234" {
|
||||
t.Errorf("incorrect vm-uuid: %s", cfg.Global.VMUUID)
|
||||
}
|
||||
|
||||
if cfg.Global.VMName != "vmname" {
|
||||
t.Errorf("incorrect vm-name: %s", cfg.Global.VMName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVSphere(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	_, err := newControllerNode(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}
}
func TestVSphereLogin(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	// Create vSphere configuration object
	vs, err := newControllerNode(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}

	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	var vcInstance *VSphereInstance
	if vcInstance, ok = vs.vsphereInstanceMap[cfg.Global.VCenterIP]; !ok {
		t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
	}

	err = vcInstance.conn.Connect(ctx)
	if err != nil {
		t.Errorf("Failed to connect to vSphere: %s", err)
	}
	defer vcInstance.conn.GoVmomiClient.Logout(ctx)
}
func TestZones(t *testing.T) {
	cfg := VSphereConfig{}
	cfg.Global.Datacenter = "myDatacenter"

	// Create vSphere configuration object
	vs := VSphere{
		cfg: &cfg,
	}

	_, ok := vs.Zones()
	if ok {
		t.Fatalf("Zones() returned true")
	}
}
func TestInstances(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	vs, err := newControllerNode(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}

	i, ok := vs.Instances()
	if !ok {
		t.Fatalf("Instances() returned false")
	}

	nodeName, err := vs.CurrentNodeName("")
	if err != nil {
		t.Fatalf("CurrentNodeName() failed: %s", err)
	}

	externalID, err := i.ExternalID(nodeName)
	if err != nil {
		t.Fatalf("Instances.ExternalID(%s) failed: %s", nodeName, err)
	}
	t.Logf("Found ExternalID(%s) = %s\n", nodeName, externalID)

	nonExistingVM := types.NodeName(rand.String(15))
	externalID, err = i.ExternalID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.ExternalID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
	}

	instanceID, err := i.InstanceID(nodeName)
	if err != nil {
		t.Fatalf("Instances.InstanceID(%s) failed: %s", nodeName, err)
	}
	t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceID)

	instanceID, err = i.InstanceID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
	}

	addrs, err := i.NodeAddresses(nodeName)
	if err != nil {
		t.Fatalf("Instances.NodeAddresses(%s) failed: %s", nodeName, err)
	}
	t.Logf("Found NodeAddresses(%s) = %s\n", nodeName, addrs)
}
func TestVolumes(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	vs, err := newControllerNode(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}

	nodeName, err := vs.CurrentNodeName("")
	if err != nil {
		t.Fatalf("CurrentNodeName() failed: %s", err)
	}

	volumeOptions := &vclib.VolumeOptions{
		CapacityKB: 1 * 1024 * 1024,
		Tags:       nil,
		Name:       "kubernetes-test-volume-" + rand.String(10),
		DiskFormat: "thin"}

	volPath, err := vs.CreateVolume(volumeOptions)
	if err != nil {
		t.Fatalf("Cannot create a new VMDK volume: %v", err)
	}

	_, err = vs.AttachDisk(volPath, "", "")
	if err != nil {
		t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err)
	}

	err = vs.DetachDisk(volPath, "")
	if err != nil {
		t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, nodeName, err)
	}

	// todo: Deleting a volume after detach is currently not working through the API or UI (vSphere)
	// err = vs.DeleteVolume(volPath)
	// if err != nil {
	// 	t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
	// }
}
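// Illustrative sketch (editorial, not part of the upstream file): these are
// integration-style tests that skip unless configFromEnv finds vCenter
// credentials. A typical invocation, assuming the VSPHERE_* variables above
// are exported:
//
//	go test k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere -run TestVolumes -v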
539
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go
generated
vendored
Normal file
@ -0,0 +1,539 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/mo"
	"k8s.io/api/core/v1"
	k8stypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
)
const (
	DatastoreProperty     = "datastore"
	DatastoreInfoProperty = "info"
	Folder                = "Folder"
	VirtualMachine        = "VirtualMachine"
	DummyDiskName         = "kube-dummyDisk.vmdk"
)
// GetVSphere reads the vSphere configuration from the system environment and constructs a VSphere object
func GetVSphere() (*VSphere, error) {
	cfg := getVSphereConfig()
	vSphereConn := getVSphereConn(cfg)
	client, err := GetgovmomiClient(vSphereConn)
	if err != nil {
		return nil, err
	}
	vSphereConn.GoVmomiClient = client
	vsphereIns := &VSphereInstance{
		conn: vSphereConn,
		cfg: &VirtualCenterConfig{
			User:              cfg.Global.User,
			Password:          cfg.Global.Password,
			VCenterPort:       cfg.Global.VCenterPort,
			Datacenters:       cfg.Global.Datacenters,
			RoundTripperCount: cfg.Global.RoundTripperCount,
		},
	}
	vsphereInsMap := make(map[string]*VSphereInstance)
	vsphereInsMap[cfg.Global.VCenterIP] = vsphereIns
	// TODO: Initialize nodeManager and set it in VSphere.
	vs := &VSphere{
		vsphereInstanceMap: vsphereInsMap,
		hostName:           "",
		cfg:                cfg,
		nodeManager: &NodeManager{
			vsphereInstanceMap: vsphereInsMap,
			nodeInfoMap:        make(map[string]*NodeInfo),
			registeredNodes:    make(map[string]*v1.Node),
		},
	}
	runtime.SetFinalizer(vs, logout)
	return vs, nil
}
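// Illustrative sketch (editorial, not part of the upstream file): how a caller
// might obtain the provider, assuming the VSPHERE_* environment variables
// read by getVSphereConfig below are set:
//
//	vs, err := GetVSphere()
//	if err != nil {
//		glog.Fatalf("failed to build vSphere cloud provider: %v", err)
//	}
//	// vs now wraps an authenticated govmomi client, keyed by vCenter IP
//	// in vs.vsphereInstanceMap.
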
func getVSphereConfig() *VSphereConfig {
	var cfg VSphereConfig
	cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
	cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
	cfg.Global.User = os.Getenv("VSPHERE_USER")
	cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
	cfg.Global.Datacenters = os.Getenv("VSPHERE_DATACENTER")
	cfg.Global.DefaultDatastore = os.Getenv("VSPHERE_DATASTORE")
	cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
	cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
	cfg.Global.InsecureFlag = false
	if strings.ToLower(os.Getenv("VSPHERE_INSECURE")) == "true" {
		cfg.Global.InsecureFlag = true
	}
	cfg.Workspace.VCenterIP = cfg.Global.VCenterIP
	cfg.Workspace.Datacenter = cfg.Global.Datacenters
	cfg.Workspace.DefaultDatastore = cfg.Global.DefaultDatastore
	cfg.Workspace.Folder = cfg.Global.WorkingDir
	return &cfg
}
func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection {
	vSphereConn := &vclib.VSphereConnection{
		Username:          cfg.Global.User,
		Password:          cfg.Global.Password,
		Hostname:          cfg.Global.VCenterIP,
		Insecure:          cfg.Global.InsecureFlag,
		RoundTripperCount: cfg.Global.RoundTripperCount,
		Port:              cfg.Global.VCenterPort,
	}
	return vSphereConn
}
// GetgovmomiClient gets the govmomi client for the vSphere connection object
func GetgovmomiClient(conn *vclib.VSphereConnection) (*govmomi.Client, error) {
	if conn == nil {
		cfg := getVSphereConfig()
		conn = getVSphereConn(cfg)
	}
	client, err := conn.NewClient(context.TODO())
	return client, err
}
// getvmUUID gets the BIOS UUID via the sys interface. This UUID is known by vSphere.
func getvmUUID() (string, error) {
	id, err := ioutil.ReadFile(UUIDPath)
	if err != nil {
		return "", fmt.Errorf("error retrieving vm uuid: %s", err)
	}
	uuidFromFile := string(id[:])
	// strip leading and trailing white space and the newline char
	uuid := strings.TrimSpace(uuidFromFile)
	// check that the uuid starts with "VMware-"
	if !strings.HasPrefix(uuid, UUIDPrefix) {
		return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile)
	}
	// strip the prefix, white spaces, and dashes
	uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1)
	uuid = strings.Replace(uuid, "-", "", -1)
	if len(uuid) != 32 {
		return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile)
	}
	// need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f"
	uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
	return uuid, nil
}
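// Illustrative sketch (editorial, not part of the upstream file): a worked
// example of the normalization above. The raw sysfs value is hypothetical but
// follows the "VMware-" format the code expects:
//
//	raw:    "VMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f"
//	strip:  "564d395ed807e18acb25b79f65eb2b9f"    (prefix, spaces, dashes removed; 32 hex chars)
//	result: "564d395e-d807-e18a-cb25-b79f65eb2b9f" (re-dashed in 8-4-4-4-12 groups)
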
// Returns the accessible datastores for the given node VM.
func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
	accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx)
	if err != nil {
		// Check if the node VM is not found, which indicates that the node info in the node manager is stale.
		// If so, rediscover the node and retry.
		if vclib.IsManagedObjectNotFoundError(err) {
			glog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName)
			err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName))
			if err == nil {
				glog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName)
				nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName))
				if err != nil {
					glog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail)
					return nil, err
				}

				accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx)
				if err != nil {
					glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
					return nil, err
				}
			} else {
				glog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail)
				return nil, err
			}
		} else {
			glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
			return nil, err
		}
	}
	return accessibleDatastores, nil
}
// Returns the datastores accessible from every node VM in the Kubernetes cluster.
func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
	nodeVmDetails, err := nodeManager.GetNodeDetails()
	if err != nil {
		glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
		return nil, err
	}

	if len(nodeVmDetails) == 0 {
		msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails)
		glog.Error(msg)
		return nil, errors.New(msg)
	}
	var sharedDatastores []*vclib.DatastoreInfo
	for _, nodeVmDetail := range nodeVmDetails {
		glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName)
		accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager)
		if err != nil {
			if err == vclib.ErrNoVMFound {
				glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName)
				continue
			}
			return nil, err
		}

		if len(sharedDatastores) == 0 {
			sharedDatastores = accessibleDatastores
		} else {
			sharedDatastores = intersect(sharedDatastores, accessibleDatastores)
			if len(sharedDatastores) == 0 {
				return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster for nodeVmDetails: %+v", nodeVmDetails)
			}
		}
	}
	glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
	sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
	if err != nil {
		glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
		return nil, err
	}
	glog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores)
	return sharedDatastores, nil
}
func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo {
	glog.V(9).Infof("list1: %+v", list1)
	glog.V(9).Infof("list2: %+v", list2)
	var sharedDs []*vclib.DatastoreInfo
	for _, val1 := range list1 {
		// Check if val1 is found in list2
		for _, val2 := range list2 {
			// Intersection is performed based on the datastoreUrl as this uniquely identifies the datastore.
			if val1.Info.Url == val2.Info.Url {
				sharedDs = append(sharedDs, val1)
				break
			}
		}
	}
	return sharedDs
}
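// Illustrative trace (editorial, not part of the upstream file) of the
// URL-keyed intersection above, with hypothetical datastore URLs:
//
//	list1: [ds-a (Url: ds:///vmfs/volumes/111/), ds-b (Url: ds:///vmfs/volumes/222/)]
//	list2: [ds-b (Url: ds:///vmfs/volumes/222/), ds-c (Url: ds:///vmfs/volumes/333/)]
//	intersect(list1, list2) -> [ds-b], since only the URL .../222/ appears in both lists.
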
// getMostFreeDatastoreName gets the best-fit compatible datastore by free space.
// It assumes dsInfoList is non-empty, as guaranteed by its callers.
func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (string, error) {
	var curMax int64
	curMax = -1
	var index int
	for i, dsInfo := range dsInfoList {
		dsFreeSpace := dsInfo.Info.GetDatastoreInfo().FreeSpace
		if dsFreeSpace > curMax {
			curMax = dsFreeSpace
			index = i
		}
	}
	return dsInfoList[index].Info.GetDatastoreInfo().Name, nil
}
// Returns the datastores in the given datacenter by performing a lookup based on datastore URL.
func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, sharedDsInfos []*vclib.DatastoreInfo) ([]*vclib.DatastoreInfo, error) {
	var datastores []*vclib.DatastoreInfo
	allDsInfoMap, err := dc.GetAllDatastores(ctx)
	if err != nil {
		return nil, err
	}
	for _, sharedDsInfo := range sharedDsInfos {
		dsInfo, ok := allDsInfoMap[sharedDsInfo.Info.Url]
		if ok {
			datastores = append(datastores, dsInfo)
		} else {
			glog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url)
		}
	}
	glog.V(9).Infof("Datastore from endpoint VC: %+v", datastores)
	return datastores, nil
}
func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, nodeManager *NodeManager) (string, error) {
	pbmClient, err := vclib.NewPbmClient(ctx, dc.Client())
	if err != nil {
		return "", err
	}
	storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
	if err != nil {
		glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
		return "", err
	}
	sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager)
	if err != nil {
		glog.Errorf("Failed to get shared datastores. err: %+v", err)
		return "", err
	}
	if len(sharedDs) == 0 {
		msg := "No shared datastores found in the endpoint virtual center"
		glog.Error(msg)
		return "", errors.New(msg)
	}
	compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs)
	if err != nil {
		glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
			sharedDs, storagePolicyID, err)
		return "", err
	}
	glog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
	datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
	if err != nil {
		glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
		return "", err
	}
	glog.V(4).Infof("Most free datastore : %s", datastore)
	return datastore, err
}
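// Illustrative sketch (editorial, not part of the upstream file) of the
// datastore-selection pipeline implemented above, for a hypothetical policy:
//
//	storagePolicyName "gold"
//	  -> pbmClient.ProfileIDByName(ctx, "gold")        // resolve the SPBM profile ID
//	  -> getSharedDatastoresInK8SCluster(ctx, dc, nm)  // datastores reachable from all nodes
//	  -> pbmClient.GetCompatibleDatastores(...)        // filter those by the profile
//	  -> getMostFreeDatastoreName(...)                 // pick the one with most free space
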
func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resourcePoolPath string) (*vclib.VMOptions, error) {
	var vmOptions vclib.VMOptions
	resourcePool, err := dc.GetResourcePool(ctx, resourcePoolPath)
	if err != nil {
		return nil, err
	}
	glog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool)
	folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
	if err != nil {
		return nil, err
	}
	vmOptions.VMFolder = folder
	vmOptions.VMResourcePool = resourcePool
	return &vmOptions, nil
}
// A background routine which is responsible for deleting stale dummy VM's.
func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for {
		time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute)
		vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
		if err != nil {
			glog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err)
			continue
		}
		dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
		if err != nil {
			glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err)
			continue
		}
		// Get the folder reference for the global working directory where the dummy VM needs to be created.
		vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
		if err != nil {
			glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
			continue
		}
		// A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
		// The lock is held for the duration of one sweep and released before the next sleep.
		cleanUpDummyVMLock.Lock()
		err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
		if err != nil {
			glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
		}
		cleanUpDummyVMLock.Unlock()
	}
}
// Get the canonical volume path for the given volume path.
// Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be the same as the volume path.
func getcanonicalVolumePath(ctx context.Context, dc *vclib.Datacenter, volumePath string) (string, error) {
	var folderID string
	var folderExists bool
	canonicalVolumePath := volumePath
	dsPathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath)
	if err != nil {
		return "", err
	}
	dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
	if len(dsPath) <= 1 {
		return canonicalVolumePath, nil
	}
	datastore := dsPathObj.Datastore
	dsFolder := dsPath[0]
	folderNameIDMap, datastoreExists := datastoreFolderIDMap[datastore]
	if datastoreExists {
		folderID, folderExists = folderNameIDMap[dsFolder]
	}
	// Get the datastore folder ID if the datastore or folder doesn't exist in datastoreFolderIDMap
	if !datastoreExists || !folderExists {
		if !vclib.IsValidUUID(dsFolder) {
			dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
			// Query a non-existent dummy disk on the datastore folder.
			// The query is expected to fail; its error message carries the folder ID.
			_, err := dc.GetVirtualDiskPage83Data(ctx, dummyDiskVolPath)
			if err != nil {
				re := regexp.MustCompile("File (.*?) was not found")
				match := re.FindStringSubmatch(err.Error())
				if len(match) < 2 {
					return "", err
				}
				canonicalVolumePath = match[1]
			}
		}
		diskPath := vclib.GetPathFromVMDiskPath(canonicalVolumePath)
		if diskPath == "" {
			return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
		}
		folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
		setdatastoreFolderIDMap(datastoreFolderIDMap, datastore, dsFolder, folderID)
	}
	canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
	return canonicalVolumePath, nil
}
func setdatastoreFolderIDMap(
	datastoreFolderIDMap map[string]map[string]string,
	datastore string,
	folderName string,
	folderID string) {
	folderNameIDMap := datastoreFolderIDMap[datastore]
	if folderNameIDMap == nil {
		folderNameIDMap = make(map[string]string)
		datastoreFolderIDMap[datastore] = folderNameIDMap
	}
	folderNameIDMap[folderName] = folderID
}
func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPath string) (string, error) {
	volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
	// Get the canonical volume path for volPath.
	canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath)
	if err != nil {
		glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
		return "", err
	}
	// Check if the volume path carries the .vmdk extension. If not, add it.
	if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
		canonicalVolumePath += ".vmdk"
	}
	return canonicalVolumePath, nil
}
// convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts each entry to its canonical path
func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName][]string, error) {
	vmVolumes := make(map[k8stypes.NodeName][]string)
	for nodeName, volPaths := range nodeVolumes {
		nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
		if err != nil {
			return nil, err
		}

		_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
		if err != nil {
			return nil, err
		}

		for i, volPath := range volPaths {
			deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath)
			if err != nil {
				glog.Errorf("Failed to convert vSphere volume path %s to device path. err: %+v", volPath, err)
				return nil, err
			}
			volPaths[i] = deviceVolPath
		}
		vmVolumes[nodeName] = volPaths
	}
	return vmVolumes, nil
}
// checkDiskAttached verifies that volumes are attached to the VMs which are in the same vCenter and Datacenter.
// Returns any nodes for which the VM was not found in that vCenter and Datacenter.
func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) {
	var nodesToRetry []k8stypes.NodeName
	var vmList []*vclib.VirtualMachine
	var nodeInfo NodeInfo
	var err error

	for _, nodeName := range nodes {
		nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName)
		if err != nil {
			return nodesToRetry, err
		}
		vmList = append(vmList, nodeInfo.vm)
	}

	// Making sure the session is valid
	_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
	if err != nil {
		return nodesToRetry, err
	}

	// If any of the nodes are not present, the property collector query will fail for the entire operation
	vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"})
	if err != nil {
		if vclib.IsManagedObjectNotFoundError(err) && !retry {
			glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList)
			// The property collector query failed;
			// fall back to verifying volume paths per VM.
			for _, nodeName := range nodes {
				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
				if err != nil {
					return nodesToRetry, err
				}
				devices, err := nodeInfo.vm.VirtualMachine.Device(ctx)
				if err != nil {
					if vclib.IsManagedObjectNotFoundError(err) {
						glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm)
						nodesToRetry = append(nodesToRetry, nodeName)
						continue
					}
					return nodesToRetry, err
				}
				glog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
				vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached)
			}
		}
		return nodesToRetry, err
	}

	vmMoMap := make(map[string]mo.VirtualMachine)
	for _, vmMo := range vmMoList {
		if vmMo.Config == nil {
			glog.Errorf("Config is not available for VM: %q", vmMo.Name)
			continue
		}
		glog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid))
		vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo
	}

	glog.V(9).Infof("vmMoMap: %+v", vmMoMap)

	for _, nodeName := range nodes {
		node, err := vs.nodeManager.GetNode(nodeName)
		if err != nil {
			return nodesToRetry, err
		}
		glog.V(9).Infof("Verifying volume for nodeName: %q with nodeuuid: %s, vmMoMap: %+v", nodeName, node.Status.NodeInfo.SystemUUID, vmMoMap)
		vclib.VerifyVolumePathsForVM(vmMoMap[strings.ToLower(node.Status.NodeInfo.SystemUUID)], nodeVolumes[nodeName], convertToString(nodeName), attached)
	}
	return nodesToRetry, nil
}
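// Illustrative sketch (editorial, not part of the upstream file) of the retry
// contract above; the caller shown is hypothetical:
//
//	nodesToRetry, err := vs.checkDiskAttached(ctx, nodes, nodeVolumes, attached, false)
//	if err == nil && len(nodesToRetry) > 0 {
//		// rediscover the stale nodes, then verify once more with retry=true
//		nodesToRetry, err = vs.checkDiskAttached(ctx, nodesToRetry, nodeVolumes, attached, true)
//	}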