Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@ -17,21 +17,22 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -43,20 +44,26 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib/fixtures:go_default_library",
"//pkg/controller:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/vmware/govmomi/lookup/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator/vpx:go_default_library",
"//vendor/github.com/vmware/govmomi/sts/simulator:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
],
)


@ -3,5 +3,6 @@ approvers:
- baludontu
- divyenpatel
- imkin
- kerneltime
- luomiao
- frapposelli
- dougm
- SandeepPissay


@ -19,10 +19,10 @@ package vsphere
import (
"errors"
"fmt"
"github.com/golang/glog"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
"net/http"
"strings"
"sync"
@ -71,12 +71,12 @@ func (secretCredentialManager *SecretCredentialManager) GetCredential(server str
return nil, err
}
// Handle secrets deletion by finding credentials from cache
glog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace)
klog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace)
}
credential, found := secretCredentialManager.Cache.GetCredential(server)
if !found {
glog.Errorf("credentials not found for server %q", server)
klog.Errorf("credentials not found for server %q", server)
return nil, ErrCredentialsNotFound
}
return &credential, nil
@ -88,13 +88,13 @@ func (secretCredentialManager *SecretCredentialManager) updateCredentialsMap() e
}
secret, err := secretCredentialManager.SecretLister.Secrets(secretCredentialManager.SecretNamespace).Get(secretCredentialManager.SecretName)
if err != nil {
glog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err)
klog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err)
return err
}
cacheSecret := secretCredentialManager.Cache.GetSecret()
if cacheSecret != nil &&
cacheSecret.GetResourceVersion() == secret.GetResourceVersion() {
glog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion())
klog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion())
return nil
}
secretCredentialManager.Cache.UpdateSecret(secret)
@ -150,13 +150,13 @@ func parseConfig(data map[string][]byte, config map[string]*Credential) error {
}
config[vcServer].User = string(credentialValue)
} else {
glog.Errorf("Unknown secret key %s", credentialKey)
klog.Errorf("Unknown secret key %s", credentialKey)
return ErrUnknownSecretKey
}
}
for vcServer, credential := range config {
if credential.User == "" || credential.Password == "" {
glog.Errorf("Username/Password is missing for server %s", vcServer)
klog.Errorf("Username/Password is missing for server %s", vcServer)
return ErrCredentialMissing
}
}
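
The credential manager above tries the live Secret first and, when the Secret has been deleted, serves the last credentials it cached. A minimal, self-contained sketch of that lookup-with-cache-fallback shape — the types and helper names here are illustrative, not vclib's, and the real GetCredential only falls back on not-found errors:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("credentials not found")

// cache is a stand-in for the credential manager's in-memory cache.
type cache struct {
	mu   sync.RWMutex
	data map[string]string
}

func (c *cache) get(server string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[server]
	return v, ok
}

// getCredential mirrors GetCredential's shape: refresh from the primary
// source when possible, otherwise keep serving what the cache last saw.
func getCredential(server string, refresh func() (map[string]string, error), c *cache) (string, error) {
	if fresh, err := refresh(); err == nil {
		c.mu.Lock()
		c.data = fresh
		c.mu.Unlock()
	} // e.g. the Secret was deleted: fall through to the cache
	if v, ok := c.get(server); ok {
		return v, nil
	}
	return "", errNotFound
}

func main() {
	c := &cache{data: map[string]string{"vc1.example.com": "user:pass"}}
	secretGone := func() (map[string]string, error) { return nil, errors.New("secret gone") }
	fmt.Println(getCredential("vc1.example.com", secretGone, c))
}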


@ -22,9 +22,9 @@ import (
"strings"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
@ -81,11 +81,11 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
queueChannel = make(chan *VmSearch, QUEUE_SIZE)
nodeUUID, err := GetNodeUUID(node)
if err != nil {
glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
return err
}
glog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID)
klog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID)
vmFound := false
globalErr = nil
@ -124,7 +124,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
err := nm.vcConnect(ctx, vsi)
if err != nil {
glog.V(4).Info("Discovering node error vc:", err)
klog.V(4).Info("Discovering node error vc:", err)
setGlobalErr(err)
continue
}
@ -132,7 +132,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
if vsi.cfg.Datacenters == "" {
datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn)
if err != nil {
glog.V(4).Info("Discovering node error dc:", err)
klog.V(4).Info("Discovering node error dc:", err)
setGlobalErr(err)
continue
}
@ -145,7 +145,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
}
datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc)
if err != nil {
glog.V(4).Info("Discovering node error dc:", err)
klog.V(4).Info("Discovering node error dc:", err)
setGlobalErr(err)
continue
}
@ -159,7 +159,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
break
}
glog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name())
klog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name())
queueChannel <- &VmSearch{
vc: vc,
datacenter: datacenterObj,
@ -176,18 +176,18 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
defer cancel()
vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID)
if err != nil {
glog.V(4).Infof("Error %q while looking for vm=%+v in vc=%s and datacenter=%s",
err, node.Name, vm, res.vc, res.datacenter.Name())
klog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v",
vm, res.vc, res.datacenter.Name(), err)
if err != vclib.ErrNoVMFound {
setGlobalErr(err)
} else {
glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s",
node.Name, res.vc, res.datacenter.Name(), err)
klog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s",
node.Name, res.vc, res.datacenter.Name())
}
continue
}
if vm != nil {
glog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s",
klog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s",
node.Name, vm, res.vc, res.datacenter.Name())
nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc, vmUUID: nodeUUID}
@ -210,7 +210,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
return *globalErr
}
glog.V(4).Infof("Discovery Node: %q vm not found", node.Name)
klog.V(4).Infof("Discovery Node: %q vm not found", node.Name)
return vclib.ErrNoVMFound
}
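
DiscoverNode above fans the search out over a buffered channel (queueChannel) with worker goroutines per vSphere instance/datacenter pair and records the first VM match. A minimal sketch of that fan-out/first-match shape, with illustrative types rather than the provider's own:

package main

import (
	"fmt"
	"sync"
)

// vmSearch is a stand-in for the provider's VmSearch work item.
type vmSearch struct{ vc, datacenter string }

func main() {
	queue := make(chan vmSearch, 5) // buffered, like queueChannel above
	var (
		wg    sync.WaitGroup
		mu    sync.Mutex
		found string
	)

	// Producer: one work item per (vCenter, datacenter) pair.
	go func() {
		for _, s := range []vmSearch{{"vc1", "dc1"}, {"vc1", "dc2"}, {"vc2", "dc1"}} {
			queue <- s
		}
		close(queue)
	}()

	// Workers: drain the queue and record the first match.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for s := range queue {
				if s.datacenter == "dc2" { // stand-in for GetVMByUUID finding the VM
					mu.Lock()
					if found == "" {
						found = s.vc + "/" + s.datacenter
					}
					mu.Unlock()
				}
			}
		}()
	}
	wg.Wait()
	fmt.Println("found node at:", found)
}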
@ -276,19 +276,19 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error)
var err error
if nodeInfo == nil {
// Rediscover node if no NodeInfo found.
glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName))
klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName))
err = nm.RediscoverNode(nodeName)
if err != nil {
glog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName))
klog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName))
return NodeInfo{}, err
}
nodeInfo = getNodeInfo(nodeName)
} else {
// Renew the found NodeInfo to avoid stale vSphere connection.
glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName))
klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName))
nodeInfo, err = nm.renewNodeInfo(nodeInfo, true)
if err != nil {
glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName))
klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName))
return NodeInfo{}, err
}
nm.addNodeInfo(convertToString(nodeName), nodeInfo)
@ -309,7 +309,7 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
if err != nil {
return nil, err
}
glog.V(4).Infof("Updated NodeInfo %q for node %q.", nodeInfo, nodeName)
klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName)
nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID})
}
return nodeDetails, nil
@ -324,7 +324,7 @@ func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) {
func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) {
nodeInfo, err := nm.GetNodeInfo(nodeName)
if err != nil {
glog.V(4).Infof("node info for node %q not found", convertToString(nodeName))
klog.V(4).Infof("node info for node %q not found", convertToString(nodeName))
return VSphereInstance{}, err
}
vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
@ -351,7 +351,12 @@ func (nm *NodeManager) renewNodeInfo(nodeInfo *NodeInfo, reconnect bool) (*NodeI
}
}
vm := nodeInfo.vm.RenewVM(vsphereInstance.conn.Client)
return &NodeInfo{vm: &vm, dataCenter: vm.Datacenter, vcServer: nodeInfo.vcServer}, nil
return &NodeInfo{
vm: &vm,
dataCenter: vm.Datacenter,
vcServer: nodeInfo.vcServer,
vmUUID: nodeInfo.vmUUID,
}, nil
}
func (nodeInfo *NodeInfo) VM() *vclib.VirtualMachine {
@ -374,17 +379,16 @@ func (nm *NodeManager) vcConnect(ctx context.Context, vsphereInstance *VSphereIn
credentialManager := nm.CredentialManager()
if !vclib.IsInvalidCredentialsError(err) || credentialManager == nil {
glog.Errorf("Cannot connect to vCenter with err: %v", err)
klog.Errorf("Cannot connect to vCenter with err: %v", err)
return err
}
glog.V(4).Infof("Invalid credentials. Cannot connect to server %q. "+
"Fetching credentials from secrets.", vsphereInstance.conn.Hostname)
klog.V(4).Infof("Invalid credentials. Cannot connect to server %q. Fetching credentials from secrets.", vsphereInstance.conn.Hostname)
// Get latest credentials from SecretCredentialManager
credentials, err := credentialManager.GetCredential(vsphereInstance.conn.Hostname)
if err != nil {
glog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err)
klog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err)
return err
}
vsphereInstance.conn.UpdateCredentials(credentials.User, credentials.Password)
@ -408,19 +412,19 @@ func (nm *NodeManager) GetNodeInfoWithNodeObject(node *v1.Node) (NodeInfo, error
var err error
if nodeInfo == nil {
// Rediscover node if no NodeInfo found.
glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName)
klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName)
err = nm.DiscoverNode(node)
if err != nil {
glog.Errorf("Error %q node info for node %q not found", err, nodeName)
klog.Errorf("Error %q node info for node %q not found", err, nodeName)
return NodeInfo{}, err
}
nodeInfo = getNodeInfo(nodeName)
} else {
// Renew the found NodeInfo to avoid stale vSphere connection.
glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName)
klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName)
nodeInfo, err = nm.renewNodeInfo(nodeInfo, true)
if err != nil {
glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName)
klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName)
return NodeInfo{}, err
}
nm.addNodeInfo(nodeName, nodeInfo)


@ -24,7 +24,7 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//pkg/version:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
@ -37,6 +37,7 @@ go_library(
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -52,6 +53,7 @@ filegroup(
srcs = [
":package-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:all-srcs",
"//pkg/cloudprovider/providers/vsphere/vclib/fixtures:all-srcs",
],
tags = ["automanaged"],
)
@ -59,6 +61,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = [
"connection_test.go",
"datacenter_test.go",
"datastore_test.go",
"folder_test.go",
@ -67,6 +70,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib/fixtures:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator:go_default_library",


@ -20,15 +20,17 @@ import (
"context"
"crypto/tls"
"encoding/pem"
"fmt"
"net"
neturl "net/url"
"sync"
"github.com/golang/glog"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/sts"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/soap"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/version"
)
// VSphereConnection contains information for connecting to vCenter
@ -38,6 +40,8 @@ type VSphereConnection struct {
Password string
Hostname string
Port string
CACert string
Thumbprint string
Insecure bool
RoundTripperCount uint
credentialsLock sync.Mutex
@ -58,7 +62,7 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error {
if connection.Client == nil {
connection.Client, err = connection.NewClient(ctx)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
klog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
@ -66,17 +70,17 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error {
m := session.NewManager(connection.Client)
userSession, err := m.UserSession(ctx)
if err != nil {
glog.Errorf("Error while obtaining user session. err: %+v", err)
klog.Errorf("Error while obtaining user session. err: %+v", err)
return err
}
if userSession != nil {
return nil
}
glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
klog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
connection.Client, err = connection.NewClient(ctx)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
klog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
@ -94,21 +98,21 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl
// decide to use LoginByToken if the username value is PEM encoded.
b, _ := pem.Decode([]byte(connection.Username))
if b == nil {
glog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username)
klog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username)
return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password))
}
glog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username)
klog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username)
cert, err := tls.X509KeyPair([]byte(connection.Username), []byte(connection.Password))
if err != nil {
glog.Errorf("Failed to load X509 key pair. err: %+v", err)
klog.Errorf("Failed to load X509 key pair. err: %+v", err)
return err
}
tokens, err := sts.NewClient(ctx, client)
if err != nil {
glog.Errorf("Failed to create STS client. err: %+v", err)
klog.Errorf("Failed to create STS client. err: %+v", err)
return err
}
@ -118,7 +122,7 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl
signer, err := tokens.Issue(ctx, req)
if err != nil {
glog.Errorf("Failed to issue SAML token. err: %+v", err)
klog.Errorf("Failed to issue SAML token. err: %+v", err)
return err
}
@ -129,9 +133,26 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl
// Logout calls SessionManager.Logout for the given connection.
func (connection *VSphereConnection) Logout(ctx context.Context) {
m := session.NewManager(connection.Client)
clientLock.Lock()
c := connection.Client
clientLock.Unlock()
if c == nil {
return
}
m := session.NewManager(c)
hasActiveSession, err := m.SessionIsActive(ctx)
if err != nil {
klog.Errorf("Logout failed: %s", err)
return
}
if !hasActiveSession {
klog.Errorf("No active session, cannot logout")
return
}
if err := m.Logout(ctx); err != nil {
glog.Errorf("Logout failed: %s", err)
klog.Errorf("Logout failed: %s", err)
}
}
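
The reworked Logout snapshots connection.Client under clientLock before using it, so a concurrent reconnect that swaps the client cannot race the logout. A small sketch of that guarded-read-then-act pattern with hypothetical names (clientLock is assumed to be a package-level mutex in vclib):

package main

import (
	"fmt"
	"sync"
)

var clientLock sync.Mutex // stand-in for the package-level lock the diff references

type connection struct {
	client *string // stand-in for *vim25.Client
}

func logout(c *connection) {
	// Snapshot the pointer under the lock, then act only on the snapshot;
	// a concurrent reconnect may replace c.client at any time.
	clientLock.Lock()
	cl := c.client
	clientLock.Unlock()
	if cl == nil {
		fmt.Println("no client, nothing to log out")
		return
	}
	fmt.Println("logging out session on", *cl)
}

func main() {
	s := "vcenter.example.com"
	logout(&connection{client: &s})
	logout(&connection{})
}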
@ -139,24 +160,38 @@ func (connection *VSphereConnection) Logout(ctx context.Context) {
func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Client, error) {
url, err := soap.ParseURL(net.JoinHostPort(connection.Hostname, connection.Port))
if err != nil {
glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
klog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
return nil, err
}
sc := soap.NewClient(url, connection.Insecure)
if ca := connection.CACert; ca != "" {
if err := sc.SetRootCAs(ca); err != nil {
return nil, err
}
}
tpHost := connection.Hostname + ":" + connection.Port
sc.SetThumbprint(tpHost, connection.Thumbprint)
client, err := vim25.NewClient(ctx, sc)
if err != nil {
glog.Errorf("Failed to create new client. err: %+v", err)
klog.Errorf("Failed to create new client. err: %+v", err)
return nil, err
}
k8sVersion := version.Get().GitVersion
client.UserAgent = fmt.Sprintf("kubernetes-cloudprovider/%s", k8sVersion)
err = connection.login(ctx, client)
if err != nil {
return nil, err
}
if glog.V(3) {
if klog.V(3) {
s, err := session.NewManager(client).UserSession(ctx)
if err == nil {
glog.Infof("New session ID for '%s' = %s", s.UserName, s.Key)
klog.Infof("New session ID for '%s' = %s", s.UserName, s.Key)
}
}
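
With the new CACert and Thumbprint fields, callers can pin the vCenter identity either to a private CA bundle (wired through soap.Client.SetRootCAs) or to a SHA-1 certificate thumbprint (SetThumbprint). A sketch of configuring both, assuming this commit's vclib import path; the hostname, port, path, and thumbprint values are illustrative:

package main

import (
	"context"
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

func main() {
	// Option 1: trust a private CA (PEM bundle on disk).
	byCA := &vclib.VSphereConnection{
		Hostname: "vcenter.example.com", // illustrative
		Port:     "443",
		CACert:   "/etc/vmware/ca.pem", // illustrative path
	}

	// Option 2: pin the server certificate's SHA-1 thumbprint instead.
	byThumbprint := &vclib.VSphereConnection{
		Hostname:   "vcenter.example.com",
		Port:       "443",
		Thumbprint: "AB:CD:EF:...", // colon-separated hex, as computed in the test below
	}

	for _, c := range []*vclib.VSphereConnection{byCA, byThumbprint} {
		if _, err := c.NewClient(context.Background()); err != nil {
			fmt.Println("connect failed:", err)
		}
	}
}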


@ -0,0 +1,222 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vclib_test
import (
"context"
"crypto/sha1"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures"
)
func createTestServer(
t *testing.T,
caCertPath string,
serverCertPath string,
serverKeyPath string,
handler http.HandlerFunc,
) (*httptest.Server, string) {
caCertPEM, err := ioutil.ReadFile(caCertPath)
if err != nil {
t.Fatalf("Could not read ca cert from file")
}
serverCert, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
if err != nil {
t.Fatalf("Could not load server cert and server key from files: %#v", err)
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(caCertPEM); !ok {
t.Fatalf("Cannot add CA to CAPool")
}
server := httptest.NewUnstartedServer(http.HandlerFunc(handler))
server.TLS = &tls.Config{
Certificates: []tls.Certificate{
serverCert,
},
RootCAs: certPool,
}
// calculate the leaf certificate's fingerprint
if len(server.TLS.Certificates) < 1 || len(server.TLS.Certificates[0].Certificate) < 1 {
t.Fatal("Expected server.TLS.Certificates not to be empty")
}
x509LeafCert := server.TLS.Certificates[0].Certificate[0]
var tpString string
for i, b := range sha1.Sum(x509LeafCert) {
if i > 0 {
tpString += ":"
}
tpString += fmt.Sprintf("%02X", b)
}
return server, tpString
}
func TestWithValidCaCert(t *testing.T) {
handler, verifyConnectionWasMade := getRequestVerifier(t)
server, _ := createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseUrl(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
CACert: fixtures.CaCertPath,
}
// Ignoring error here, because we only care about the TLS connection
connection.NewClient(context.Background())
verifyConnectionWasMade()
}
func TestWithVerificationWithWrongThumbprint(t *testing.T) {
handler, _ := getRequestVerifier(t)
server, _ := createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseUrl(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
Thumbprint: "obviously wrong",
}
_, err := connection.NewClient(context.Background())
if msg := err.Error(); !strings.Contains(msg, "thumbprint does not match") {
t.Fatalf("Expected wrong thumbprint error, got '%s'", msg)
}
}
func TestWithVerificationWithoutCaCertOrThumbprint(t *testing.T) {
handler, _ := getRequestVerifier(t)
server, _ := createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseUrl(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
}
_, err := connection.NewClient(context.Background())
verifyWrappedX509UnkownAuthorityErr(t, err)
}
func TestWithValidThumbprint(t *testing.T) {
handler, verifyConnectionWasMade := getRequestVerifier(t)
server, thumbprint :=
createTestServer(t, fixtures.CaCertPath, fixtures.ServerCertPath, fixtures.ServerKeyPath, handler)
server.StartTLS()
u := mustParseUrl(t, server.URL)
connection := &vclib.VSphereConnection{
Hostname: u.Hostname(),
Port: u.Port(),
Thumbprint: thumbprint,
}
// Ignoring error here, because we only care about the TLS connection
connection.NewClient(context.Background())
verifyConnectionWasMade()
}
func TestWithInvalidCaCertPath(t *testing.T) {
connection := &vclib.VSphereConnection{
Hostname: "should-not-matter",
Port: "should-not-matter",
CACert: "invalid-path",
}
_, err := connection.NewClient(context.Background())
if _, ok := err.(*os.PathError); !ok {
t.Fatalf("Expected an os.PathError, got: '%s' (%#v)", err.Error(), err)
}
}
func TestInvalidCaCert(t *testing.T) {
connection := &vclib.VSphereConnection{
Hostname: "should-not-matter",
Port: "should-not-matter",
CACert: fixtures.InvalidCertPath,
}
_, err := connection.NewClient(context.Background())
if msg := err.Error(); !strings.Contains(msg, "invalid certificate") {
t.Fatalf("Expected invalid certificate error, got '%s'", msg)
}
}
func verifyWrappedX509UnkownAuthorityErr(t *testing.T, err error) {
urlErr, ok := err.(*url.Error)
if !ok {
t.Fatalf("Expected to receive an url.Error, got '%s' (%#v)", err.Error(), err)
}
x509Err, ok := urlErr.Err.(x509.UnknownAuthorityError)
if !ok {
t.Fatalf("Expected to receive a wrapped x509.UnknownAuthorityError, got: '%s' (%#v)", urlErr.Error(), urlErr)
}
if msg := x509Err.Error(); msg != "x509: certificate signed by unknown authority" {
t.Fatalf("Expected 'signed by unknown authority' error, got: '%s'", msg)
}
}
func getRequestVerifier(t *testing.T) (http.HandlerFunc, func()) {
gotRequest := false
handler := func(w http.ResponseWriter, r *http.Request) {
gotRequest = true
}
checker := func() {
if !gotRequest {
t.Fatalf("Never saw a request, maybe TLS connection could not be established?")
}
}
return handler, checker
}
func mustParseUrl(t *testing.T, i string) *url.URL {
u, err := url.Parse(i)
if err != nil {
t.Fatalf("Cannot parse URL: %v", err)
}
return u
}


@ -23,12 +23,12 @@ import (
"path/filepath"
"strings"
"github.com/golang/glog"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// Datacenter extends the govmomi Datacenter object
@ -42,7 +42,7 @@ func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacente
finder := find.NewFinder(connection.Client, false)
datacenter, err := finder.Datacenter(ctx, datacenterPath)
if err != nil {
glog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err)
klog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err)
return nil, err
}
dc := Datacenter{datacenter}
@ -55,7 +55,7 @@ func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Da
finder := find.NewFinder(connection.Client, false)
datacenters, err := finder.DatacenterList(ctx, "*")
if err != nil {
glog.Errorf("Failed to find the datacenter. err: %+v", err)
klog.Errorf("Failed to find the datacenter. err: %+v", err)
return nil, err
}
for _, datacenter := range datacenters {
@ -71,24 +71,39 @@ func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualM
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil)
if err != nil {
glog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err)
klog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err)
return nil, err
}
if svm == nil {
glog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID)
klog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID)
return nil, ErrNoVMFound
}
virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc}
return &virtualMachine, nil
}
// GetHostByVMUUID gets the host object from the given vmUUID
func (dc *Datacenter) GetHostByVMUUID(ctx context.Context, vmUUID string) (*types.ManagedObjectReference, error) {
virtualMachine, err := dc.GetVMByUUID(ctx, vmUUID)
var vmMo mo.VirtualMachine
pc := property.DefaultCollector(virtualMachine.Client())
err = pc.RetrieveOne(ctx, virtualMachine.Reference(), []string{"summary.runtime.host"}, &vmMo)
if err != nil {
klog.Errorf("Failed to retrive VM runtime host, err: %v", err)
return nil, err
}
host := vmMo.Summary.Runtime.Host
klog.Infof("%s host is %s", virtualMachine.Reference(), host)
return host, nil
}
// GetVMByPath gets the VM object from the given vmPath
// vmPath should be the full path to VM and not just the name
func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) {
finder := getFinder(dc)
vm, err := finder.VirtualMachine(ctx, vmPath)
if err != nil {
glog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err)
klog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err)
return nil, err
}
virtualMachine := VirtualMachine{vm, dc}
@ -101,7 +116,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto
finder := getFinder(dc)
datastores, err := finder.DatastoreList(ctx, "*")
if err != nil {
glog.Errorf("Failed to get all the datastores. err: %+v", err)
klog.Errorf("Failed to get all the datastores. err: %+v", err)
return nil, err
}
var dsList []types.ManagedObjectReference
@ -114,7 +129,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto
properties := []string{DatastoreInfoProperty}
err = pc.Retrieve(ctx, dsList, properties, &dsMoList)
if err != nil {
glog.Errorf("Failed to get Datastore managed objects from datastore objects."+
klog.Errorf("Failed to get Datastore managed objects from datastore objects."+
" dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err)
return nil, err
}
@ -126,7 +141,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto
dc},
dsMo.Info.GetDatastoreInfo()}
}
glog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap)
klog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap)
return dsURLInfoMap, nil
}
@ -135,7 +150,7 @@ func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string)
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
return nil, errors.New("Failed to parse vmDiskPath")
}
@ -147,7 +162,7 @@ func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Dat
finder := getFinder(dc)
ds, err := finder.Datastore(ctx, name)
if err != nil {
glog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err)
klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err)
return nil, err
}
datastore := Datastore{ds, dc}
@ -155,20 +170,16 @@ func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Dat
}
// GetResourcePool gets the resource pool for the given path
func (dc *Datacenter) GetResourcePool(ctx context.Context, computePath string) (*object.ResourcePool, error) {
func (dc *Datacenter) GetResourcePool(ctx context.Context, resourcePoolPath string) (*object.ResourcePool, error) {
finder := getFinder(dc)
var computeResource *object.ComputeResource
var resourcePool *object.ResourcePool
var err error
if computePath == "" {
computeResource, err = finder.DefaultComputeResource(ctx)
} else {
computeResource, err = finder.ComputeResource(ctx, computePath)
}
resourcePool, err = finder.ResourcePoolOrDefault(ctx, resourcePoolPath)
if err != nil {
glog.Errorf("Failed to get the ResourcePool for computePath '%s'. err: %+v", computePath, err)
klog.Errorf("Failed to get the ResourcePool for path '%s'. err: %+v", resourcePoolPath, err)
return nil, err
}
return computeResource.ResourcePool(ctx)
return resourcePool, nil
}
// GetFolderByPath gets the Folder Object from the given folder path
@ -177,7 +188,7 @@ func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*
finder := getFinder(dc)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
glog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
klog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return nil, err
}
folder := Folder{vmFolder, dc}
@ -189,7 +200,7 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi
var vmMoList []mo.VirtualMachine
var vmRefs []types.ManagedObjectReference
if len(vmObjList) < 1 {
glog.Errorf("VirtualMachine Object list is empty")
klog.Errorf("VirtualMachine Object list is empty")
return nil, fmt.Errorf("VirtualMachine Object list is empty")
}
@ -199,7 +210,7 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi
pc := property.DefaultCollector(dc.Client())
err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList)
if err != nil {
glog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err)
klog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err)
return nil, err
}
return vmMoList, nil
@ -215,7 +226,7 @@ func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath str
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter)
if err != nil {
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
@ -227,7 +238,7 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas
var dsMoList []mo.Datastore
var dsRefs []types.ManagedObjectReference
if len(dsObjList) < 1 {
glog.Errorf("Datastore Object list is empty")
klog.Errorf("Datastore Object list is empty")
return nil, fmt.Errorf("Datastore Object list is empty")
}
@ -237,7 +248,7 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas
pc := property.DefaultCollector(dc.Client())
err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList)
if err != nil {
glog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err)
klog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err)
return nil, err
}
return dsMoList, nil
@ -255,27 +266,27 @@ func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[st
vm, err := dc.GetVMByPath(ctx, nodeName)
if err != nil {
if IsNotFound(err) {
glog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths)
klog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths)
}
continue
}
vmList = append(vmList, vm)
}
if len(vmList) == 0 {
glog.V(2).Infof("vSphere CP will assume no disks are attached to any node.")
klog.V(2).Infof("vSphere CP will assume no disks are attached to any node.")
return attached, nil
}
vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"})
if err != nil {
// When there is an error fetching instance information
// it is safer to return nil and let volume information not be touched.
glog.Errorf("Failed to get VM Managed object for nodes: %+v. err: +%v", vmList, err)
klog.Errorf("Failed to get VM Managed object for nodes: %+v. err: +%v", vmList, err)
return nil, err
}
for _, vmMo := range vmMoList {
if vmMo.Config == nil {
glog.Errorf("Config is not available for VM: %q", vmMo.Name)
klog.Errorf("Config is not available for VM: %q", vmMo.Name)
continue
}
for nodeName, volPaths := range nodeVolumes {


@ -20,12 +20,12 @@ import (
"context"
"fmt"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// Datastore extends the govmomi Datastore object
@ -59,7 +59,7 @@ func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string,
}
return err
}
glog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath)
klog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath)
return nil
}
@ -69,7 +69,7 @@ func (ds *Datastore) GetType(ctx context.Context) (string, error) {
pc := property.DefaultCollector(ds.Client())
err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo)
if err != nil {
glog.Errorf("Failed to retrieve datastore summary property. err: %v", err)
klog.Errorf("Failed to retrieve datastore summary property. err: %v", err)
return "", err
}
return dsMo.Summary.Type, nil
@ -80,7 +80,7 @@ func (ds *Datastore) GetType(ctx context.Context) (string, error) {
func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) {
pbmClient, err := NewPbmClient(ctx, ds.Client())
if err != nil {
glog.Errorf("Failed to get new PbmClient Object. err: %v", err)
klog.Errorf("Failed to get new PbmClient Object. err: %v", err)
return false, "", err
}
return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds)


@ -15,9 +15,9 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers",
deps = [
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -20,9 +20,9 @@ import (
"context"
"time"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
@ -55,13 +55,13 @@ func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vcl
task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec)
if err != nil {
vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err)
klog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err)
return "", err
}
taskInfo, err := task.WaitForResult(ctx, nil)
vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err)
if err != nil {
glog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err)
klog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err)
return "", err
}
canonicalDiskPath = taskInfo.Result.(string)
@ -77,14 +77,14 @@ func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vc
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter)
if err != nil {
glog.Errorf("Failed to delete virtual disk. err: %v", err)
klog.Errorf("Failed to delete virtual disk. err: %v", err)
vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
return err
}
err = task.Wait(ctx)
vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err)
if err != nil {
glog.Errorf("Failed to delete virtual disk. err: %v", err)
klog.Errorf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil


@ -20,7 +20,7 @@ import (
"context"
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
@ -65,7 +65,7 @@ func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Dat
virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType
}
if !virtualDisk.VolumeOptions.VerifyVolumeOptions() {
glog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions)
klog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions)
return "", vclib.ErrInvalidVolumeOptions
}
if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" {


@ -22,9 +22,9 @@ import (
"hash/fnv"
"strings"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)
@ -43,26 +43,26 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
}
pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client())
if err != nil {
glog.Errorf("Error occurred while creating new pbmClient, err: %+v", err)
klog.Errorf("Error occurred while creating new pbmClient, err: %+v", err)
return "", err
}
if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" {
vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName)
if err != nil {
glog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
klog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err)
return "", err
}
}
if vmdisk.volumeOptions.StoragePolicyID != "" {
compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID)
if err != nil {
glog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
klog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err)
return "", err
}
if !compatible {
glog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName)
klog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName)
return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage)
}
}
@ -79,7 +79,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
return "", err
}
if dsType != vclib.VSANDatastoreType {
glog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name())
klog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name())
return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+
" The policy parameters will work only with VSAN Datastore."+
" So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name())
@ -90,7 +90,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
ObjectData: vmdisk.volumeOptions.VSANStorageProfileData,
}
} else {
glog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
klog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set")
}
var dummyVM *vclib.VirtualMachine
@ -102,10 +102,10 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName)
if err != nil {
// Create a dummy VM
glog.V(1).Info("Creating Dummy VM: %q", dummyVMFullName)
klog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName)
dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
if err != nil {
glog.Errorf("Failed to create Dummy VM. err: %v", err)
klog.Errorf("Failed to create Dummy VM. err: %v", err)
return "", err
}
}
@ -114,7 +114,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions)
if err != nil {
glog.Errorf("Failed to create Disk Spec. err: %v", err)
klog.Errorf("Failed to create Disk Spec. err: %v", err)
return "", err
}
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
@ -127,14 +127,18 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
fileAlreadyExist := false
task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
klog.Errorf("Failed to reconfig. err: %v", err)
return "", err
}
err = task.Wait(ctx)
if err != nil {
fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err)
if fileAlreadyExist {
//Skip error and continue to detach the disk as the disk was already created on the datastore.
glog.V(vclib.LogLevel).Info("File: %v already exists", vmdisk.diskPath)
klog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath)
} else {
glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
klog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
return "", err
}
}
@ -143,16 +147,16 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
if err != nil {
if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
// Skip error if disk was already detached from the dummy VM but still present on the datastore.
glog.V(vclib.LogLevel).Info("File: %v is already detached", vmdisk.diskPath)
klog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath)
} else {
glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
klog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
return "", err
}
}
// Delete the dummy VM
err = dummyVM.DeleteVM(ctx)
if err != nil {
glog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
klog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err)
}
return vmdisk.diskPath, nil
}
@ -191,13 +195,13 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib
task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil)
if err != nil {
glog.Errorf("Failed to create VM. err: %+v", err)
klog.Errorf("Failed to create VM. err: %+v", err)
return nil, err
}
dummyVMTaskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
glog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err)
klog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err)
return nil, err
}
@ -210,11 +214,11 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib
func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error {
vmList, err := folder.GetVirtualMachines(ctx)
if err != nil {
glog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)
klog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)
return err
}
if vmList == nil || len(vmList) == 0 {
glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
klog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath)
}
var dummyVMList []*vclib.VirtualMachine
@ -222,7 +226,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace
for _, vm := range vmList {
vmName, err := vm.ObjectName(ctx)
if err != nil {
glog.V(4).Infof("Unable to get name from VM with err: %+v", err)
klog.V(4).Infof("Unable to get name from VM with err: %+v", err)
continue
}
if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) {
@ -233,7 +237,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace
for _, vm := range dummyVMList {
err = vm.DeleteVM(ctx)
if err != nil {
glog.V(4).Infof("Unable to delete dummy VM with err: %+v", err)
klog.V(4).Infof("Unable to delete dummy VM with err: %+v", err)
continue
}
}


@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["fixtures.go"],
data = glob([
"*.pem",
"*.key",
]),
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJJwIBAAKCAgEA4CKLwCPwMUIVaGhvZxLmXEzDflILVaGCZRRBbfYucfysylT/
JKPMlKs3ORNVW1cdiW1z/ZUlAlN+eqq40WSVQJqLUeXltsfZwemdFmf3SAWIu9v9
wI5mhLQJMh2XPKNILCBhrET/ANLVPbObJUFvGavpR9XVXTXsLUvuCR+oSpDvQYyn
WKJ5dAwqKaFx3GCEFAm0dNnSzliQrzKFOE0DUMxFQH5Lt2EYLHrya+K4ZtYbX5nK
X++T9R5pZs0npqmTQS/rIffv2hT89tKdqPz/MCt5xwmjsAO2uri5O+WaLUIkf8Bd
fmVAusE/5v2p3x3MH0rUXaNPg7FqLj1cnbcwHqqt3PmVl9VZINkPbnHHiua21GNq
DAZ/G/vP8/hlXwIeE8d6YPsSPm4NEH0Ku+yk0TEL6QkGFMYYpyCw1BNYGXd+zvf1
xjZtGrcViHhesxuv71nGdJbNSi7zwkYXydSKCNnjJ+Oqyip5uUC+DmydqcJTQLcZ
W5ObNfeB8PLz6UuVidMffh8evE13L60cS5wZyZWherMqB+I/uREt05gikCtlJVOo
shuLS0QjbK/INYCSFBJjt0xrwTbw13SQsEhktQYdqTHaDBWi6uh+pcY9msF1jZJ+
GAEPYcLzK3o2/kE6g09TZ3QDeP9bEDTllL+mIs4JGiWGNC/eGjGfyyAnfmECAwEA
AQKCAf88aRNBtm4G2MjsWzmrjmyIdCg84+AqNF3w4ITCHphmILRx1HbwaTW63GsF
9zAKbnCHmfipYImZFugAKAOobHPN9dmXOV+w5CzNFyo/38XGo7c26xR50efP3Lad
y1v3/Ap32kJ5LB+PGURgXQh0Ai7vvGYj9n6LoP0HOG/wBZhWgLn78O0p9qDFpoG2
tsz5mQoAXJ1G4W7wLu7QSc2eXyOFo4kG2QOPaZwaYQj2CyWokgzOt6TUNr6qUogW
LTWCtjH6X/AAN9Nt9Do6TIoyAf7F/PHVs8NqrZWSvjcu7bOgfzNXO4H3j1LjAzM2
Dyi5+k4KISEcG+hSln8H94H/AGD3Yea44sDnIZoOtKTB+O7V+jyU7qwtX9QaEu04
CslnZOun0/PR/C9mI7QaGu1YJcxdIw9Nlj07+iAzI4ZjuO+qHeUM7SNvH/MVbglA
2ZDkp7J3VlJgFObvHdINZMWNO1FIg/pc7TcXNsUkNAwnCwLh6//5/cZF+BtGlc4A
SGkhYKX3dRp8qLjNKxux3VHROoDByJDEUcnn0fEAm9aMbV+PofpghJtQqnKbsMn8
iF29v+9+JBIHFxAwhCIv9atF82VHt/sGPcsRqohttRWJDaUMBM3N8rvciiigcYzh
c/o4kH0YNoFSs4+evhYQDxk8yIGsgyuGfnW5QaLUIPa2AxblAoIBAQDyfoJr3UFq
LfkTkYHjAo4eua1vzlM3/8aFFnuQhMeoFvw4aA26x1DsUUozIRXTWWbnFH6GY8T3
B46jgWcO4UaMqbxQxTpHSDQFSVn/budugxGU70WQ9LcjSobk9uCXgk2MmRn9tA/6
+ergswSEuPxyNzgDF60BTrS5V2Akh6eF/sYZWnMKazZ3kdw1V5Y/IxfNH1yo6GRz
PTPVyyX6kU3+DNQSplgcsKYFhyoT2HPIRaxR1fTIw9E5w1rQWanYz/A0I3SDECsc
qJDy1rzC+0Tye2XLcWzHu5l1ng8GPLQJfjEtMTKXMIHjpLFC1P4hXNrlxTOnALSS
95bwzvDqfxULAoIBAQDsnkUVOvoXrE9xRI2EyWi1K08i5YSwy3rtV+uJP2Zyy4uB
r3TfzxFnYdXWykzHJGqHd6N5M6vCmbcLMS0G9z5QpDhrIF5vk26P9isvZ3k7rkWG
jgwif3kBcPQXlCDgwwnVmGsBf/A+2z3HOfNPK3Iy3VamFvYD52wgL8+N0puZ42aU
aH759JjLGcaVZWzWNdIcpS1OsBucGXCj3IeHmLjhJFbVebIHJ4rCs7gj51H8R8uk
fksxsgfPdRRpYq7NkDOzVDPb/KtTf5C4ZDogRaxj765DMnn6LhBFQVuDWEDJgjlF
Aolt8ynskf3xd19nlX99QAzXnql6LLClwps6G8XDAoIBADzhslDufevwmuZk091w
2MmyCG9Xt+EJYIgtetxv2cjD7JMk3L2WKSULy7tGhTpI6eL+bD3FcsAqr48xf/Rm
btYGD3ef7N/Uqurg3a2Z5JUEZzejUy3vosNDhNabfQvM9TdlgPcHbDOw511+1JWV
9Bug7XkpSpBXeFxIKaVCQbcMniPjZ5qoDEa84jKqSNiVMPaY9ySZJA8iwI7esCxW
quQryFreVKTvXN9qbhAJehhAFeF9/DUjpLYB7Bz/RftfSYltlWUKfCh30dyGOWIi
v865WHdZhNwop4C2LEN+nhz8B9C212LKFPJYeQC0hRFPRM4HUs6NCMkVTFotOqNF
QL0CggEAGXBysPOkS8NEz0K1jF8zGLdNTM0sVO2ri7T2J81fMFxd5VV91Uon7tt/
6BXb51Us9t+P/cnmX4ezPErPMn6GfpkJT8stHAXXzzaCMhiH2jjEVNEU0Oivk84X
ECnm1wNhHUvDxWeB5uAfZjn+xLZBEuLlG/o//O92modJY1APVp4yOyZ48FqxyrQ8
u3cqGmWy701674jTjxbVG2jsUVHEHsCPbWgmEcrYilJUK9gE4oC9jjPd1bv0RwOp
bCMl9Afa5x7YbIBf0xxV7N0puqqC/EOakrLslk85hJigRCDK5l9P1PGO4PlRupN/
n+Rbp4FVMZwfRVdTlUUUwN2JXtf5jQKCAQEAqSMv1mkLS3qnmW1E/qAYrEmMlHZo
253wuwsO0XS7xCxcEumIvjYCvhnHPYIO2rqsscmk42gYe/OUfteMb71BJ+HnlyOo
9oDbZg8W2DSUzTUy0yT/JMcNTwVCPeVj+bZ/LzDP5jKmZ7vXZkLGQCgU6ENVmsCg
b8nKz0xc7o8jERaSGY+h3LthXF0wAZJ3NdbnJjFbL8hYpwTrD6xd/yg3M5grrCLe
iBKfdpCIN6VrqI9VymoPZryb1OVEiClt0LHWTIXQPcH2J/CrMeWoGhRBW3yTAECf
HPhYMZddW2y6uOFjRcUCu2HG35ogEYlDd0kjH1HhPC2xXcFQBmOyPpEeDQ==
-----END RSA PRIVATE KEY-----


@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIE/jCCAuYCCQDRJ2qPhdmG0DANBgkqhkiG9w0BAQsFADBAMQswCQYDVQQGEwJV
UzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4xDzANBgNVBAMMBnNv
bWVDQTAgFw0xODA2MDgxMzM5MjFaGA8yMjE4MDQyMTEzMzkyMVowQDELMAkGA1UE
BhMCVVMxCzAJBgNVBAgMAkNBMRMwEQYDVQQKDApBY21lLCBJbmMuMQ8wDQYDVQQD
DAZzb21lQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDgIovAI/Ax
QhVoaG9nEuZcTMN+UgtVoYJlFEFt9i5x/KzKVP8ko8yUqzc5E1VbVx2JbXP9lSUC
U356qrjRZJVAmotR5eW2x9nB6Z0WZ/dIBYi72/3AjmaEtAkyHZc8o0gsIGGsRP8A
0tU9s5slQW8Zq+lH1dVdNewtS+4JH6hKkO9BjKdYonl0DCopoXHcYIQUCbR02dLO
WJCvMoU4TQNQzEVAfku3YRgsevJr4rhm1htfmcpf75P1HmlmzSemqZNBL+sh9+/a
FPz20p2o/P8wK3nHCaOwA7a6uLk75ZotQiR/wF1+ZUC6wT/m/anfHcwfStRdo0+D
sWouPVydtzAeqq3c+ZWX1Vkg2Q9ucceK5rbUY2oMBn8b+8/z+GVfAh4Tx3pg+xI+
bg0QfQq77KTRMQvpCQYUxhinILDUE1gZd37O9/XGNm0atxWIeF6zG6/vWcZ0ls1K
LvPCRhfJ1IoI2eMn46rKKnm5QL4ObJ2pwlNAtxlbk5s194Hw8vPpS5WJ0x9+Hx68
TXcvrRxLnBnJlaF6syoH4j+5ES3TmCKQK2UlU6iyG4tLRCNsr8g1gJIUEmO3TGvB
NvDXdJCwSGS1Bh2pMdoMFaLq6H6lxj2awXWNkn4YAQ9hwvMrejb+QTqDT1NndAN4
/1sQNOWUv6YizgkaJYY0L94aMZ/LICd+YQIDAQABMA0GCSqGSIb3DQEBCwUAA4IC
AQBYBRH/q3gB4gEiOAUl9HbnoUb7MznZ0uQTH7fUYqr66ceZkg9w1McbwiAeZAaY
qQWwr3u4A8/Bg8csE2yQTsXeA33FP3Q6obyuYn4q7e++4+9SLkbSSQfbB67pGUK5
/pal6ULrLGzs69fbL1tOaA/VKQJndg3N9cftyiIUWTzHDop8SLmIobWVRtPQHf00
oKq8loakyluQdxQxnGdl7vMXwSpSpIH84TOdy2JN90MzVLgOz55sb/wRYfhClNFD
+1sb2V4nL2w1kXaO2UVPzk7qpG5FE54JPvvN67Ec4JjMSnGo8l3dJ9jGEmgBIML3
l1onrti2HStSs1vR4Ax0xok08okRlrGA4FqQiSx853T5uLa/JLmWfLKg9ixR4ZV+
dF+2ZrFwDLZUr4VeaDd2v2mQFBNLvdZrqp1OZ4B/1+H5S8ucb+oVhGqzDkEvRCc+
WYpNxx7kpwZPTLmMYTXXKdTWfpgz9GL0LSkY8d1rxLwHxtV8EzAkV+zIWix4h/IE
0FG4WvhrttMCu8ulZhGGoVqy7gdb4+ViWnUYNuCCjIcRJj7SeZaDawBASa/jZwik
Hxrwn0osGUqEUBmvjDdXJpTaKCr2GFOvhCM2pG6AXa14b5hS2DgbX+NZYcScYtVC
vn2HMDjnIEF4uOfDJU5eLok4jli5+VwzOQ7hOHs3DIm4+g==
-----END CERTIFICATE-----


@ -0,0 +1,93 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
readonly VALID_DAYS='73000'
readonly RSA_KEY_SIZE='4096'
createKey() {
openssl genrsa \
-out "$1" \
"$RSA_KEY_SIZE"
}
createCaCert() {
openssl req \
-x509 \
-subj "$( getSubj 'someCA' )" \
-new \
-nodes \
-key "$2" \
-sha256 \
-days "$VALID_DAYS" \
-out "$1"
}
createCSR() {
openssl req \
-new \
-sha256 \
-key "$2" \
-subj "$( getSubj 'localhost' )" \
-reqexts SAN \
-config <( getSANConfig ) \
-out "$1"
}
signCSR() {
openssl x509 \
-req \
-in "$2" \
-CA "$3" \
-CAkey "$4" \
-CAcreateserial \
-days "$VALID_DAYS" \
-sha256 \
-extfile <( getSAN ) \
-out "$1"
}
getSubj() {
local cn="${1:-someRandomCN}"
echo "/C=US/ST=CA/O=Acme, Inc./CN=${cn}"
}
getSAN() {
printf "subjectAltName=DNS:localhost,IP:127.0.0.1"
}
getSANConfig() {
cat /etc/ssl/openssl.cnf
printf '\n[SAN]\n'
getSAN
}
main() {
local caCertPath="./ca.pem"
local caKeyPath="./ca.key"
local serverCsrPath="./server.csr"
local serverCertPath="./server.pem"
local serverKeyPath="./server.key"
createKey "$caKeyPath"
createCaCert "$caCertPath" "$caKeyPath"
createKey "$serverKeyPath"
createCSR "$serverCsrPath" "$serverKeyPath"
signCSR "$serverCertPath" "$serverCsrPath" "$caCertPath" "$caKeyPath"
}
main "$@"


@ -0,0 +1,65 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fixtures
import (
"os"
"path/filepath"
"runtime"
"strings"
)
var (
// CaCertPath is the filepath to a certificate that can be used as a CA
// certificate.
CaCertPath string
// ServerCertPath is the filepath to a leaf certificate signed by the CA at
// `CaCertPath`.
ServerCertPath string
// ServerKeyPath is the filepath to the private key for the certificate at
// `ServerCertPath`.
ServerKeyPath string
// InvalidCertPath is the filepath to an invalid certificate.
InvalidCertPath string
)
func init() {
_, thisFile, _, ok := runtime.Caller(0)
if !ok {
panic("Cannot get path to the fixtures")
}
fixturesDir := filepath.Dir(thisFile)
cwd, err := os.Getwd()
if err != nil {
panic("Cannot get CWD: " + err.Error())
}
// When tests run in a bazel sandbox `runtime.Caller()`
// returns a relative path, when run with plain `go test` the path
// returned is absolute. To make those fixtures work in both those cases,
// we prepend the CWD iff the CWD is not yet part of the path to the fixtures.
if !strings.HasPrefix(fixturesDir, cwd) {
fixturesDir = filepath.Join(cwd, fixturesDir)
}
CaCertPath = filepath.Join(fixturesDir, "ca.pem")
ServerCertPath = filepath.Join(fixturesDir, "server.pem")
ServerKeyPath = filepath.Join(fixturesDir, "server.key")
InvalidCertPath = filepath.Join(fixturesDir, "invalid.pem")
}
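A minimal sketch (not in this commit) of how a test might consume these fixture paths, using only the standard library:

package fixtures_test

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"testing"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures"
)

func TestFixturesAreConsistent(t *testing.T) {
	// The server certificate and key should form a valid TLS key pair.
	if _, err := tls.LoadX509KeyPair(fixtures.ServerCertPath, fixtures.ServerKeyPath); err != nil {
		t.Fatalf("cannot load server key pair: %v", err)
	}
	// The CA certificate should parse into a root pool.
	caPEM, err := ioutil.ReadFile(fixtures.CaCertPath)
	if err != nil {
		t.Fatalf("cannot read CA certificate: %v", err)
	}
	if ok := x509.NewCertPool().AppendCertsFromPEM(caPEM); !ok {
		t.Fatal("cannot append CA certificate to pool")
	}
}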

View File

@ -0,0 +1 @@
this is some invalid content

View File

@ -0,0 +1,28 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIEtTCCAp0CAQAwQzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRMwEQYDVQQK
DApBY21lLCBJbmMuMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQCVkk5HMKNvMXVJoJcUfKK252UT6rdnlsaFLZOlcbp3
otqiq3A2jhQLeL5Ocyd22s/ak2RX9liK+ynV8fP3YWoUBP5elhwbykubiIvSTRS5
85Z0s9NfzscImMpnivt+bOy3KOoriy/0jfJ7WMqLRUTUEusXUpW8QT/U9cK6DrwQ
E/9oXTr669yvqjyFsxjOB0pLOFFib0LeQZxrA2h+oAP8qT/Of6kyTgGWjLhSC1cV
eCPZsSeZUT61FbIu/b5M42WYuddoFbf8y9m0oLeYizYob7poE25jw91bNa8y2nfS
v+JuCcfO4wq29cnldGFNpJPhBhc1sbBvVshXXKWdfzN1c8RCS5hNANy1phAJ7RFe
3Uj0WneBVBHHJMz7Qh61uxTST1W8HBDTuaBTxGKTcPFWd9u4lj/BEScRFOSC/qiO
1HCKzOsYhjnHfql5GzfQKpEy/e4m2oL8VTqcJBsfHCyxDIH+6Y3ovttymxAUPJ14
r3mG9FDLq1va/+8xzDswyjmRIVQeOgvllzgM5vCKqz6nsXtLRYgkwHMk5yOaAIzO
BnsmZztsyaubjcYvM5pUsiO49VWk6ntiAn+WpF/sreFlesx1peQKbTVovwvn137d
V92Oncce+ZikKHxtz4qOz+dH1Fz7Ykor8fXcsfdbkKvwWdz8U/pOBu+83CxBXTWA
bwIDAQABoC0wKwYJKoZIhvcNAQkOMR4wHDAaBgNVHREEEzARgglsb2NhbGhvc3SH
BH8AAAEwDQYJKoZIhvcNAQELBQADggIBADgJfI3xRKlOInZQjg+afz+L477IiFmP
Pf0qwO/EqBkCmbDbmvXpXi/y9Ffh6bMx2naN873nW3k1uVG2W0O4Bl7di9PkmRxY
ktcWY+CaxDT5+Y3LmrqICgrZmELTuV5G8xX2/7bpdEtY4sWpoOeOun+CeGTCeUGx
sGxOWrhydYwrkowupPthYreIIBBPHWl2gEw/m+Y7aJZGtKnDD9eCbF6RxmXRWHDu
0Ly+F3veXbht9LjKPFsgfsogo33Nl8+W1LCActKNY7NMDdGkc+RqaTyxhYEwomui
N1NDOW1qHqSyp2RC13cXokfLL58WGXS6PpNhSln9u4ZG9a+TY+vw1qC//1CyTicY
ylyEn2qfqTSG3W7T/u6ZTL0MpMjFv8VigpffJcFDjq6lVH8LyTniSXdCREy78jAo
8O/2tzJtWrar8bbeN7KCwVcJVaK15a1GWZmo5Ei33U/2Tm+UyRbWL8eISO2Hs3WM
90aFPaHfqKpiPsJrnnOm270lZclgqEtpsyuLsAClqxytCYPw4zTa6WOfDJtmVUrT
1fvMjqwzvs7jbNrgfkwSxXiABwTMQQWeAtuSO+zZH4Ms10qyANoh4FFi/oS3dRKQ
0kdu7AsJqnou9q9HWq1WCTqMcyNE0KPHuo4xhtOlWoGbsugTs7XBml30D7bKJVfG
PazsY1b0/cx7
-----END CERTIFICATE REQUEST-----

View File

@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEAlZJORzCjbzF1SaCXFHyitudlE+q3Z5bGhS2TpXG6d6Laoqtw
No4UC3i+TnMndtrP2pNkV/ZYivsp1fHz92FqFAT+XpYcG8pLm4iL0k0UufOWdLPT
X87HCJjKZ4r7fmzstyjqK4sv9I3ye1jKi0VE1BLrF1KVvEE/1PXCug68EBP/aF06
+uvcr6o8hbMYzgdKSzhRYm9C3kGcawNofqAD/Kk/zn+pMk4Bloy4UgtXFXgj2bEn
mVE+tRWyLv2+TONlmLnXaBW3/MvZtKC3mIs2KG+6aBNuY8PdWzWvMtp30r/ibgnH
zuMKtvXJ5XRhTaST4QYXNbGwb1bIV1ylnX8zdXPEQkuYTQDctaYQCe0RXt1I9Fp3
gVQRxyTM+0IetbsU0k9VvBwQ07mgU8Rik3DxVnfbuJY/wREnERTkgv6ojtRwiszr
GIY5x36peRs30CqRMv3uJtqC/FU6nCQbHxwssQyB/umN6L7bcpsQFDydeK95hvRQ
y6tb2v/vMcw7MMo5kSFUHjoL5Zc4DObwiqs+p7F7S0WIJMBzJOcjmgCMzgZ7Jmc7
bMmrm43GLzOaVLIjuPVVpOp7YgJ/lqRf7K3hZXrMdaXkCm01aL8L59d+3Vfdjp3H
HvmYpCh8bc+Kjs/nR9Rc+2JKK/H13LH3W5Cr8Fnc/FP6TgbvvNwsQV01gG8CAwEA
AQKCAgBLBQn8DPo8YDsqxcBhRy45vQ/mkHiTHX3O+JAwkD1tmiI9Ku3qfxKwukwB
fyKRK6jLQdg3gljgxJ80Ltol/xc8mVCYUoQgsDOB/FfdEEpQBkw1lqhzSnxr5G7I
xl3kCHAmYgAp/PL9n2C620sj1YdzM1X06bgupy+D+gxEU/WhvtYBG5nklv6moSUg
DjdnxyJNXh7710Bbx97Tke8Ma+f0B1P4l/FeSN/lCgm9JPD11L9uhbuN28EvBIXN
qfmUCQ5BLx1KmHIi+n/kaCQN/+0XFQsS/oQEyA2znNaWFBu7egDxHji4nQoXwGoW
i2vujJibafmkNc5/2bA8mTx8JXvCLhU2L9j2ZumpKOda0g+pfMauesL+9rvZdqwW
gjdjndOHZlg3qm40hGCDBVmmV3mdnvXrk1BbuB4Y0N7qGo3PyYtJHGwJILaNQVGR
Sj75uTatxJwFXsqSaJaErV3Q90IiyXX4AOFGnWHOs29GEwtnDbCvT/rzqutTYSXD
Yv0XFDznzJelhZTH7FbaW3FW3YGEG1ER/0MtKpsAH4i7H9q3KKK8yrzUsgUkGwXt
xtoLckh91xilPIGbzARdELTEdHrjlFL+qaz3PIqEQScWz3WBu2JcIzGbp6PQfMZ+
FZXarEb/ADZuX0+WoKFYR5jzwMoQfF/fxe2Ib/37ETNw4BgfSQKCAQEAxOw64XgO
nUVJslzGK/H5fqTVpD1rfRmvVAiSDLAuWpClbpDZXqEPuoPPYsiccuUWu9VkJE1F
6MZEexGx1jFkN08QUHD1Bobzu6ThaBc2PrWHRjFGKM60d0AkhOiL4N04FGwVeCN6
xzIJFk1E4VOOo1+lzeAWRvi1lwuWTgQi+m25nwBJtmYdBLGeS+DXy80Fi6deECei
ipDzJ4rxJsZ61uqBeYC4CfuHW9m5rCzJWPMMMFrPdl3OxEyZzKng4Co5EYc5i/QH
piXD6IJayKcTPRK3tBJZp2YCIIdtQLcjAwmDEDowQtelHkbTihXMGRarf3VcOEoN
ozMRgcLEEynuKwKCAQEAwnF5ZkkJEL/1MCOZ6PZfSKl35ZMIz/4Umk8hOMAQGhCT
cnxlDUfGSBu4OihdBbIuBSBsYDjgcev8uyiIPDVy0FIkBKRGfgrNCLDh19aHljvE
bUc3akvbft0mro86AvSd/Rpc7sj841bru37RDUm6AJOtIvb6DWUpMOZgMm0WMmSI
kNs/UT+7rqg+AZPP8lumnJIFnRK38xOehQAaS1FHWGP//38py8yo8eXpMsoCWMch
c+kZD2jsAYV+SWjjkZjcrv/52+asd4AotRXIShV8E8xItQeq6vLHKOaIe0tC2Y44
ONAKiu4dgABt1voy8I5J63MwgeNmgAUS+KsgUclYzQKCAQEAlt/3bPAzIkQH5uQ1
4U2PvnxEQ4XbaQnYzyWR4K7LlQ/l8ASCxoHYLyr2JdVWKKFk/ZzNERMzUNk3dqNk
AZvuEII/GaKx2MJk04vMN5gxM3KZpinyeymEEynN0RbqtOpJITx+ZoGofB3V4IRr
FciTLJEH0+iwqMe9OXDjQ/rfYcfXw/7QezNZYFNF2RT3wWnfqdQduXrkig3sfotx
oCfJzgf2E0WPu/Y/CxyRqVzXF5N/7zxkX2gYF0YpQCmX5afz+X4FlTju81lT9DyL
mdiIYO6KWSkGD7+UOaAJEOA/rwAGrtQmTdAy7jONt+pjaYV4+DrO4UG7mSJzc1vq
JlSl6QKCAQARqwPv8mT7e6XI2QNMMs7XqGZ3mtOrKpguqVAIexM7exQazAjWmxX+
SV6FElPZh6Y82wRd/e0PDPVrADTY27ZyDXSuY0rwewTEbGYpGZo6YXXoxBbZ9sic
D3ZLWEJaMGYGsJWPMP4hni1PXSebwH5BPSn3Sl/QRcfnZJeLHXRt4cqy9uka9eKU
7T6tIAQ+LmvGQFJ4QlIqqTa3ORoqi9kiw/tn+OMQXKlhSZXWApsR/A4jHSQkzVDc
loeyHfDHsw8ia6oFfEFhnmiUg8UuTiN3HRHiOS8jqCnGoqP2KBGL+StMpkK++wH9
NozEgvmL+DHpTg8zTjlrGortw4btR5FlAoIBABVni+EsGA5K/PM1gIct2pDm+6Kq
UCYScTwIjftuwKLk/KqermG9QJLiJouKO3ZSz7iCelu87Dx1cKeXrc2LQ1pnQzCB
JnI6BCT+zRnQFXjLokJXD2hIS2hXhqV6/9FRXLKKMYePcDxWt/etLNGmpLnhDfb3
sMOH/9pnaGmtk36Ce03Hh7E1C6io/MKfTq+KKUV1UGwO1BdNQCiclkYzAUqn1O+Y
c8BaeGKc2c6as8DKrPTGGQGmzo/ZUxQVfVFl2g7+HXISWBBcui/G5gtnU1afZqbW
mTmDoqs4510vhlkhN9XZ0DyhewDIqNNGEY2vS1x2fJz1XC2Eve4KpSyUsiE=
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFJjCCAw6gAwIBAgIJAOcEAbv8NslfMA0GCSqGSIb3DQEBCwUAMEAxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTETMBEGA1UECgwKQWNtZSwgSW5jLjEPMA0GA1UE
AwwGc29tZUNBMCAXDTE4MDYwODEzMzkyNFoYDzIyMTgwNDIxMTMzOTI0WjBDMQsw
CQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4xEjAQ
BgNVBAMMCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
AJWSTkcwo28xdUmglxR8orbnZRPqt2eWxoUtk6Vxunei2qKrcDaOFAt4vk5zJ3ba
z9qTZFf2WIr7KdXx8/dhahQE/l6WHBvKS5uIi9JNFLnzlnSz01/OxwiYymeK+35s
7Lco6iuLL/SN8ntYyotFRNQS6xdSlbxBP9T1wroOvBAT/2hdOvrr3K+qPIWzGM4H
Sks4UWJvQt5BnGsDaH6gA/ypP85/qTJOAZaMuFILVxV4I9mxJ5lRPrUVsi79vkzj
ZZi512gVt/zL2bSgt5iLNihvumgTbmPD3Vs1rzLad9K/4m4Jx87jCrb1yeV0YU2k
k+EGFzWxsG9WyFdcpZ1/M3VzxEJLmE0A3LWmEAntEV7dSPRad4FUEcckzPtCHrW7
FNJPVbwcENO5oFPEYpNw8VZ327iWP8ERJxEU5IL+qI7UcIrM6xiGOcd+qXkbN9Aq
kTL97ibagvxVOpwkGx8cLLEMgf7pjei+23KbEBQ8nXiveYb0UMurW9r/7zHMOzDK
OZEhVB46C+WXOAzm8IqrPqexe0tFiCTAcyTnI5oAjM4GeyZnO2zJq5uNxi8zmlSy
I7j1VaTqe2ICf5akX+yt4WV6zHWl5AptNWi/C+fXft1X3Y6dxx75mKQofG3Pio7P
50fUXPtiSivx9dyx91uQq/BZ3PxT+k4G77zcLEFdNYBvAgMBAAGjHjAcMBoGA1Ud
EQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAgEABL8kffi7
48qSD+/l/UwCYdmqta1vAbOkvLnPtfXe1XlDpJipNuPxUBc8nNTemtrbg0erNJnC
jQHodqmdKBJJOdaEKTwAGp5pYvvjlU3WasmhfJy+QwOWgeqjJcTUo3+DEaHRls16
AZXlsp3hB6z0gzR/qzUuZwpMbL477JpuZtAcwLYeVvLG8bQRyWyEy8JgGDoYSn8s
Z16s+r6AX+cnL/2GHkZ+oc3iuXJbnac4xfWTKDiYnyzK6RWRnoyro7X0jiPz6XX3
wyoWzB1uMSCXscrW6ZcKyKqz75lySLuwGxOMhX4nGOoYHY0ZtrYn5WK2ZAJxsQnn
8QcjPB0nq37U7ifk1uebmuXe99iqyKnWaLvlcpe+HnO5pVxFkSQEf7Zh+hEnRDkN
IBzLFnqwDS1ug/oQ1aSvc8oBh2ylKDJuGtPNqGKibNJyb2diXO/aEUOKRUKPAxKa
dbKsc4Y1bhZNN3/MICMoyghwAOiuwUQMR5uhxTkQmZUwNrPFa+eW6GvyoYLFUsZs
hZfWLNGD5mLADElxs0HF7F9Zk6pSocTDXba4d4lfxsq88SyZZ7PbjJYFRfLQPzd1
CfvpRPqolEmZo1Y5Q644PELYiJRKpBxmX5GtC5j5eaUD9XdGKvXsGhb0m0gW75rq
iUnnLkZt2ya1cDJDiCnJjo7r5KxMo0XXFDc=
-----END CERTIFICATE-----

View File

@ -19,8 +19,8 @@ package vclib
import (
"context"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"k8s.io/klog"
)
// Folder extends the govmomi Folder object
@ -33,7 +33,7 @@ type Folder struct {
func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) {
vmFolders, err := folder.Children(ctx)
if err != nil {
glog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err)
klog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err)
return nil, err
}
var vmObjList []*VirtualMachine
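Most of the remaining hunks in this commit are the same mechanical glog-to-klog swap. A minimal standalone sketch of klog usage (assuming the k8s.io/klog package vendored here); note that, unlike glog, klog expects an explicit InitFlags call to register its flags:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags (-v, -logtostderr, ...) on the default flag set.
	klog.InitFlags(nil)
	flag.Set("v", "4")
	flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush()

	klog.Infof("always printed: %d", 42)
	klog.V(4).Infof("printed only when -v >= 4")
	klog.Errorf("error-level message: %v", "example")
}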

View File

@ -20,8 +20,8 @@ import (
"context"
"fmt"
"github.com/golang/glog"
"github.com/vmware/govmomi/pbm"
"k8s.io/klog"
pbmtypes "github.com/vmware/govmomi/pbm/types"
"github.com/vmware/govmomi/vim25"
@ -36,7 +36,7 @@ type PbmClient struct {
func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) {
pbmClient, err := pbm.NewClient(ctx, client)
if err != nil {
glog.Errorf("Failed to create new Pbm Client. err: %+v", err)
klog.Errorf("Failed to create new Pbm Client. err: %+v", err)
return nil, err
}
return &PbmClient{pbmClient}, nil
@ -60,7 +60,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo
}
compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
glog.Errorf("Error occurred for CheckRequirements call. err %+v", err)
klog.Errorf("Error occurred for CheckRequirements call. err %+v", err)
return false, "", err
}
if compatibilityResult != nil && len(compatibilityResult) > 0 {
@ -70,7 +70,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo
}
dsName, err := datastore.ObjectName(ctx)
if err != nil {
glog.Errorf("Failed to get datastore ObjectName")
klog.Errorf("Failed to get datastore ObjectName")
return false, "", err
}
if compatibilityResult[0].Error[0].LocalizedMessage == "" {
@ -92,7 +92,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Dat
)
compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores)
if err != nil {
glog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err)
klog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err)
return nil, "", err
}
compatibleHubs := compatibilityResult.CompatibleDatastores()
@ -114,7 +114,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Dat
}
// Return an error if there are no compatible datastores.
if len(compatibleHubs) < 1 {
glog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID)
klog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID)
return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements")
}
return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil
@ -138,7 +138,7 @@ func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context,
}
res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
if err != nil {
glog.Errorf("Error occurred for CheckRequirements call. err: %+v", err)
klog.Errorf("Error occurred for CheckRequirements call. err: %+v", err)
return nil, err
}
return res, nil
@ -162,7 +162,7 @@ func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[strin
if err == nil {
dsMorNameMap[ds.Reference().Value] = dsObjectName
} else {
glog.Errorf("Error occurred while getting datastore object name. err: %+v", err)
klog.Errorf("Error occurred while getting datastore object name. err: %+v", err)
}
}
return dsMorNameMap

View File

@ -22,12 +22,12 @@ import (
"regexp"
"strings"
"github.com/golang/glog"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// IsNotFound return true if err is NotFoundError or DefaultNotFoundError
@ -140,7 +140,7 @@ func GetPathFromVMDiskPath(vmDiskPath string) string {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath)
return ""
}
return datastorePathObj.Path
@ -151,7 +151,7 @@ func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
glog.Errorf("Failed to parse volPath: %s", vmDiskPath)
klog.Errorf("Failed to parse volPath: %s", vmDiskPath)
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
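Both helpers above wrap govmomi's object.DatastorePath parser. A standalone sketch of what FromString does with a typical disk path:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/object"
)

func main() {
	// A vSphere disk path has the form "[datastore] folder/file.vmdk".
	var dsPath object.DatastorePath
	if !dsPath.FromString("[sharedVmfs-0] kubevols/my-volume.vmdk") {
		fmt.Println("failed to parse disk path")
		return
	}
	fmt.Println(dsPath.Datastore) // sharedVmfs-0
	fmt.Println(dsPath.Path)      // kubevols/my-volume.vmdk
}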

View File

@ -22,12 +22,12 @@ import (
"strings"
"time"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
)
// VirtualMachine extends the govmomi VirtualMachine object
@ -52,7 +52,7 @@ func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) (
func (vm *VirtualMachine) DeleteVM(ctx context.Context) error {
destroyTask, err := vm.Destroy(ctx)
if err != nil {
glog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err)
return err
}
return destroyTask.Wait(ctx)
@ -69,7 +69,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol
vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath)
attached, err := vm.IsDiskAttached(ctx, vmDiskPath)
if err != nil {
glog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err)
klog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err)
return "", err
}
// If disk is already attached, return the disk UUID
@ -81,31 +81,31 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol
if volumeOptions.StoragePolicyName != "" {
pbmClient, err := NewPbmClient(ctx, vm.Client())
if err != nil {
glog.Errorf("Error occurred while creating new pbmClient. err: %+v", err)
klog.Errorf("Error occurred while creating new pbmClient. err: %+v", err)
return "", err
}
volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName)
if err != nil {
glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err)
klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err)
return "", err
}
}
dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy)
if err != nil {
glog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err)
klog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err)
return "", err
}
// If disk is not attached, create a disk spec for disk to be attached to the VM.
disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions)
if err != nil {
glog.Errorf("Error occurred while creating disk spec. err: %+v", err)
klog.Errorf("Error occurred while creating disk spec. err: %+v", err)
return "", err
}
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
return "", err
}
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
@ -125,7 +125,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol
task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec)
if err != nil {
RecordvSphereMetric(APIAttachVolume, requestTime, err)
glog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
klog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
if newSCSIController != nil {
vm.deleteController(ctx, newSCSIController, vmDevices)
}
@ -134,7 +134,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol
err = task.Wait(ctx)
RecordvSphereMetric(APIAttachVolume, requestTime, err)
if err != nil {
glog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
klog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err)
if newSCSIController != nil {
vm.deleteController(ctx, newSCSIController, vmDevices)
}
@ -144,7 +144,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol
// Once disk is attached, get the disk UUID.
diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath)
if err != nil {
glog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err)
klog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err)
vm.DetachDisk(ctx, vmDiskPath)
if newSCSIController != nil {
vm.deleteController(ctx, newSCSIController, vmDevices)
@ -159,11 +159,11 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err
vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath)
device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath)
if err != nil {
glog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath)
klog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath)
return err
}
if device == nil {
glog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
klog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath)
}
// Detach disk from VM
@ -171,7 +171,7 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err
err = vm.RemoveDevice(ctx, true, device)
RecordvSphereMetric(APIDetachVolume, requestTime, err)
if err != nil {
glog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err)
klog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err)
return err
}
return nil
@ -181,7 +181,7 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err
func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) {
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"})
if err != nil {
glog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil
@ -192,7 +192,7 @@ func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.Resource
func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) {
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"})
if err != nil {
glog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err)
klog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err)
return false, err
}
if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState {
@ -206,14 +206,14 @@ func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) {
func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) {
host, err := vm.HostSystem(ctx)
if err != nil {
glog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
var hostSystemMo mo.HostSystem
s := object.NewSearchIndex(vm.Client())
err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
if err != nil {
glog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err)
klog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err)
return nil, err
}
var dsRefList []types.ManagedObjectReference
@ -226,11 +226,11 @@ func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Da
properties := []string{DatastoreInfoProperty}
err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList)
if err != nil {
glog.Errorf("Failed to get Datastore managed objects from datastore objects."+
klog.Errorf("Failed to get Datastore managed objects from datastore objects."+
" dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err)
return nil, err
}
glog.V(9).Infof("Result dsMoList: %+v", dsMoList)
klog.V(9).Infof("Result dsMoList: %+v", dsMoList)
var dsObjList []*DatastoreInfo
for _, dsMo := range dsMoList {
dsObjList = append(dsObjList,
@ -247,7 +247,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d
var newSCSIController types.BaseVirtualDevice
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices. err: %+v", err)
klog.Errorf("Failed to retrieve VM devices. err: %+v", err)
return nil, nil, err
}
// find SCSI controller of particular type from VM devices
@ -256,20 +256,20 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d
if scsiController == nil {
newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType)
if err != nil {
glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err)
return nil, nil, err
}
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices. err: %v", err)
klog.Errorf("Failed to retrieve VM devices. err: %v", err)
return nil, nil, err
}
// verify scsi controller in virtual machine
scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType)
scsiController = getAvailableSCSIController(scsiControllersOfRequiredType)
if scsiController == nil {
glog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
klog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
// attempt clean up of scsi controller
vm.deleteController(ctx, newSCSIController, vmDevices)
return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType)
@ -278,7 +278,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d
disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath)
unitNumber, err := getNextUnitNumber(vmDevices, scsiController)
if err != nil {
glog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err)
klog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err)
return nil, nil, err
}
*disk.UnitNumber = unitNumber
@ -307,7 +307,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d
func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return "", err
}
// filter vm devices to retrieve device for the given vmdk file identified by disk path
@ -327,18 +327,18 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis
// Get VM device list
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
allSCSIControllers := getSCSIControllers(vmDevices)
if len(allSCSIControllers) >= SCSIControllerLimit {
// we reached the maximum number of controllers we can attach
glog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
klog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit)
}
newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType)
if err != nil {
glog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
@ -349,7 +349,7 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis
// add the scsi controller to virtual machine
err = vm.AddDevice(context.TODO(), newSCSIController)
if err != nil {
glog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err)
klog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err)
// attempt clean up of scsi controller
vm.deleteController(ctx, newSCSIController, vmDevices)
return nil, err
@ -361,7 +361,7 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis
func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
@ -371,7 +371,7 @@ func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath s
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
glog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
klog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
}
}
@ -396,7 +396,7 @@ func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice
device := controllerDeviceList[len(controllerDeviceList)-1]
err := vm.RemoveDevice(ctx, true, device)
if err != nil {
glog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err)
klog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err)
return err
}
return nil

View File

@ -19,7 +19,7 @@ package vclib
import (
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
// VolumeOptions specifies various options for a volume.
@ -38,7 +38,7 @@ type VolumeOptions struct {
var (
// DiskFormatValidType specifies the valid disk formats
DiskFormatValidType = map[string]string{
ThinDiskType: ThinDiskType,
ThinDiskType: ThinDiskType,
strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType,
strings.ToLower(ZeroedThickDiskType): PreallocatedDiskType,
}
@ -59,7 +59,7 @@ func DiskformatValidOptions() string {
// CheckDiskFormatSupported checks if the diskFormat is valid
func CheckDiskFormatSupported(diskFormat string) bool {
if DiskFormatValidType[diskFormat] == "" {
glog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions())
klog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions())
return false
}
return true
@ -82,7 +82,7 @@ func CheckControllerSupported(ctrlType string) bool {
return true
}
}
glog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
klog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions())
return false
}
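A quick sketch of how these validators behave, assuming the vclib import path used in this commit. DiskFormatValidType is keyed by lowercase names, so callers are expected to pass lowercase formats:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
)

func main() {
	// Valid formats: thin, zeroedthick, eagerzeroedthick. Anything else fails.
	for _, format := range []string{"thin", "zeroedthick", "eagerzeroedthick", "thick"} {
		fmt.Printf("%-17s supported: %v\n", format, vclib.CheckDiskFormatSupported(format))
	}
	// Controller types are validated the same way.
	fmt.Println(vclib.CheckControllerSupported("pvscsi")) // true
}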

View File

@ -22,6 +22,7 @@ import (
"fmt"
"io"
"net"
"net/url"
"os"
"path"
"path/filepath"
@ -32,16 +33,18 @@ import (
"gopkg.in/gcfg.v1"
"github.com/golang/glog"
"github.com/vmware/govmomi/vapi/rest"
"github.com/vmware/govmomi/vapi/tags"
"github.com/vmware/govmomi/vim25/mo"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"k8s.io/kubernetes/pkg/controller"
)
// VSphere Cloud Provider constants
@ -103,6 +106,8 @@ type VirtualCenterConfig struct {
Datacenters string `gcfg:"datacenters"`
// Soap round tripper count (retries = RoundTripper - 1)
RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
// Thumbprint is the vCenter certificate's thumbprint
Thumbprint string `gcfg:"thumbprint"`
}
// Structure that represents the content of vsphere.conf file.
@ -121,6 +126,11 @@ type VSphereConfig struct {
VCenterPort string `gcfg:"port"`
// True if vCenter uses self-signed cert.
InsecureFlag bool `gcfg:"insecure-flag"`
// Specifies the path to a CA certificate in PEM format. Optional; if not
// configured, the system's CA certificates will be used.
CAFile string `gcfg:"ca-file"`
// Thumbprint is the vCenter certificate's thumbprint
Thumbprint string `gcfg:"thumbprint"`
// Datacenter in which VMs are located.
// Deprecated. Use "datacenters" instead.
Datacenter string `gcfg:"datacenter"`
@ -134,7 +144,7 @@ type VSphereConfig struct {
WorkingDir string `gcfg:"working-dir"`
// Soap round tripper count (retries = RoundTripper - 1)
RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
// Deprecated as the virtual machines will be automatically discovered.
// Is required on the controller-manager if it does not run on a VMware machine
// VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid
// property in VmConfigInfo, or also set as vc.uuid in VMX file.
// If not set, will be fetched from the machine via sysfs (requires root)
@ -170,6 +180,12 @@ type VSphereConfig struct {
DefaultDatastore string `gcfg:"default-datastore"`
ResourcePoolPath string `gcfg:"resourcepool-path"`
}
// Tag categories and tags which correspond to "built-in node labels: zones and region"
Labels struct {
Zone string `gcfg:"zone"`
Region string `gcfg:"region"`
}
}
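A sketch (not part of this commit) of a vsphere.conf exercising the new ca-file, thumbprint, and Labels keys, parsed with the same gcfg package imported above. The struct and all values are hypothetical stand-ins for the real VSphereConfig:

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1"
)

type miniConfig struct {
	Global struct {
		InsecureFlag bool   `gcfg:"insecure-flag"`
		CAFile       string `gcfg:"ca-file"`
		Thumbprint   string `gcfg:"thumbprint"`
	}
	Labels struct {
		Zone   string `gcfg:"zone"`
		Region string `gcfg:"region"`
	}
}

func main() {
	conf := `[Global]
insecure-flag = false
ca-file = /etc/vsphere/ca.pem
thumbprint = AB:CD:EF:01:23:45:67:89:AB:CD:EF:01:23:45:67:89:AB:CD:EF:01

[Labels]
zone = k8s-zone
region = k8s-region`

	var c miniConfig
	if err := gcfg.ReadStringInto(&c, conf); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(c.Global.CAFile, c.Global.Thumbprint, c.Labels.Zone, c.Labels.Region)
}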
type Volumes interface {
@ -225,7 +241,7 @@ func init() {
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {
func (vs *VSphere) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
}
// Initialize Node Informers
@ -248,13 +264,13 @@ func (vs *VSphere) SetInformers(informerFactory informers.SharedInformerFactory)
// Only on controller node it is required to register listeners.
// Register callbacks for node updates
glog.V(4).Infof("Setting up node informers for vSphere Cloud Provider")
klog.V(4).Infof("Setting up node informers for vSphere Cloud Provider")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: vs.NodeAdded,
DeleteFunc: vs.NodeDeleted,
})
glog.V(4).Infof("Node informers in vSphere cloud provider initialized")
klog.V(4).Infof("Node informers in vSphere cloud provider initialized")
}
@ -264,12 +280,12 @@ func newWorkerNode() (*VSphere, error) {
vs := VSphere{}
vs.hostName, err = os.Hostname()
if err != nil {
glog.Errorf("Failed to get hostname. err: %+v", err)
klog.Errorf("Failed to get hostname. err: %+v", err)
return nil, err
}
vs.vmUUID, err = GetVMUUID()
if err != nil {
glog.Errorf("Failed to get uuid. err: %+v", err)
klog.Errorf("Failed to get uuid. err: %+v", err)
return nil, err
}
return &vs, nil
@ -280,18 +296,18 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
isSecretInfoProvided := true
if cfg.Global.SecretName == "" || cfg.Global.SecretNamespace == "" {
glog.Warningf("SecretName and/or SecretNamespace is not provided. " +
klog.Warningf("SecretName and/or SecretNamespace is not provided. " +
"VCP will use username and password from config file")
isSecretInfoProvided = false
}
if isSecretInfoProvided {
if cfg.Global.User != "" {
glog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials")
klog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials")
cfg.Global.User = ""
}
if cfg.Global.Password != "" {
glog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials")
klog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials")
cfg.Global.Password = ""
}
}
@ -299,28 +315,28 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
// Check if the vsphere.conf is in old format. In this
// format the cfg.VirtualCenter will be nil or empty.
if cfg.VirtualCenter == nil || len(cfg.VirtualCenter) == 0 {
glog.V(4).Infof("Config is not per virtual center and is in old format.")
klog.V(4).Infof("Config is not per virtual center and is in old format.")
if !isSecretInfoProvided {
if cfg.Global.User == "" {
glog.Error("Global.User is empty!")
klog.Error("Global.User is empty!")
return nil, ErrUsernameMissing
}
if cfg.Global.Password == "" {
glog.Error("Global.Password is empty!")
klog.Error("Global.Password is empty!")
return nil, ErrPasswordMissing
}
}
if cfg.Global.WorkingDir == "" {
glog.Error("Global.WorkingDir is empty!")
klog.Error("Global.WorkingDir is empty!")
return nil, errors.New("Global.WorkingDir is empty!")
}
if cfg.Global.VCenterIP == "" {
glog.Error("Global.VCenterIP is empty!")
klog.Error("Global.VCenterIP is empty!")
return nil, errors.New("Global.VCenterIP is empty!")
}
if cfg.Global.Datacenter == "" {
glog.Error("Global.Datacenter is empty!")
klog.Error("Global.Datacenter is empty!")
return nil, errors.New("Global.Datacenter is empty!")
}
cfg.Workspace.VCenterIP = cfg.Global.VCenterIP
@ -334,6 +350,7 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
VCenterPort: cfg.Global.VCenterPort,
Datacenters: cfg.Global.Datacenter,
RoundTripperCount: cfg.Global.RoundTripperCount,
Thumbprint: cfg.Global.Thumbprint,
}
// Note: If secrets info is provided username and password will be populated
@ -345,7 +362,10 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
Insecure: cfg.Global.InsecureFlag,
RoundTripperCount: vcConfig.RoundTripperCount,
Port: vcConfig.VCenterPort,
CACert: cfg.Global.CAFile,
Thumbprint: cfg.Global.Thumbprint,
}
vsphereIns := VSphereInstance{
conn: &vSphereConn,
cfg: &vcConfig,
@ -355,14 +375,14 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
if cfg.Workspace.VCenterIP == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
msg := fmt.Sprintf("All fields in workspace are mandatory."+
" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
glog.Error(msg)
klog.Error(msg)
return nil, errors.New(msg)
}
for vcServer, vcConfig := range cfg.VirtualCenter {
glog.V(4).Infof("Initializing vc server %s", vcServer)
klog.V(4).Infof("Initializing vc server %s", vcServer)
if vcServer == "" {
glog.Error("vsphere.conf does not have the VirtualCenter IP address specified")
klog.Error("vsphere.conf does not have the VirtualCenter IP address specified")
return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
}
@ -370,24 +390,24 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
if vcConfig.User == "" {
vcConfig.User = cfg.Global.User
if vcConfig.User == "" {
glog.Errorf("vcConfig.User is empty for vc %s!", vcServer)
klog.Errorf("vcConfig.User is empty for vc %s!", vcServer)
return nil, ErrUsernameMissing
}
}
if vcConfig.Password == "" {
vcConfig.Password = cfg.Global.Password
if vcConfig.Password == "" {
glog.Errorf("vcConfig.Password is empty for vc %s!", vcServer)
klog.Errorf("vcConfig.Password is empty for vc %s!", vcServer)
return nil, ErrPasswordMissing
}
}
} else {
if vcConfig.User != "" {
glog.Warningf("vcConfig.User for server %s and Secret info provided. VCP will use secret to get credentials", vcServer)
klog.Warningf("vcConfig.User for server %s and Secret info provided. VCP will use secret to get credentials", vcServer)
vcConfig.User = ""
}
if vcConfig.Password != "" {
glog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer)
klog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer)
vcConfig.Password = ""
}
}
@ -417,6 +437,8 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance
Insecure: cfg.Global.InsecureFlag,
RoundTripperCount: vcConfig.RoundTripperCount,
Port: vcConfig.VCenterPort,
CACert: cfg.Global.CAFile,
Thumbprint: vcConfig.Thumbprint,
}
vsphereIns := VSphereInstance{
conn: &vSphereConn,
@ -439,13 +461,17 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) {
}
vs.hostName, err = os.Hostname()
if err != nil {
glog.Errorf("Failed to get hostname. err: %+v", err)
klog.Errorf("Failed to get hostname. err: %+v", err)
return nil, err
}
vs.vmUUID, err = getVMUUID()
if err != nil {
glog.Errorf("Failed to get uuid. err: %+v", err)
return nil, err
if cfg.Global.VMUUID != "" {
vs.vmUUID = cfg.Global.VMUUID
} else {
vs.vmUUID, err = getVMUUID()
if err != nil {
klog.Errorf("Failed to get uuid. err: %+v", err)
return nil, err
}
}
runtime.SetFinalizer(vs, logout)
return vs, nil
@ -461,7 +487,7 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) {
if cfg.Disk.SCSIControllerType == "" {
cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType
} else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) {
glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
klog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
}
if cfg.Global.WorkingDir != "" {
@ -493,11 +519,8 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) {
func logout(vs *VSphere) {
for _, vsphereIns := range vs.vsphereInstanceMap {
if vsphereIns.conn.Client != nil {
vsphereIns.conn.Logout(context.TODO())
}
vsphereIns.conn.Logout(context.TODO())
}
}
// Instances returns an implementation of Instances for vSphere.
@ -509,13 +532,13 @@ func getLocalIP() ([]v1.NodeAddress, error) {
addrs := []v1.NodeAddress{}
ifaces, err := net.Interfaces()
if err != nil {
glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
klog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
return nil, err
}
for _, i := range ifaces {
localAddrs, err := i.Addrs()
if err != nil {
glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
klog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
} else {
for _, addr := range localAddrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
@ -535,7 +558,7 @@ func getLocalIP() ([]v1.NodeAddress, error) {
},
)
}
glog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType)
klog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType)
}
}
}
@ -547,7 +570,7 @@ func getLocalIP() ([]v1.NodeAddress, error) {
func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInstance, error) {
vsphereIns, err := vs.nodeManager.GetVSphereInstance(nodeName)
if err != nil {
glog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName)
klog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName)
return nil, err
}
return &vsphereIns, nil
@ -556,13 +579,13 @@ func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInsta
func (vs *VSphere) getVSphereInstanceForServer(vcServer string, ctx context.Context) (*VSphereInstance, error) {
vsphereIns, ok := vs.vsphereInstanceMap[vcServer]
if !ok {
glog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer)
klog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer)
return nil, errors.New(fmt.Sprintf("Cannot find node %q in vsphere configuration map", vcServer))
}
// Ensure client is logged in and session is valid
err := vs.nodeManager.vcConnect(ctx, vsphereIns)
if err != nil {
glog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err)
klog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err)
return nil, err
}
@ -582,7 +605,13 @@ func (vs *VSphere) getVMFromNodeName(ctx context.Context, nodeName k8stypes.Node
func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
// Get local IP addresses if node is local node
if vs.hostName == convertToString(nodeName) {
return getLocalIP()
addrs, err := getLocalIP()
if err != nil {
return nil, err
}
// add the hostname address
v1helper.AddToNodeAddresses(&addrs, v1.NodeAddress{Type: v1.NodeHostName, Address: vs.hostName})
return addrs, nil
}
if vs.cfg == nil {
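The hunk above appends a NodeHostName address via v1helper.AddToNodeAddresses, which deduplicates as it appends. A standalone sketch:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

func main() {
	addrs := []v1.NodeAddress{
		{Type: v1.NodeInternalIP, Address: "10.0.0.5"},
	}
	// Duplicate entries are ignored, so repeated discovery is safe.
	v1helper.AddToNodeAddresses(&addrs,
		v1.NodeAddress{Type: v1.NodeHostName, Address: "node-1"},
		v1.NodeAddress{Type: v1.NodeHostName, Address: "node-1"},
	)
	fmt.Printf("%+v\n", addrs) // the hostname address appears exactly once
}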
@ -606,12 +635,12 @@ func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName
vm, err := vs.getVMFromNodeName(ctx, nodeName)
if err != nil {
glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
return nil, err
}
vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"})
if err != nil {
glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err)
klog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err)
return nil, err
}
// retrieve VM's ip(s)
@ -663,23 +692,11 @@ func convertToK8sType(vmName string) k8stypes.NodeName {
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
var nodeName string
nodes, err := vs.nodeManager.GetNodeDetails()
nodeName, err := vs.GetNodeNameFromProviderID(providerID)
if err != nil {
glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
klog.Errorf("Error while getting nodename for providerID %s", providerID)
return false, err
}
for _, node := range nodes {
// ProviderID is UUID for nodes v1.9.3+
if node.VMUUID == GetUUIDFromProviderID(providerID) || node.NodeName == providerID {
nodeName = node.NodeName
break
}
}
if nodeName == "" {
msg := fmt.Sprintf("Error while obtaining Kubernetes nodename for providerID %s.", providerID)
return false, errors.New(msg)
}
_, err = vs.InstanceID(ctx, convertToK8sType(nodeName))
if err == nil {
return true, nil
@ -690,7 +707,31 @@ func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID st
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
return false, cloudprovider.NotImplemented
nodeName, err := vs.GetNodeNameFromProviderID(providerID)
if err != nil {
klog.Errorf("Error while getting nodename for providerID %s", providerID)
return false, err
}
vsi, err := vs.getVSphereInstance(convertToK8sType(nodeName))
if err != nil {
return false, err
}
// Ensure client is logged in and session is valid
if err := vs.nodeManager.vcConnect(ctx, vsi); err != nil {
return false, err
}
vm, err := vs.getVMFromNodeName(ctx, convertToK8sType(nodeName))
if err != nil {
klog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeName, err)
return false, err
}
isActive, err := vm.IsActive(ctx)
if err != nil {
klog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeName, err)
return false, err
}
return !isActive, nil
}
// InstanceID returns the cloud provider ID of the node with the specified Name.
@ -720,31 +761,27 @@ func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) (
}
vm, err := vs.getVMFromNodeName(ctx, nodeName)
if err != nil {
if err == vclib.ErrNoVMFound {
return "", cloudprovider.InstanceNotFound
}
glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
return "", err
}
isActive, err := vm.IsActive(ctx)
if err != nil {
glog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err)
klog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err)
return "", err
}
if isActive {
return vs.vmUUID, nil
}
glog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState)
klog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState)
return "", cloudprovider.InstanceNotFound
}
instanceID, err := instanceIDInternal()
if err != nil {
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if vclib.IsManagedObjectNotFoundError(err) {
err = vs.nodeManager.RediscoverNode(nodeName)
if err == nil {
glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName))
klog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName))
instanceID, err = instanceIDInternal()
} else if err == vclib.ErrNoVMFound {
return "", cloudprovider.InstanceNotFound
@ -780,10 +817,13 @@ func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return nil, false
}
// Zones returns an implementation of Zones for Google vSphere.
// Zones returns an implementation of Zones for vSphere.
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) {
glog.V(1).Info("The vSphere cloud provider does not support zones")
return nil, false
if vs.cfg == nil {
klog.V(1).Info("The vSphere cloud provider does not support zones")
return nil, false
}
return vs, true
}
// Routes returns a false since the interface is not supported for vSphere.
@ -812,13 +852,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
vm, err := vs.getVMFromNodeName(ctx, nodeName)
if err != nil {
glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
return "", err
}
diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName})
if err != nil {
glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err)
klog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err)
return "", err
}
return diskUUID, nil
@ -826,33 +866,20 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
requestTime := time.Now()
diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
if err != nil {
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if vclib.IsManagedObjectNotFoundError(err) {
err = vs.nodeManager.RediscoverNode(nodeName)
if err == nil {
glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
klog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err)
klog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err)
}
}
}
glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err)
klog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err)
vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
return diskUUID, err
}
func (vs *VSphere) retry(nodeName k8stypes.NodeName, err error) (bool, error) {
isManagedObjectNotFoundError := false
if err != nil {
if vclib.IsManagedObjectNotFoundError(err) {
isManagedObjectNotFoundError = true
glog.V(4).Infof("error %q ManagedObjectNotFound for node %q", err, convertToString(nodeName))
err = vs.nodeManager.RediscoverNode(nodeName)
}
}
return isManagedObjectNotFoundError, err
}
// DetachDisk detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error {
@ -866,7 +893,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
if err != nil {
// If node doesn't exist, disk is already detached from node.
if err == vclib.ErrNoVMFound {
glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
klog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
return nil
}
return err
@ -880,16 +907,16 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
if err != nil {
// If node doesn't exist, disk is already detached from node.
if err == vclib.ErrNoVMFound {
glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
klog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath)
return nil
}
glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err)
return err
}
err = vm.DetachDisk(ctx, volPath)
if err != nil {
glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, convertToString(nodeName), err)
klog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, convertToString(nodeName), err)
return err
}
return nil
@ -897,9 +924,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
requestTime := time.Now()
err := detachDiskInternal(volPath, nodeName)
if err != nil {
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if vclib.IsManagedObjectNotFoundError(err) {
err = vs.nodeManager.RediscoverNode(nodeName)
if err == nil {
err = detachDiskInternal(volPath, nodeName)
}
@ -934,30 +960,29 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
vm, err := vs.getVMFromNodeName(ctx, nodeName)
if err != nil {
if err == vclib.ErrNoVMFound {
glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath)
klog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath)
// make the disk as detached and return false without error.
return false, nil
}
glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err)
klog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err)
return false, err
}
volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath)
attached, err := vm.IsDiskAttached(ctx, volPath)
if err != nil {
glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
klog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
vSphereInstance)
}
glog.V(4).Infof("DiskIsAttached result: %q and error: %q, for volume: %q", attached, err, volPath)
klog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath)
return attached, err
}
requestTime := time.Now()
isAttached, err := diskIsAttachedInternal(volPath, nodeName)
if err != nil {
var isManagedObjectNotFoundError bool
isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
if isManagedObjectNotFoundError {
if vclib.IsManagedObjectNotFoundError(err) {
err = vs.nodeManager.RediscoverNode(nodeName)
if err == vclib.ErrNoVMFound {
isAttached, err = false, nil
} else if err == nil {
@ -999,7 +1024,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
for nodeName := range nodeVolumes {
nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
if err != nil {
glog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err)
klog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err)
return nodesToRetry, err
}
VC_DC := nodeInfo.vcServer + nodeInfo.dataCenter.String()
@ -1017,7 +1042,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
globalErrMutex.Lock()
globalErr = err
globalErrMutex.Unlock()
glog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err)
klog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err)
}
}
nodesToRetryMutex.Lock()
@ -1040,7 +1065,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
return nodesToRetry, nil
}
glog.V(4).Info("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
klog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -1053,7 +1078,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
// Convert VolPaths into canonical form so that it can be compared with the VM device path.
vmVolumes, err := vs.convertVolPathsToDevicePaths(ctx, nodeVolumes)
if err != nil {
glog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
klog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
return nil, err
}
attached := make(map[string]map[string]bool)
@ -1069,10 +1094,10 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
err = vs.nodeManager.RediscoverNode(nodeName)
if err != nil {
if err == vclib.ErrNoVMFound {
glog.V(4).Infof("node %s not found. err: %+v", nodeName, err)
klog.V(4).Infof("node %s not found. err: %+v", nodeName, err)
continue
}
glog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err)
klog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err)
return nil, err
}
remainingNodesVolumes[nodeName] = nodeVolumes[nodeName]
@ -1082,7 +1107,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
if len(remainingNodesVolumes) != 0 {
nodesToRetry, err = disksAreAttach(ctx, remainingNodesVolumes, attached, true)
if err != nil || len(nodesToRetry) != 0 {
glog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err)
klog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err)
return nil, err
}
}
@ -1091,7 +1116,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
disksAttached[convertToK8sType(nodeName)] = volPaths
}
}
glog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached)
klog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached)
return disksAttached, nil
}
requestTime := time.Now()
@ -1105,7 +1130,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
// return value will be [DatastoreCluster/sharedVmfs-0] kubevols/<volume-name>.vmdk
// else return value will be [sharedVmfs-0] kubevols/<volume-name>.vmdk
func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
klog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
var datastore string
// If datastore not specified, then use default datastore
@ -1135,21 +1160,21 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
// This routine runs every 5 minutes and is started only once over the lifetime of the process.
cleanUpRoutineInitLock.Lock()
if !cleanUpRoutineInitialized {
glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's")
klog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's")
go vs.cleanUpDummyVMs(DummyVMPrefixName)
cleanUpRoutineInitialized = true
}
cleanUpRoutineInitLock.Unlock()
vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath)
if err != nil {
glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
klog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
return "", err
}
}
if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager)
if err != nil {
glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
klog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
return "", err
}
} else {
@ -1157,7 +1182,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
// if the given datastore is a shared datastore across all node VMs.
sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager)
if err != nil {
glog.Errorf("Failed to get shared datastore: %+v", err)
klog.Errorf("Failed to get shared datastore: %+v", err)
return "", err
}
found := false
@ -1180,7 +1205,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
err = ds.CreateDirectory(ctx, kubeVolsPath, false)
if err != nil && err != vclib.ErrFileAlreadyExist {
glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
klog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
return "", err
}
volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk"
@ -1191,13 +1216,13 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
}
volumePath, err = disk.Create(ctx, ds)
if err != nil {
glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
klog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
return "", err
}
// Get the canonical path for the volume path.
canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath)
if err != nil {
glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
klog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
return "", err
}
if filepath.Base(datastore) != datastore {
@ -1209,13 +1234,13 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
requestTime := time.Now()
canonicalVolumePath, err = createVolumeInternal(volumeOptions)
vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err)
glog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath)
klog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath)
return canonicalVolumePath, err
}
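As a usage sketch, hypothetical and not part of this commit: the vclib.VolumeOptions field names below (Name, CapacityKB, DiskFormat) are assumptions inferred from how volumeOptions is read inside CreateVolume above.
// Hypothetical caller sketch: provision a small thin-provisioned volume.
opts := &vclib.VolumeOptions{
Name: "example-pv", // becomes kubevols/example-pv.vmdk on the chosen datastore
CapacityKB: 1 * 1024 * 1024, // 1 GiB expressed in KB (field name is an assumption)
DiskFormat: "thin", // assumed format value
}
canonicalPath, err := vs.CreateVolume(opts)
if err != nil {
klog.Errorf("example volume creation failed: %+v", err)
} else {
klog.V(4).Infof("created example volume at %s", canonicalPath)
}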
// DeleteVolume deletes a volume given volume name.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath)
klog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath)
deleteVolumeInternal := func(vmDiskPath string) error {
// Create context
ctx, cancel := context.WithCancel(context.Background())
@ -1235,7 +1260,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
}
err = disk.Delete(ctx, dc)
if err != nil {
glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err)
klog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err)
}
return err
}
@ -1254,11 +1279,11 @@ func (vs *VSphere) HasClusterID() bool {
func (vs *VSphere) NodeAdded(obj interface{}) {
node, ok := obj.(*v1.Node)
if node == nil || !ok {
glog.Warningf("NodeAdded: unrecognized object %+v", obj)
klog.Warningf("NodeAdded: unrecognized object %+v", obj)
return
}
glog.V(4).Infof("Node added: %+v", node)
klog.V(4).Infof("Node added: %+v", node)
vs.nodeManager.RegisterNode(node)
}
@ -1266,11 +1291,11 @@ func (vs *VSphere) NodeAdded(obj interface{}) {
func (vs *VSphere) NodeDeleted(obj interface{}) {
node, ok := obj.(*v1.Node)
if node == nil || !ok {
glog.Warningf("NodeDeleted: unrecognized object %+v", obj)
klog.Warningf("NodeDeleted: unrecognized object %+v", obj)
return
}
glog.V(4).Infof("Node deleted: %+v", node)
klog.V(4).Infof("Node deleted: %+v", node)
vs.nodeManager.UnRegisterNode(node)
}
@ -1280,3 +1305,112 @@ func (vs *VSphere) NodeManager() (nodeManager *NodeManager) {
}
return vs.nodeManager
}
func withTagsClient(ctx context.Context, connection *vclib.VSphereConnection, f func(c *rest.Client) error) error {
c := rest.NewClient(connection.Client)
user := url.UserPassword(connection.Username, connection.Password)
if err := c.Login(ctx, user); err != nil {
return err
}
defer c.Logout(ctx)
return f(c)
}
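A brief, hypothetical usage sketch of this helper (vsi and vmHost stand in for the connection and host reference obtained as in GetZone below); the closure receives an authenticated rest.Client and logout happens on return:
// Hypothetical usage: list the tag IDs attached to the VM's host.
err := withTagsClient(ctx, vsi.conn, func(c *rest.Client) error {
m := tags.NewManager(c)
attached, err := m.ListAttachedTags(ctx, vmHost)
if err != nil {
return err
}
for _, id := range attached {
klog.V(4).Infof("attached tag id: %s", id)
}
return nil
})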
// GetZone implements Zones.GetZone
func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
nodeName, err := vs.CurrentNodeName(ctx, vs.hostName)
if err != nil {
klog.Errorf("Cannot get node name.")
return cloudprovider.Zone{}, err
}
zone := cloudprovider.Zone{}
vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
if err != nil {
klog.Errorf("Cannot connent to vsphere. Get zone for node %s error", nodeName)
return cloudprovider.Zone{}, err
}
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
klog.Errorf("Cannot connent to datacenter. Get zone for node %s error", nodeName)
return cloudprovider.Zone{}, err
}
vmHost, err := dc.GetHostByVMUUID(ctx, vs.vmUUID)
if err != nil {
klog.Errorf("Cannot find VM runtime host. Get zone for node %s error", nodeName)
return cloudprovider.Zone{}, err
}
pc := vsi.conn.Client.ServiceContent.PropertyCollector
err = withTagsClient(ctx, vsi.conn, func(c *rest.Client) error {
client := tags.NewManager(c)
// example result: ["Folder", "Datacenter", "Cluster", "Host"]
objects, err := mo.Ancestors(ctx, vsi.conn.Client, pc, *vmHost)
if err != nil {
return err
}
// search the hierarchy, example order: ["Host", "Cluster", "Datacenter", "Folder"]
for i := range objects {
obj := objects[len(objects)-1-i]
tags, err := client.ListAttachedTags(ctx, obj)
if err != nil {
klog.Errorf("Cannot list attached tags. Get zone for node %s: %s", nodeName, err)
return err
}
for _, value := range tags {
tag, err := client.GetTag(ctx, value)
if err != nil {
klog.Errorf("Get tag %s: %s", value, err)
return err
}
category, err := client.GetCategory(ctx, tag.CategoryID)
if err != nil {
klog.Errorf("Get category %s error", value)
return err
}
found := func() {
klog.V(4).Infof("Found %q tag (%s) for %s attached to %s", category.Name, tag.Name, vs.vmUUID, obj.Reference())
}
switch {
case category.Name == vs.cfg.Labels.Zone:
zone.FailureDomain = tag.Name
found()
case category.Name == vs.cfg.Labels.Region:
zone.Region = tag.Name
found()
}
if zone.FailureDomain != "" && zone.Region != "" {
return nil
}
}
}
if zone.Region == "" {
if vs.cfg.Labels.Region != "" {
return fmt.Errorf("vSphere region category %q does not match any tags for node %s [%s]", vs.cfg.Labels.Region, nodeName, vs.vmUUID)
}
}
if zone.FailureDomain == "" {
if vs.cfg.Labels.Zone != "" {
return fmt.Errorf("vSphere zone category %q does not match any tags for node %s [%s]", vs.cfg.Labels.Zone, nodeName, vs.vmUUID)
}
}
return nil
})
if err != nil {
klog.Errorf("Get zone for node %s: %s", nodeName, err)
return cloudprovider.Zone{}, err
}
return zone, nil
}
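GetZone only succeeds when tags in the configured region and zone categories are attached somewhere along the host's ancestry. A hypothetical setup sketch using the same tags.Manager calls exercised by TestZones later in this commit (restClient and hostRef are assumed to exist; error handling elided):
// Hypothetical setup: create matching categories/tags and attach them to a host.
m := tags.NewManager(restClient)
regionCat, _ := m.CreateCategory(ctx, &tags.Category{Name: "k8s-region"})
regionTag, _ := m.CreateTag(ctx, &tags.Tag{CategoryID: regionCat, Name: "us-west"})
zoneCat, _ := m.CreateCategory(ctx, &tags.Category{Name: "k8s-zone"})
zoneTag, _ := m.CreateTag(ctx, &tags.Tag{CategoryID: zoneCat, Name: "us-west-1a"})
_ = m.AttachTag(ctx, regionTag, hostRef) // the region tag may instead sit higher up, e.g. on the Datacenter
_ = m.AttachTag(ctx, zoneTag, hostRef)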
func (vs *VSphere) GetZoneByNodeName(ctx context.Context, nodeName k8stypes.NodeName) (cloudprovider.Zone, error) {
return cloudprovider.Zone{}, cloudprovider.NotImplemented
}
func (vs *VSphere) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
return cloudprovider.Zone{}, cloudprovider.NotImplemented
}

View File

@ -19,6 +19,8 @@ package vsphere
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"log"
"os"
"strconv"
@ -26,13 +28,20 @@ import (
"testing"
lookup "github.com/vmware/govmomi/lookup/simulator"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/simulator"
"github.com/vmware/govmomi/simulator/vpx"
sts "github.com/vmware/govmomi/sts/simulator"
"github.com/vmware/govmomi/vapi/rest"
vapi "github.com/vmware/govmomi/vapi/simulator"
"github.com/vmware/govmomi/vapi/tags"
"github.com/vmware/govmomi/vim25/mo"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/fixtures"
)
// localhostCert was generated from crypto/tls/generate_cert.go with the following command:
@ -90,7 +99,15 @@ func configFromEnv() (cfg VSphereConfig, ok bool) {
}
// configFromSim starts a vcsim instance and returns config for use against the vcsim instance.
// The vcsim instance is configured with an empty tls.Config.
func configFromSim() (VSphereConfig, func()) {
return configFromSimWithTLS(new(tls.Config), true)
}
// configFromSimWithTLS starts a vcsim instance and returns config for use against the vcsim instance.
// The vcsim instance is configured with a tls.Config. The returned client
// config can be configured to allow/decline insecure connections.
func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (VSphereConfig, func()) {
var cfg VSphereConfig
model := simulator.VPX()
@ -99,17 +116,22 @@ func configFromSim() (VSphereConfig, func()) {
log.Fatal(err)
}
model.Service.TLS = new(tls.Config)
model.Service.TLS = tlsConfig
s := model.Service.NewServer()
// STS simulator
path, handler := sts.New(s.URL, vpx.Setting)
model.Service.ServeMux.Handle(path, handler)
// vAPI simulator
path, handler = vapi.New(s.URL, vpx.Setting)
model.Service.ServeMux.Handle(path, handler)
// Lookup Service simulator
model.Service.RegisterSDK(lookup.New())
cfg.Global.InsecureFlag = true
cfg.Global.InsecureFlag = insecureAllowed
cfg.Global.VCenterIP = s.URL.Hostname()
cfg.Global.VCenterPort = s.URL.Port()
cfg.Global.User = s.URL.User.Username()
@ -160,6 +182,7 @@ insecure-flag = true
datacenter = us-west
vm-uuid = 1234
vm-name = vmname
ca-file = /some/path/to/a/ca.pem
`))
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
@ -180,6 +203,10 @@ vm-name = vmname
if cfg.Global.VMName != "vmname" {
t.Errorf("incorrect vm-name: %s", cfg.Global.VMName)
}
if cfg.Global.CAFile != "/some/path/to/a/ca.pem" {
t.Errorf("incorrect ca-file: %s", cfg.Global.CAFile)
}
}
func TestNewVSphere(t *testing.T) {
@ -250,18 +277,213 @@ func TestVSphereLoginByToken(t *testing.T) {
vcInstance.conn.Logout(ctx)
}
func TestZones(t *testing.T) {
cfg := VSphereConfig{}
cfg.Global.Datacenter = "myDatacenter"
// Create vSphere configuration object
vs := VSphere{
cfg: &cfg,
func TestVSphereLoginWithCaCert(t *testing.T) {
caCertPEM, err := ioutil.ReadFile(fixtures.CaCertPath)
if err != nil {
t.Fatalf("Could not read ca cert from file")
}
_, ok := vs.Zones()
serverCert, err := tls.LoadX509KeyPair(fixtures.ServerCertPath, fixtures.ServerKeyPath)
if err != nil {
t.Fatalf("Could not load server cert and server key from files: %#v", err)
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(caCertPEM); !ok {
t.Fatalf("Cannot add CA to CAPool")
}
tlsConfig := tls.Config{
Certificates: []tls.Certificate{serverCert},
RootCAs: certPool,
}
cfg, cleanup := configFromSimWithTLS(&tlsConfig, false)
defer cleanup()
cfg.Global.CAFile = fixtures.CaCertPath
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
ctx := context.Background()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestZonesNoConfig(t *testing.T) {
_, ok := new(VSphere).Zones()
if ok {
t.Fatalf("Zones() returned true")
t.Fatalf("Zones() should return false without VCP configured")
}
}
func TestZones(t *testing.T) {
// Any context will do
ctx := context.Background()
// Create a vcsim instance
cfg, cleanup := configFromSim()
defer cleanup()
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Configure region and zone categories
vs.cfg.Labels.Region = "k8s-region"
vs.cfg.Labels.Zone = "k8s-zone"
// Create vSphere client
vsi, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vsi.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
// Lookup Datacenter for this test's Workspace
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
t.Fatal(err)
}
// Lookup VM's host where we'll attach tags
host, err := dc.GetHostByVMUUID(ctx, vs.vmUUID)
if err != nil {
t.Fatal(err)
}
// Property Collector instance
pc := property.DefaultCollector(vsi.conn.Client)
// Tag manager instance
m := tags.NewManager(rest.NewClient(vsi.conn.Client))
// Create a region category
regionID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Region})
if err != nil {
t.Fatal(err)
}
// Create a region tag
regionID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: regionID, Name: "k8s-region-US"})
if err != nil {
t.Fatal(err)
}
// Create a zone category
zoneID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Zone})
if err != nil {
t.Fatal(err)
}
// Create a zone tag
zoneID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: zoneID, Name: "k8s-zone-US-CA1"})
if err != nil {
t.Fatal(err)
}
// Create a random category
randomID, err := m.CreateCategory(ctx, &tags.Category{Name: "random-cat"})
if err != nil {
t.Fatal(err)
}
// Create a random tag
randomID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: randomID, Name: "random-tag"})
if err != nil {
t.Fatal(err)
}
// Attach a random tag to VM's host
if err = m.AttachTag(ctx, randomID, host); err != nil {
t.Fatal(err)
}
// Expecting Zones() to return true, indicating VCP supports the Zones interface
zones, ok := vs.Zones()
if !ok {
t.Fatalf("zones=%t", ok)
}
// GetZone() tests, covering error and success paths
tests := []struct {
name string // name of the test for logging
fail bool // expect GetZone() to return error if true
prep func() // prepare vCenter state for the test
}{
{"no tags", true, func() {
// no prep
}},
{"no zone tag", true, func() {
if err = m.AttachTag(ctx, regionID, host); err != nil {
t.Fatal(err)
}
}},
{"host tags set", false, func() {
if err = m.AttachTag(ctx, zoneID, host); err != nil {
t.Fatal(err)
}
}},
{"host tags removed", true, func() {
if err = m.DetachTag(ctx, zoneID, host); err != nil {
t.Fatal(err)
}
if err = m.DetachTag(ctx, regionID, host); err != nil {
t.Fatal(err)
}
}},
{"dc region, cluster zone", false, func() {
var h mo.HostSystem
if err = pc.RetrieveOne(ctx, host.Reference(), []string{"parent"}, &h); err != nil {
t.Fatal(err)
}
// Attach region tag to Datacenter
if err = m.AttachTag(ctx, regionID, dc); err != nil {
t.Fatal(err)
}
// Attach zone tag to Cluster
if err = m.AttachTag(ctx, zoneID, h.Parent); err != nil {
t.Fatal(err)
}
}},
}
for _, test := range tests {
test.prep()
zone, err := zones.GetZone(ctx)
if test.fail {
if err == nil {
t.Errorf("%s: expected error", test.name)
} else {
t.Logf("%s: expected error=%s", test.name, err)
}
} else {
if err != nil {
t.Errorf("%s: %s", test.name, err)
}
t.Logf("zone=%#v", zone)
}
}
}
@ -293,7 +515,7 @@ func TestInstances(t *testing.T) {
}
t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceID)
instanceID, err = i.InstanceID(context.TODO(), nonExistingVM)
_, err = i.InstanceID(context.TODO(), nonExistingVM)
if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM)
} else if err == nil {
@ -306,6 +528,15 @@ func TestInstances(t *testing.T) {
if err != nil {
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", nodeName, err)
}
found := false
for _, addr := range addrs {
if addr.Type == v1.NodeHostName {
found = true
}
}
if !found {
t.Fatalf("NodeAddresses does not report hostname: %s %v", nodeName, addrs)
}
t.Logf("Found NodeAddresses(%s) = %s\n", nodeName, addrs)
}
@ -366,6 +597,7 @@ func TestSecretVSphereConfig(t *testing.T) {
expectedUsername string
expectedPassword string
expectedError error
expectedThumbprints map[string]string
}{
{
testName: "Username and password with old configuration",
@ -535,6 +767,69 @@ func TestSecretVSphereConfig(t *testing.T) {
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "virtual centers with a thumbprint",
conf: `[Global]
server = global
user = user
password = password
datacenter = us-west
thumbprint = "thumbprint:global"
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
expectedThumbprints: map[string]string{
"global": "thumbprint:global",
},
},
{
testName: "Multiple virtual centers with different thumbprints",
conf: `[Global]
user = user
password = password
datacenter = us-west
[VirtualCenter "0.0.0.0"]
thumbprint = thumbprint:0
[VirtualCenter "no_thumbprint"]
[VirtualCenter "1.1.1.1"]
thumbprint = thumbprint:1
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
expectedThumbprints: map[string]string{
"0.0.0.0": "thumbprint:0",
"1.1.1.1": "thumbprint:1",
},
},
{
testName: "Multiple virtual centers use the global CA cert",
conf: `[Global]
user = user
password = password
datacenter = us-west
ca-file = /some/path/to/my/trusted/ca.pem
[VirtualCenter "0.0.0.0"]
user = user
password = password
[VirtualCenter "1.1.1.1"]
user = user
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
}
for _, testcase := range testcases {
@ -564,9 +859,31 @@ func TestSecretVSphereConfig(t *testing.T) {
t.Fatalf("Expected password %s doesn't match actual password %s in config %s. error: %s",
testcase.expectedPassword, vsInstance.conn.Password, testcase.conf, err)
}
}
}
// Check if all the expected thumbprints are configured
for instanceName, expectedThumbprint := range testcase.expectedThumbprints {
instanceConfig, ok := vs.vsphereInstanceMap[instanceName]
if !ok {
t.Fatalf("Could not find configuration for instance %s", instanceName)
}
if actualThumbprint := instanceConfig.conn.Thumbprint; actualThumbprint != expectedThumbprint {
t.Fatalf(
"Expected thumbprint for instance '%s' to be '%s', got '%s'",
instanceName, expectedThumbprint, actualThumbprint,
)
}
}
// Check if all connections are configured with the global CA certificate
if expectedCaPath := cfg.Global.CAFile; expectedCaPath != "" {
for name, instance := range vs.vsphereInstanceMap {
if actualCaPath := instance.conn.CACert; actualCaPath != expectedCaPath {
t.Fatalf(
"Expected CA certificate path for instance '%s' to be the globally configured one ('%s'), got '%s'",
name, expectedCaPath, actualCaPath,
)
}
}
}
}
}
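Taken together, the thumbprint and ca-file options introduced by these tests compose like this; an illustrative gcfg configuration with placeholder values, not taken verbatim from any fixture:
[Global]
user = "admin@vsphere.local"
password = "secret"
datacenter = us-west
; one trusted CA shared by every vCenter connection
ca-file = /etc/kubernetes/vcenter-ca.pem
[VirtualCenter "10.160.0.1"]
; per-vCenter certificate thumbprint pin
thumbprint = "ab:cd:ef:12:34:56"
[Workspace]
server = 10.160.0.1
datacenter = us-west
folder = kubernetes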

View File

@ -19,24 +19,23 @@ package vsphere
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/golang/glog"
"github.com/vmware/govmomi/vim25"
"fmt"
"github.com/vmware/govmomi/vim25/mo"
"io/ioutil"
"k8s.io/klog"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
"k8s.io/kubernetes/pkg/util/version"
"path/filepath"
)
const (
@ -81,18 +80,6 @@ func getVSphereConfig() (*VSphereConfig, error) {
return &cfg, nil
}
func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection {
vSphereConn := &vclib.VSphereConnection{
Username: cfg.Global.User,
Password: cfg.Global.Password,
Hostname: cfg.Global.VCenterIP,
Insecure: cfg.Global.InsecureFlag,
RoundTripperCount: cfg.Global.RoundTripperCount,
Port: cfg.Global.VCenterPort,
}
return vSphereConn
}
// Returns the accessible datastores for the given node VM.
func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx)
@ -100,27 +87,27 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod
// Check if the node VM is not found which indicates that the node info in the node manager is stale.
// If so, rediscover the node and retry.
if vclib.IsManagedObjectNotFoundError(err) {
glog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName)
klog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName)
err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName))
if err == nil {
glog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName)
klog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName)
nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName))
if err != nil {
glog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail)
klog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail)
return nil, err
}
accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx)
if err != nil {
glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
return nil, err
}
} else {
glog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail)
klog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail)
return nil, err
}
} else {
glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail)
return nil, err
}
}
@ -131,22 +118,22 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod
func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
nodeVmDetails, err := nodeManager.GetNodeDetails()
if err != nil {
glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
return nil, err
}
if len(nodeVmDetails) == 0 {
msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails)
glog.Error(msg)
klog.Error(msg)
return nil, fmt.Errorf(msg)
}
var sharedDatastores []*vclib.DatastoreInfo
for _, nodeVmDetail := range nodeVmDetails {
glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName)
klog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName)
accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager)
if err != nil {
if err == vclib.ErrNoVMFound {
glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName)
klog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName)
continue
}
return nil, err
@ -161,19 +148,19 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter,
}
}
}
glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
klog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
if err != nil {
glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
klog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
return nil, err
}
glog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores)
klog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores)
return sharedDatastores, nil
}
func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo {
glog.V(9).Infof("list1: %+v", list1)
glog.V(9).Infof("list2: %+v", list2)
klog.V(9).Infof("list1: %+v", list1)
klog.V(9).Infof("list2: %+v", list2)
var sharedDs []*vclib.DatastoreInfo
for _, val1 := range list1 {
// Check if val1 is found in list2
@ -215,10 +202,10 @@ func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, share
if ok {
datastores = append(datastores, dsInfo)
} else {
glog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url)
klog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url)
}
}
glog.V(9).Infof("Datastore from endpoint VC: %+v", datastores)
klog.V(9).Infof("Datastore from endpoint VC: %+v", datastores)
return datastores, nil
}
@ -229,32 +216,32 @@ func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storag
}
storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
if err != nil {
glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
return "", err
}
sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager)
if err != nil {
glog.Errorf("Failed to get shared datastores. err: %+v", err)
klog.Errorf("Failed to get shared datastores. err: %+v", err)
return "", err
}
if len(sharedDs) == 0 {
msg := "No shared datastores found in the endpoint virtual center"
glog.Errorf(msg)
klog.Error(msg)
return "", errors.New(msg)
}
compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs)
if err != nil {
glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
sharedDs, storagePolicyID, err)
return "", err
}
glog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
if err != nil {
glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
return "", err
}
glog.V(4).Infof("Most free datastore : %+s", datastore)
klog.V(4).Infof("Most free datastore : %+s", datastore)
return datastore, err
}
@ -264,7 +251,7 @@ func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resou
if err != nil {
return nil, err
}
glog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool)
klog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool)
folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
if err != nil {
return nil, err
@ -283,26 +270,30 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute)
vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
if err != nil {
glog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err)
klog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err)
continue
}
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err)
klog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err)
continue
}
// Get the folder reference for global working directory where the dummy VM needs to be created.
vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
if err != nil {
glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
continue
}
// A write lock is acquired to make sure the cleanup routine doesn't delete any VMs created by ongoing PVC requests.
defer cleanUpDummyVMLock.Lock()
err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
if err != nil {
glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
cleanUpDummyVMs := func() {
cleanUpDummyVMLock.Lock()
defer cleanUpDummyVMLock.Unlock()
err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
if err != nil {
klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
}
}
cleanUpDummyVMs()
}
}
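The closure introduced above is load-bearing: defer fires when the enclosing function returns, not at the end of a loop iteration, so the removed defer in the loop body would have accumulated until cleanUpDummyVMs itself exited (and it deferred Lock rather than Unlock, so the lock was never released at all). A generic illustration of the pattern:
// Generic sketch, not vSphere-specific: scope a deferred unlock to a
// single loop iteration by wrapping the critical section in a closure.
var mu sync.Mutex
for i := 0; i < 3; i++ {
func() {
mu.Lock()
defer mu.Unlock() // runs when this closure returns, i.e. once per iteration
// ... critical section for iteration i ...
}()
}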
@ -369,7 +360,7 @@ func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPa
// Get the canonical volume path for volPath.
canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath)
if err != nil {
glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
klog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
return "", err
}
// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
@ -396,7 +387,7 @@ func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes
for i, volPath := range volPaths {
deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath)
if err != nil {
glog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
klog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
return nil, err
}
volPaths[i] = deviceVolPath
@ -432,7 +423,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"})
if err != nil {
if vclib.IsManagedObjectNotFoundError(err) && !retry {
glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList)
klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList)
// Property Collector Query failed
// VerifyVolumePaths per VM
for _, nodeName := range nodes {
@ -443,13 +434,13 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
devices, err := nodeInfo.vm.VirtualMachine.Device(ctx)
if err != nil {
if vclib.IsManagedObjectNotFoundError(err) {
glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm)
klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm)
nodesToRetry = append(nodesToRetry, nodeName)
continue
}
return nodesToRetry, err
}
glog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached)
}
}
@ -459,14 +450,14 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
vmMoMap := make(map[string]mo.VirtualMachine)
for _, vmMo := range vmMoList {
if vmMo.Config == nil {
glog.Errorf("Config is not available for VM: %q", vmMo.Name)
klog.Errorf("Config is not available for VM: %q", vmMo.Name)
continue
}
glog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid))
klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid))
vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo
}
glog.V(9).Infof("vmMoMap: +%v", vmMoMap)
klog.V(9).Infof("vmMoMap: +%v", vmMoMap)
for _, nodeName := range nodes {
node, err := vs.nodeManager.GetNode(nodeName)
@ -475,11 +466,11 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
}
nodeUUID, err := GetNodeUUID(&node)
if err != nil {
glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
return nodesToRetry, err
}
nodeUUID = strings.ToLower(nodeUUID)
glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %s", nodeName, nodeUUID, vmMoMap)
klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
}
return nodesToRetry, nil
@ -522,6 +513,27 @@ func (vs *VSphere) IsDummyVMPresent(vmName string) (bool, error) {
return isDummyVMPresent, nil
}
func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) {
var nodeName string
nodes, err := vs.nodeManager.GetNodeDetails()
if err != nil {
klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
return "", err
}
for _, node := range nodes {
// ProviderID is UUID for nodes v1.9.3+
if node.VMUUID == GetUUIDFromProviderID(providerID) || node.NodeName == providerID {
nodeName = node.NodeName
break
}
}
if nodeName == "" {
msg := fmt.Sprintf("Error while obtaining Kubernetes nodename for providerID %s.", providerID)
return "", errors.New(msg)
}
return nodeName, nil
}
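For context, the matching above relies on vSphere provider IDs carrying the VM UUID behind a scheme prefix; the conventional "vsphere://" prefix used in this sketch is an assumption, since this hunk does not show GetUUIDFromProviderID's body:
// Hypothetical round-trip, assuming the conventional "vsphere://" prefix.
providerID := "vsphere://4201f365-daf3-27f4-7e6a-b6e4c3a1f7e2"
uuid := strings.TrimPrefix(providerID, "vsphere://")
// GetNodeNameFromProviderID compares this value against node.VMUUID and
// falls back to treating the whole providerID as a node name.
klog.V(4).Infof("uuid=%s", uuid)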
func GetVMUUID() (string, error) {
id, err := ioutil.ReadFile(UUIDPath)
if err != nil {
@ -552,12 +564,12 @@ func GetUUIDFromProviderID(providerID string) string {
func IsUUIDSupportedNode(node *v1.Node) (bool, error) {
newVersion, err := version.ParseSemantic("v1.9.4")
if err != nil {
glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
return false, err
}
nodeVersion, err := version.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err)
return false, err
}
if nodeVersion.LessThan(newVersion) {
@ -569,7 +581,7 @@ func IsUUIDSupportedNode(node *v1.Node) (bool, error) {
func GetNodeUUID(node *v1.Node) (string, error) {
oldNode, err := IsUUIDSupportedNode(node)
if err != nil {
glog.Errorf("Failed to get node UUID for node %+v with error %v", node, err)
klog.Errorf("Failed to get node UUID for node %+v with error %v", node, err)
return "", err
}
if oldNode {