Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00

Commit: Fresh dep ensure

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD (generated, vendored): 51 changes
@@ -37,33 +37,39 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)

@@ -72,6 +78,7 @@ go_test(
srcs = [
"azure_backoff_test.go",
"azure_cache_test.go",
"azure_instances_test.go",
"azure_loadbalancer_test.go",
"azure_metrics_test.go",
"azure_routes_test.go",
@@ -82,22 +89,24 @@ go_test(
"azure_vmss_cache_test.go",
"azure_vmss_test.go",
"azure_wrap_test.go",
"azure_zones_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS (generated, vendored): 5 changes
@@ -1,16 +1,13 @@
approvers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- karataliu
- khenidak
reviewers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- justaugustus
- karataliu
- khenidak

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD (generated, vendored): 2 changes
@@ -8,8 +8,8 @@ go_library(
deps = [
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go (generated, vendored): 29 changes
@@ -24,8 +24,8 @@ import (

"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/golang/glog"
"golang.org/x/crypto/pkcs12"
"k8s.io/klog"
)

// AzureAuthConfig holds auth related part of cloud config
@@ -44,30 +44,41 @@ type AzureAuthConfig struct {
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
// Use managed service identity for the virtual machine to access Azure ARM APIs
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
// UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
// More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
// For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
UserAssignedIdentityID string `json:"userAssignedIdentityID" yaml:"userAssignedIdentityID"`
// The ID of the Azure Subscription that the cluster is deployed in
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
}

// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}

if config.UseManagedIdentityExtension {
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
msiEndpoint, err := adal.GetMSIVMEndpoint()
if err != nil {
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
}
if len(config.UserAssignedIdentityID) > 0 {
klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
env.ServiceManagementEndpoint,
config.UserAssignedIdentityID)
}
klog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
return adal.NewServicePrincipalTokenFromMSI(
msiEndpoint,
env.ServiceManagementEndpoint)
}

oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}

if len(config.AADClientSecret) > 0 {
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
return adal.NewServicePrincipalToken(
*oauthConfig,
config.AADClientID,
@@ -76,7 +87,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
}

if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
certData, err := ioutil.ReadFile(config.AADClientCertPath)
if err != nil {
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
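
Editorial note (not part of this commit): the MSI branch added above means a caller that sets UseManagedIdentityExtension never needs a client secret or certificate. The following minimal sketch shows one way the vendored auth package could be consumed; the tenant ID is a placeholder, and using azure.PublicCloud as the environment is an assumption for illustration only.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
)

func main() {
	// Placeholder configuration: rely on the VM's system-assigned managed identity.
	cfg := &auth.AzureAuthConfig{
		TenantID:                    "<tenant-id>", // placeholder, not a real tenant
		UseManagedIdentityExtension: true,
	}
	env := azure.PublicCloud // assumed environment for this sketch
	token, err := auth.GetServicePrincipalToken(cfg, &env)
	if err != nil {
		fmt.Println("token error:", err)
		return
	}
	// The token would normally be wrapped in an autorest authorizer when building ARM clients.
	fmt.Println("acquired service principal token:", token != nil)
}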

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go (generated, vendored): 267 changes
@@ -21,19 +21,28 @@ import (
"io"
"io/ioutil"
"strings"
"sync"
"time"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
"k8s.io/kubernetes/pkg/controller"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/version"

"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"k8s.io/klog"
"sigs.k8s.io/yaml"
)

const (
@@ -52,6 +61,9 @@ const (

loadBalancerSkuBasic = "basic"
loadBalancerSkuStandard = "standard"

externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
managedByAzureLabel = "kubernetes.azure.com/managed"
)

var (
@@ -59,6 +71,9 @@ var (
defaultExcludeMasterFromStandardLB = true
)

// Azure implements PVLabeler.
var _ cloudprovider.PVLabeler = (*Cloud)(nil)

// Config holds the configuration parsed from the --cloud-config flag
// All fields are required unless otherwise specified
type Config struct {
@@ -144,13 +159,37 @@ type Cloud struct {
DisksClient DisksClient
FileClient FileClient
resourceRequestBackoff wait.Backoff
metadata *InstanceMetadata
metadata *InstanceMetadataService
vmSet VMSet

// Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes.
nodeCachesLock sync.Mutex
// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
// it is updated by the nodeInformer
nodeZones map[string]sets.String
// nodeResourceGroups holds nodes external resource groups
nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String
// nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced

// routeCIDRsLock holds lock for routeCIDRs cache.
routeCIDRsLock sync.Mutex
// routeCIDRs holds cache for route CIDRs.
routeCIDRs map[string]string

// Clients for vmss.
VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient
VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient

// client for vm sizes list
VirtualMachineSizesClient VirtualMachineSizesClient

kubeClient clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder

vmCache *timedCache
lbCache *timedCache
nsgCache *timedCache
@@ -216,11 +255,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)

glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
klog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPS,
config.CloudProviderRateLimitBucket)

glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
klog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)
}
@@ -238,8 +277,12 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
rateLimiterWriter: operationPollRateLimiterWrite,
}
az := Cloud{
Config: *config,
Environment: *env,
Config: *config,
Environment: *env,
nodeZones: map[string]sets.String{},
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{},

DisksClient: newAzDisksClient(azClientConfig),
RoutesClient: newAzRoutesClient(azClientConfig),
@@ -251,6 +294,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
StorageAccountClient: newAzStorageAccountClient(azClientConfig),
VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig),
PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig),
VirtualMachineSizesClient: newAzVirtualMachineSizesClient(azClientConfig),
VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig),
VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
FileClient: &azureFileClient{env: *env},
@@ -277,14 +321,17 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second,
Jitter: az.CloudProviderBackoffJitter,
}
glog.V(2).Infof("Azure cloudprovider using retry backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
az.CloudProviderBackoffRetries,
az.CloudProviderBackoffExponent,
az.CloudProviderBackoffDuration,
az.CloudProviderBackoffJitter)
}

az.metadata = NewInstanceMetadata()
az.metadata, err = NewInstanceMetadataService(metadataURL)
if err != nil {
return nil, err
}

if az.MaximumLoadBalancerRuleCount == 0 {
az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
@@ -346,7 +393,12 @@ func parseConfig(configReader io.Reader) (*Config, error) {
}

// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider")
az.eventBroadcaster = record.NewBroadcaster()
az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")})
az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"})
}

// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
@@ -424,3 +476,194 @@ func initDiskControllers(az *Cloud) error {

return nil
}

// SetInformers sets informers for Azure cloud provider.
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
klog.Infof("Setting up informers for Azure cloud provider")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*v1.Node)
az.updateNodeCaches(nil, node)
},
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node)
if newNode.Labels[kubeletapis.LabelZoneFailureDomain] ==
prevNode.Labels[kubeletapis.LabelZoneFailureDomain] {
return
}
az.updateNodeCaches(prevNode, newNode)
},
DeleteFunc: func(obj interface{}) {
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here
// and we need to handle that correctly.
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
az.updateNodeCaches(node, nil)
},
})
az.nodeInformerSynced = nodeInformer.HasSynced
}

// updateNodeCaches updates local cache for node's zones and external resource groups.
func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()

if prevNode != nil {
// Remove from nodeZones cache.
prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(prevZone) {
az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
if az.nodeZones[prevZone].Len() == 0 {
az.nodeZones[prevZone] = nil
}
}

// Remove from nodeResourceGroups cache.
_, ok = prevNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok {
delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name)
}

// Remove from unmanagedNodes cache.
managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
}
}

if newNode != nil {
// Add to nodeZones cache.
newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(newZone) {
if az.nodeZones[newZone] == nil {
az.nodeZones[newZone] = sets.NewString()
}
az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
}

// Add to nodeResourceGroups cache.
newRG, ok := newNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok && len(newRG) > 0 {
az.nodeResourceGroups[newNode.ObjectMeta.Name] = newRG
}

// Add to unmanagedNodes cache.
managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
}
}
}

// GetActiveZones returns all the zones in which k8s nodes are currently running.
func (az *Cloud) GetActiveZones() (sets.String, error) {
if az.nodeInformerSynced == nil {
return nil, fmt.Errorf("Azure cloud provider doesn't have informers set")
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones")
}

zones := sets.NewString()
for zone, nodes := range az.nodeZones {
if len(nodes) > 0 {
zones.Insert(zone)
}
}
return zones, nil
}

// GetLocation returns the location in which k8s cluster is currently running.
func (az *Cloud) GetLocation() string {
return az.Location
}

// GetNodeResourceGroup gets resource group for given node.
func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return az.ResourceGroup, nil
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return "", fmt.Errorf("node informer is not synced when trying to GetNodeResourceGroup")
}

// Return external resource group if it has been cached.
if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok {
return cachedRG, nil
}

// Return resource group from cloud provider options.
return az.ResourceGroup, nil
}

// GetResourceGroups returns a set of resource groups that all nodes are running on.
func (az *Cloud) GetResourceGroups() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return sets.NewString(az.ResourceGroup), nil
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups")
}

resourceGroups := sets.NewString(az.ResourceGroup)
for _, rg := range az.nodeResourceGroups {
resourceGroups.Insert(rg)
}

return resourceGroups, nil
}

// GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes).
func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return nil.
if az.nodeInformerSynced == nil {
return nil, nil
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes")
}

return sets.NewString(az.unmanagedNodes.List()...), nil
}

// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
labels := node.ObjectMeta.Labels
if rg, ok := labels[externalResourceGroupLabel]; ok && rg != az.ResourceGroup {
return true
}

if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
return true
}

return false
}
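
Editorial note (not part of this commit): the new node caches are keyed off two labels, kubernetes.azure.com/resource-group and kubernetes.azure.com/managed. The standalone sketch below restates the exclusion rule from ShouldNodeExcludedFromLoadBalancer for illustration only; it is a simplified rewrite over plain label maps, not the provider's API, and the resource group name is a placeholder.

package main

import "fmt"

const (
	externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
	managedByAzureLabel        = "kubernetes.azure.com/managed"
)

// excludedFromLoadBalancer mirrors the rule above: a node is skipped when it sits in a
// different resource group than the cluster, or is explicitly marked as not managed by Azure.
func excludedFromLoadBalancer(labels map[string]string, clusterResourceGroup string) bool {
	if rg, ok := labels[externalResourceGroupLabel]; ok && rg != clusterResourceGroup {
		return true
	}
	if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
		return true
	}
	return false
}

func main() {
	labels := map[string]string{managedByAzureLabel: "false"} // e.g. an on-prem node
	fmt.Println(excludedFromLoadBalancer(labels, "my-cluster-rg")) // prints: true
}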

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go (generated, vendored): 149 changes
@@ -18,16 +18,18 @@ package azure

import (
"context"
"fmt"
"net/http"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

// requestBackoff if backoff is disabled in cloud provider it
@@ -45,6 +47,13 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
return resourceRequestBackoff
}

// Event creates a event for the specified object.
func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
if obj != nil && reason != "" {
az.eventRecorder.Event(obj, eventtype, reason, message)
}
}

// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
var machine compute.VirtualMachine
@@ -55,10 +64,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
return true, cloudprovider.InstanceNotFound
}
if retryErr != nil {
glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
return false, nil
}
glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
return true, nil
})
if err == wait.ErrWaitTimeout {
@@ -69,20 +78,20 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
}

// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
func (az *Cloud) VirtualMachineClientListWithRetry(resourceGroup string) ([]compute.VirtualMachine, error) {
allNodes := []compute.VirtualMachine{}
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, az.ResourceGroup)
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, resourceGroup)
if retryErr != nil {
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
klog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
resourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", az.ResourceGroup)
klog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup)
return true, nil
})
if err != nil {
@@ -99,24 +108,24 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
var retryErr error
ip, publicIP, retryErr = az.getIPForMachine(name)
if retryErr != nil {
glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
return false, nil
}
glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
klog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
return true, nil
})
return ip, publicIP, err
}

// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.SecurityGroup) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := processHTTPRetryResponse(resp, err)
klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.nsgCache.Delete(*sg.Name)
@@ -126,14 +135,14 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
}

// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
func (az *Cloud) CreateOrUpdateLBWithRetry(service *v1.Service, lb network.LoadBalancer) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := processHTTPRetryResponse(resp, err)
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.lbCache.Delete(*lb.Name)
@@ -143,7 +152,7 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
}

// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
func (az *Cloud) ListLBWithRetry(service *v1.Service) ([]network.LoadBalancer, error) {
var allLBs []network.LoadBalancer

err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
@@ -153,12 +162,13 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {

allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
if retryErr != nil {
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error())
klog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
klog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
return true, nil
})
if err != nil {
@@ -169,7 +179,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
}

// ListPIPWithRetry list the PIP resources in the given resource group
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
func (az *Cloud) ListPIPWithRetry(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
var allPIPs []network.PublicIPAddress

err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
@@ -179,12 +189,13 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd

allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if retryErr != nil {
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error())
klog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
pipResourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
klog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
return true, nil
})
if err != nil {
@@ -195,48 +206,48 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd
}

// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
func (az *Cloud) CreateOrUpdatePIPWithRetry(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err)
})
}

// CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(service *v1.Service, nic network.Interface) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err)
})
}

// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
func (az *Cloud) DeletePublicIPWithRetry(service *v1.Service, pipResourceGroup string, pipName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(service, "DeletePublicIPAddress", resp, err)
})
}

// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
func (az *Cloud) DeleteLBWithRetry(service *v1.Service, lbName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
done, err := processHTTPRetryResponse(resp, err)
done, err := az.processHTTPRetryResponse(service, "DeleteLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after deleting
az.lbCache.Delete(lbName)
@@ -252,7 +263,7 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable
defer cancel()

resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

@@ -263,8 +274,8 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
defer cancel()

resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

@@ -275,20 +286,20 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
defer cancel()

resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return processHTTPRetryResponse(resp, err)
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

@@ -296,40 +307,13 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualM
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
if isSuccessHTTPResponse(resp) {
glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
return true, nil
}
if shouldRetryAPIRequest(resp, err) {
glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
// suppress the error object so that backoff process continues
return false, nil
}
// Fall-through: stop periodic backoff
return true, nil
}

// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
func shouldRetryAPIRequest(resp autorest.Response, err error) bool {
if err != nil {
return true
}
// HTTP 4xx or 5xx suggests we should retry
if 399 < resp.StatusCode && resp.StatusCode < 600 {
return true
}
return false
}

// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
func isSuccessHTTPResponse(resp autorest.Response) bool {
func isSuccessHTTPResponse(resp http.Response) bool {
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true
@@ -352,16 +336,21 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
return false
}

func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
if resp != nil {
func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, resp *http.Response, err error) (bool, error) {
if resp != nil && isSuccessHTTPResponse(*resp) {
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true, nil
}
return true, nil
}

if shouldRetryHTTPRequest(resp, err) {
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
if err != nil {
az.Event(service, v1.EventTypeWarning, reason, err.Error())
klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err)
} else {
az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode))
klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode)
}

// suppress the error object so that backoff process continues
return false, nil
}
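
Editorial note (not part of this commit): all of the *WithRetry helpers above share one shape: wrap a single Azure API call in wait.ExponentialBackoff, return (false, nil) to keep retrying on transient failures, and (true, nil) to stop. The standalone sketch below shows that pattern; the endpoint URL and backoff values are placeholders, and nothing in it is taken from the vendored code.

package main

import (
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Placeholder backoff settings: 4 attempts, doubling delay starting at 1s with jitter.
	backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Jitter: 0.1, Steps: 4}

	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		resp, err := http.Get("https://example.invalid/") // placeholder endpoint, expected to fail
		if err != nil {
			// Transient failure: log and ask for another attempt, as the helpers above do.
			fmt.Printf("attempt %d failed, will retry: %v\n", attempt, err)
			return false, nil
		}
		defer resp.Body.Close()
		if resp.StatusCode >= 200 && resp.StatusCode < 300 {
			return true, nil // success: stop retrying
		}
		return false, nil // non-2xx: retry on the next backoff step
	})
	fmt.Println("final result:", err) // wait.ErrWaitTimeout when every attempt failed
}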

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff_test.go (generated, vendored): 27 changes
@@ -20,11 +20,9 @@ import (
"fmt"
"net/http"
"testing"

"github.com/Azure/go-autorest/autorest"
)

func TestShouldRetry(t *testing.T) {
func TestShouldRetryHTTPRequest(t *testing.T) {
tests := []struct {
code int
err error
@@ -54,12 +52,10 @@ func TestShouldRetry(t *testing.T) {
}

for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := &http.Response{
StatusCode: test.code,
}
res := shouldRetryAPIRequest(resp, test.err)
res := shouldRetryHTTPRequest(resp, test.err)
if res != test.expected {
t.Errorf("expected: %v, saw: %v", test.expected, res)
}
@@ -86,10 +82,8 @@ func TestIsSuccessResponse(t *testing.T) {
}

for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := http.Response{
StatusCode: test.code,
}
res := isSuccessHTTPResponse(resp)
if res != test.expected {
@@ -99,6 +93,7 @@ func TestIsSuccessResponse(t *testing.T) {
}

func TestProcessRetryResponse(t *testing.T) {
az := &Cloud{}
tests := []struct {
code int
err error
@@ -132,12 +127,10 @@ func TestProcessRetryResponse(t *testing.T) {
}

for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := &http.Response{
StatusCode: test.code,
}
res, err := processRetryResponse(resp, test.err)
res, err := az.processHTTPRetryResponse(nil, "", resp, test.err)
if res != test.stop {
t.Errorf("expected: %v, saw: %v", test.stop, res)
}

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go (generated, vendored): 59 changes
@ -27,11 +27,12 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
|
||||
azstorage "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"github.com/rubiojr/go-vhd/vhd"
|
||||
"k8s.io/klog"
|
||||
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
@ -67,7 +68,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
|
||||
// get accounts
|
||||
accounts, err := c.getAllStorageAccounts()
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
|
||||
klog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
|
||||
c.accounts = make(map[string]*storageAccountState)
|
||||
return &c, nil
|
||||
}
|
||||
@ -79,7 +80,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
|
||||
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
|
||||
// fits storage type and location.
|
||||
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
|
||||
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
|
||||
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, string(defaultStorageAccountKind), c.common.resourceGroup, location, dedicatedDiskAccountNamePrefix)
|
||||
if err != nil {
|
||||
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
@ -96,24 +97,24 @@ func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, lo
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
|
||||
klog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
|
||||
return diskName, diskURI, requestGB, err
|
||||
}
|
||||
|
||||
// DeleteVolume deletes a VHD blob
|
||||
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
|
||||
glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
|
||||
klog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
|
||||
accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse vhd URI %v", err)
|
||||
}
|
||||
key, err := c.common.cloud.getStorageAccesskey(accountName)
|
||||
key, err := c.common.cloud.getStorageAccesskey(accountName, c.common.resourceGroup)
|
||||
if err != nil {
|
||||
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
|
||||
}
|
||||
err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
|
||||
if err != nil {
|
||||
glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
|
||||
klog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseIDMissing) {
|
||||
// disk is still being used
|
||||
@ -122,7 +123,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error {
|
||||
}
|
||||
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
|
||||
}
|
||||
glog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
|
||||
klog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
|
||||
return nil
|
||||
|
||||
}
|
||||
@ -152,7 +153,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC
|
||||
|
||||
tags := make(map[string]string)
|
||||
tags["createdby"] = "k8sAzureDataDisk"
|
||||
glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)
|
||||
klog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)
|
||||
|
||||
blob := container.GetBlobReference(vhdName)
|
||||
blob.Properties.ContentLength = vhdSize
|
||||
@ -184,7 +185,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC
|
||||
End: uint64(vhdSize - 1),
|
||||
}
|
||||
if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
|
||||
glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
|
||||
klog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
|
||||
vhdName, containerName, accountName, err.Error())
|
||||
return "", "", err
|
||||
}
|
||||
@ -214,7 +215,7 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str
|
||||
|
||||
//CreateBlobDisk : create a blob disk in a node
|
||||
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) {
|
||||
glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
|
||||
klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
|
||||
|
||||
storageAccountName, err := c.findSANameForDisk(storageAccountType)
|
||||
if err != nil {
|
||||
@ -246,7 +247,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
_, ok := c.accounts[storageAccountName]
|
||||
if !ok {
|
||||
// the storage account is specified by user
|
||||
glog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
|
||||
klog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
|
||||
return c.DeleteVolume(diskURI)
|
||||
}
|
||||
|
||||
@ -255,7 +256,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
|
||||
klog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
|
||||
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
blob := container.GetBlobReference(vhdName)
|
||||
@ -265,7 +266,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
if diskCount, err := c.getDiskCount(storageAccountName); err != nil {
|
||||
c.accounts[storageAccountName].diskCount = int32(diskCount)
|
||||
} else {
|
||||
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
|
||||
klog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
|
||||
return nil // we have failed to acquire a new count. not an error condition
|
||||
}
|
||||
}
|
||||
@ -290,7 +291,7 @@ func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error)
|
||||
for _, v := range *listKeysResult.Keys {
|
||||
if v.Value != nil && *v.Value == "key1" {
|
||||
if _, ok := c.accounts[SAName]; !ok {
|
||||
glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
|
||||
klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
|
||||
return *v.Value, nil
|
||||
}
|
||||
}
|
||||
@ -365,7 +366,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
_, provisionState, err := c.getStorageAccountState(storageAccountName)
|
||||
|
||||
if err != nil {
|
||||
glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
|
||||
klog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
|
||||
return false, nil // error performing the query - retryable
|
||||
}
|
||||
|
||||
@ -373,7 +374,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
return true, nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
|
||||
klog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
|
||||
return false, nil // back off and see if the account becomes ready on next retry
|
||||
})
|
||||
// we have failed to ensure that account is ready for us to create
|
||||
@ -396,7 +397,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
return err
|
||||
}
|
||||
if bCreated {
|
||||
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
|
||||
klog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
|
||||
}
|
||||
|
||||
// flag so we no longer have to check on ARM
|
||||
@ -428,7 +429,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
|
||||
klog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
|
||||
c.accounts[SAName].diskCount = int32(len(response.Blobs))
|
||||
|
||||
return int(c.accounts[SAName].diskCount), nil
|
||||
@ -448,13 +449,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount
accounts := make(map[string]*storageAccountState)
for _, v := range *accountListResult.Value {
if v.Name == nil || v.Sku == nil {
glog.Info("azureDisk - accountListResult Name or Sku is nil")
klog.Info("azureDisk - accountListResult Name or Sku is nil")
continue
}
if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
continue
}
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)

sastate := &storageAccountState{
name: *v.Name,
@ -485,10 +486,12 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
}

glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
klog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))

cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
Sku: &storage.Sku{Name: storageAccountType},
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
Kind: defaultStorageAccountKind,
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
ctx, cancel := getContextWithCancel()
@ -539,7 +542,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
countAccounts = countAccounts + 1
// empty account
if dCount == 0 {
glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
klog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
return v.name, nil // short circuit, avg is good and no need to adjust
}
// if this account is less allocated
@ -552,7 +555,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam

// if we failed to find storageaccount
if SAName == "" {
glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
klog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
@ -568,7 +571,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam

// avg are not create and we should create more accounts if we can
if aboveAvg && countAccounts < maxStorageAccounts {
glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
klog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
@ -579,7 +582,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam

// averages are not ok and we are at capacity (max storage accounts allowed)
if aboveAvg && countAccounts == maxStorageAccounts {
glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
}
343
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go
generated
vendored
@ -22,23 +22,23 @@ import (
"net/http"
"time"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/client-go/util/flowcontrol"
)

// Helpers for rate limiting error/error channel creation
func createARMRateLimitErr(isWrite bool, opName string) error {
func createRateLimitErr(isWrite bool, opName string) error {
opType := "read"
if isWrite {
opType = "write"
}
return fmt.Errorf("azure - ARM rate limited(%s) for operation:%s", opType, opName)
return fmt.Errorf("azure - cloud provider rate limited(%s) for operation:%s", opType, opName)
}

// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient
@ -131,6 +131,11 @@ type DisksClient interface {
Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error)
}

// VirtualMachineSizesClient defines needed functions for azure compute.VirtualMachineSizesClient
type VirtualMachineSizesClient interface {
List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error)
}

// azClientConfig contains all essential information to create an Azure client.
type azClientConfig struct {
subscriptionID string
@ -170,13 +175,13 @@ func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient
func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
// /* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMCreateOrUpdate")
err = createRateLimitErr(true, "VMCreateOrUpdate")
return
}

glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName)
defer func() {
glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName)
}()

mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -185,20 +190,20 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMGet")
err = createRateLimitErr(false, "VMGet")
return
}

glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName)
defer func() {
glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName)
}()

mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID)
@ -209,13 +214,13 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st

func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMList")
err = createRateLimitErr(false, "VMList")
return
}

glog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName)
klog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName)
klog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName)
}()

mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID)
@ -261,13 +266,13 @@ func newAzInterfacesClient(config *azClientConfig) *azInterfacesClient {
|
||||
func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "NiCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "NiCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName)
|
||||
klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName)
|
||||
klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -277,20 +282,20 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "NicGet")
|
||||
err = createRateLimitErr(false, "NicGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName)
|
||||
klog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName)
|
||||
klog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -301,13 +306,13 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string,
|
||||
|
||||
func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface")
|
||||
err = createRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
|
||||
klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
|
||||
klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -340,13 +345,13 @@ func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient {
|
||||
func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "LBCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "LBCreateOrUpdate")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -356,7 +361,7 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -364,13 +369,13 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro
|
||||
func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "LBDelete")
|
||||
err = createRateLimitErr(true, "LBDelete")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -380,20 +385,20 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "LBGet")
|
||||
err = createRateLimitErr(false, "LBGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -404,13 +409,13 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri
|
||||
|
||||
func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err := createARMRateLimitErr(false, "LBList")
|
||||
err := createRateLimitErr(false, "LBList")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName)
|
||||
klog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -456,13 +461,13 @@ func newAzPublicIPAddressesClient(config *azClientConfig) *azPublicIPAddressesCl
|
||||
func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "PublicIPCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "PublicIPCreateOrUpdate")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -472,7 +477,7 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -480,13 +485,13 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc
|
||||
func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "PublicIPDelete")
|
||||
err = createRateLimitErr(true, "PublicIPDelete")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -496,20 +501,20 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "PublicIPGet")
|
||||
err = createRateLimitErr(false, "PublicIPGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -520,12 +525,12 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName
|
||||
|
||||
func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
return nil, createARMRateLimitErr(false, "PublicIPList")
|
||||
return nil, createRateLimitErr(false, "PublicIPList")
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName)
|
||||
klog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -571,13 +576,13 @@ func newAzSubnetsClient(config *azClientConfig) *azSubnetsClient {
|
||||
func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "SubnetCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "SubnetCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
|
||||
klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
|
||||
klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -587,7 +592,7 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -595,13 +600,13 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName
|
||||
func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "SubnetDelete")
|
||||
err = createRateLimitErr(true, "SubnetDelete")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
|
||||
klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
|
||||
klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -611,20 +616,20 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string,
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "SubnetGet")
|
||||
err = createRateLimitErr(false, "SubnetGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
|
||||
klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
|
||||
klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -635,12 +640,12 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi
|
||||
|
||||
func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
return nil, createARMRateLimitErr(false, "SubnetList")
|
||||
return nil, createRateLimitErr(false, "SubnetList")
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName)
|
||||
klog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName)
|
||||
klog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -686,13 +691,13 @@ func newAzSecurityGroupsClient(config *azClientConfig) *azSecurityGroupsClient {
|
||||
func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "NSGCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "NSGCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -702,7 +707,7 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -710,13 +715,13 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr
|
||||
func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "NSGDelete")
|
||||
err = createRateLimitErr(true, "NSGDelete")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -726,20 +731,20 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "NSGGet")
|
||||
err = createRateLimitErr(false, "NSGGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -750,12 +755,12 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str
|
||||
|
||||
func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
return nil, createARMRateLimitErr(false, "NSGList")
|
||||
return nil, createRateLimitErr(false, "NSGList")
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName)
|
||||
klog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -801,13 +806,13 @@ func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachin
|
||||
func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "VMSSCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "VMSSCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -817,20 +822,20 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSGet")
|
||||
err = createRateLimitErr(false, "VMSSGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -841,13 +846,13 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou

func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSList")
err = createRateLimitErr(false, "VMSSList")
return
}

glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
}()

mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID)
@ -872,13 +877,13 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro
func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMSSUpdateInstances")
err = createRateLimitErr(true, "VMSSUpdateInstances")
return
}

glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
}()

mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID)
@ -888,7 +893,7 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -916,13 +921,13 @@ func newAzVirtualMachineScaleSetVMsClient(config *azClientConfig) *azVirtualMach
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSGet")
|
||||
err = createRateLimitErr(false, "VMSSGet")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -933,13 +938,13 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSGetInstanceView")
|
||||
err = createRateLimitErr(false, "VMSSGetInstanceView")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -950,13 +955,13 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSList")
|
||||
err = createRateLimitErr(false, "VMSSList")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -980,13 +985,13 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "VMSSUpdate")
|
||||
err = createRateLimitErr(true, "VMSSUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
|
||||
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -996,7 +1001,7 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -1025,13 +1030,13 @@ func newAzRoutesClient(config *azClientConfig) *azRoutesClient {
|
||||
func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "RouteCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "RouteCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
|
||||
klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
|
||||
klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1041,7 +1046,7 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -1049,13 +1054,13 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
|
||||
func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "RouteDelete")
|
||||
err = createRateLimitErr(true, "RouteDelete")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
|
||||
klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
|
||||
klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1065,7 +1070,7 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string,
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -1094,13 +1099,13 @@ func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient {
|
||||
func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "RouteTableCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "RouteTableCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName)
|
||||
klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName)
|
||||
klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1110,20 +1115,20 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "GetRouteTable")
|
||||
err = createRateLimitErr(false, "GetRouteTable")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName)
|
||||
klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName)
|
||||
klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1155,13 +1160,13 @@ func newAzStorageAccountClient(config *azClientConfig) *azStorageAccountClient {
|
||||
func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) (result *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "StorageAccountCreate")
|
||||
err = createRateLimitErr(true, "StorageAccountCreate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1170,20 +1175,20 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "DeleteStorageAccount")
|
||||
err = createRateLimitErr(false, "DeleteStorageAccount")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1194,13 +1199,13 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName
|
||||
|
||||
func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "ListStorageAccountKeys")
|
||||
err = createRateLimitErr(false, "ListStorageAccountKeys")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1211,13 +1216,13 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam
|
||||
|
||||
func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "ListStorageAccountsByResourceGroup")
|
||||
err = createRateLimitErr(false, "ListStorageAccountsByResourceGroup")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName)
|
||||
klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName)
|
||||
klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1228,13 +1233,13 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou
|
||||
|
||||
func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "GetStorageAccount/Properties")
|
||||
err = createRateLimitErr(false, "GetStorageAccount/Properties")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName)
|
||||
klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1266,13 +1271,13 @@ func newAzDisksClient(config *azClientConfig) *azDisksClient {
|
||||
func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "DiskCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "DiskCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName)
|
||||
klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName)
|
||||
klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1282,7 +1287,7 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@ -1290,13 +1295,13 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s
|
||||
func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "DiskDelete")
|
||||
err = createRateLimitErr(true, "DiskDelete")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName)
|
||||
klog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName)
|
||||
klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1306,20 +1311,20 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "GetDisk")
|
||||
err = createRateLimitErr(false, "GetDisk")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName)
|
||||
klog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName)
|
||||
klog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID)
|
||||
@ -1327,3 +1332,41 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk
|
||||
mc.Observe(err)
|
||||
return
|
||||
}
|
||||
|
||||
// azVirtualMachineSizesClient implements VirtualMachineSizesClient.
|
||||
type azVirtualMachineSizesClient struct {
|
||||
client compute.VirtualMachineSizesClient
|
||||
rateLimiterReader flowcontrol.RateLimiter
|
||||
rateLimiterWriter flowcontrol.RateLimiter
|
||||
}
|
||||
|
||||
func newAzVirtualMachineSizesClient(config *azClientConfig) *azVirtualMachineSizesClient {
|
||||
VirtualMachineSizesClient := compute.NewVirtualMachineSizesClient(config.subscriptionID)
|
||||
VirtualMachineSizesClient.BaseURI = config.resourceManagerEndpoint
|
||||
VirtualMachineSizesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken)
|
||||
VirtualMachineSizesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&VirtualMachineSizesClient.Client)
|
||||
|
||||
return &azVirtualMachineSizesClient{
|
||||
rateLimiterReader: config.rateLimiterReader,
|
||||
rateLimiterWriter: config.rateLimiterWriter,
|
||||
client: VirtualMachineSizesClient,
|
||||
}
|
||||
}

func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createRateLimitErr(false, "VMSizesList")
return
}

klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location)
defer func() {
klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location)
}()

mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID)
result, err = az.client.List(ctx, location)
mc.Observe(err)
return
}

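Every wrapper client in this file follows the same shape: consume a token from the client-side rate limiter, emit V(10) start/end logs, and record the call through a metric context. The sketch below condenses that pattern. azExampleClient, its embedded SDK client, and the "ExampleGet" operation name are hypothetical placeholders; createRateLimitErr, newMetricContext, and the flowcontrol rate limiters are the package helpers used above.

func (az *azExampleClient) Get(ctx context.Context, resourceGroupName string) (err error) {
	// Fail fast when the local read rate limit is exhausted instead of queueing.
	if !az.rateLimiterReader.TryAccept() {
		return createRateLimitErr(false, "ExampleGet")
	}

	klog.V(10).Infof("azExampleClient.Get(%q): start", resourceGroupName)
	defer klog.V(10).Infof("azExampleClient.Get(%q): end", resourceGroupName)

	// Record request count, latency and errors for this ARM operation.
	mc := newMetricContext("example", "get", resourceGroupName, az.client.SubscriptionID)
	_, err = az.client.Get(ctx, resourceGroupName, "example") // placeholder SDK call
	mc.Observe(err)
	return err
}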
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
@ -20,12 +20,12 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -119,7 +119,7 @@ func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.
|
||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
@ -128,7 +128,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
}
|
||||
@ -139,7 +139,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
|
||||
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
@ -168,7 +168,7 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
nodeName, diskNames)
|
||||
return attached, nil
|
||||
}
|
||||
|
68
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go
generated
vendored
68
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go
generated
vendored
@ -20,8 +20,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
@ -34,6 +34,12 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
|
||||
return err
|
||||
}
|
||||
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
if isManagedDisk {
|
||||
disks = append(disks,
|
||||
@ -67,31 +73,24 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
|
||||
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
|
||||
}
|
||||
}
|
||||
|
||||
// Invalidate the cache right after updating
|
||||
defer as.cloud.vmCache.Delete(vmName)
|
||||
|
||||
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - azure attach failed, err: %v", err)
|
||||
klog.Errorf("azureDisk - attach disk(%s) failed, err: %v", diskName, err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
|
||||
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
|
||||
glog.Infof("azureDisk - err %s, try detach", detail)
|
||||
klog.V(2).Infof("azureDisk - err %v, try detach disk(%s)", err, diskName)
|
||||
as.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure attach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
as.cloud.vmCache.Delete(vmName)
|
||||
klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -102,10 +101,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
|
||||
klog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
|
||||
return nil
|
||||
}
|
||||
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
bFoundDisk := false
|
||||
for i, disk := range disks {
|
||||
@ -113,7 +118,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
bFoundDisk = true
|
||||
break
|
||||
@ -132,25 +137,18 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
|
||||
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
|
||||
}
|
||||
}
|
||||
|
||||
// Invalidate the cache right after updating
|
||||
defer as.cloud.vmCache.Delete(vmName)
|
||||
|
||||
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
|
||||
klog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err)
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure disk detach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
as.cloud.vmCache.Delete(vmName)
|
||||
klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
68
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
68
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
@ -20,8 +20,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
@ -29,7 +29,13 @@ import (
|
||||
// AttachDisk attaches a vhd to vm
|
||||
// the vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -65,27 +71,22 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", ss.resourceGroup, nodeName)
|
||||
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
|
||||
retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
// Invalidate the cache right after updating
|
||||
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
|
||||
defer ss.vmssVMCache.Delete(key)
|
||||
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
|
||||
if err != nil {
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
|
||||
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
|
||||
glog.Infof("azureDisk - err %s, try detach", detail)
|
||||
klog.Infof("azureDisk - err %s, try detach disk(%s)", detail, diskName)
|
||||
ss.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure attach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
|
||||
klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -93,7 +94,13 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
// DetachDiskByName detaches a vhd from host
|
||||
// the vhd can be identified by diskName or diskURI
|
||||
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -108,7 +115,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
bFoundDisk = true
|
||||
break
|
||||
@ -122,22 +129,17 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
vm.StorageProfile.DataDisks = &disks
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", ss.resourceGroup, nodeName)
|
||||
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
|
||||
retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
// Invalidate the cache right after updating
|
||||
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
|
||||
defer ss.vmssVMCache.Delete(key)
|
||||
|
||||
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName)
|
||||
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - azure disk detach %q from %s failed, err: %v", diskName, nodeName, err)
|
||||
klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err)
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure detach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
|
||||
klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName)
|
||||
}
|
||||
|
||||
return err
|
||||
|
47
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
47
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
@ -27,11 +27,11 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
@ -207,6 +207,13 @@ func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName stri
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func (fAPC *fakeAzurePIPClient) setFakeStore(store map[string]map[string]network.PublicIPAddress) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
|
||||
fAPC.FakeStore = store
|
||||
}
|
||||
|
||||
type fakeAzureInterfacesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.Interface
|
||||
@ -247,7 +254,24 @@ func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName
|
||||
}
|
||||
|
||||
func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
return result, nil
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "No such Interface",
|
||||
}
|
||||
}
|
||||
|
||||
func (fIC *fakeAzureInterfacesClient) setFakeStore(store map[string]map[string]network.Interface) {
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
|
||||
fIC.FakeStore = store
|
||||
}
|
||||
|
||||
type fakeAzureVirtualMachinesClient struct {
|
||||
@ -302,6 +326,13 @@ func (fVMC *fakeAzureVirtualMachinesClient) List(ctx context.Context, resourceGr
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) setFakeStore(store map[string]map[string]compute.VirtualMachine) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
fVMC.FakeStore = store
|
||||
}
|
||||
|
||||
type fakeAzureSubnetsClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.Subnet
|
||||
@ -867,11 +898,11 @@ func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availa
|
||||
return nil, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
@ -886,3 +917,7 @@ func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
return nil, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetPowerStatusByNodeName(name string) (string, error) {
|
||||
return "", fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
@ -21,7 +21,7 @@ import (
|
||||
|
||||
azs "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -58,22 +58,11 @@ func (f *azureFileClient) createFileShare(accountName, accountKey, name string,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create a file share and set quota
|
||||
// Note. Per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share,
|
||||
// setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare
|
||||
// receives error "The metadata specified is invalid. It has characters that are not permitted."
|
||||
// As a result,breaking into two API calls: create share and set quota
|
||||
share := fileClient.GetShareReference(name)
|
||||
share.Properties.Quota = sizeGiB
|
||||
if err = share.Create(nil); err != nil {
|
||||
return fmt.Errorf("failed to create file share, err: %v", err)
|
||||
}
|
||||
share.Properties.Quota = sizeGiB
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
if err := share.Delete(nil); err != nil {
|
||||
glog.Errorf("Error deleting share: %v", err)
|
||||
}
|
||||
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -93,7 +82,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string,
|
||||
}
|
||||
share := fileClient.GetShareReference(name)
|
||||
if share.Properties.Quota >= sizeGiB {
|
||||
glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
|
||||
klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
|
||||
share.Properties.Quota, sizeGiB, accountName, name)
|
||||
return nil
|
||||
}
|
||||
@ -101,7 +90,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string,
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
|
||||
}
|
||||
glog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
|
||||
klog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
114
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go
generated
vendored
114
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go
generated
vendored
@ -18,11 +18,17 @@ package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
const metadataURL = "http://169.254.169.254/metadata/"
|
||||
const (
|
||||
metadataCacheTTL = time.Minute
|
||||
metadataCacheKey = "InstanceMetadata"
|
||||
metadataURL = "http://169.254.169.254/metadata/instance"
|
||||
)
|
||||
|
||||
// NetworkMetadata contains metadata about an instance's network
|
||||
type NetworkMetadata struct {
|
||||
@ -54,60 +60,100 @@ type Subnet struct {
|
||||
Prefix string `json:"prefix"`
|
||||
}
|
||||
|
||||
// InstanceMetadata knows how to query the Azure instance metadata server.
|
||||
// ComputeMetadata represents compute information
|
||||
type ComputeMetadata struct {
|
||||
SKU string `json:"sku,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Zone string `json:"zone,omitempty"`
|
||||
VMSize string `json:"vmSize,omitempty"`
|
||||
OSType string `json:"osType,omitempty"`
|
||||
Location string `json:"location,omitempty"`
|
||||
FaultDomain string `json:"platformFaultDomain,omitempty"`
|
||||
UpdateDomain string `json:"platformUpdateDomain,omitempty"`
|
||||
ResourceGroup string `json:"resourceGroupName,omitempty"`
|
||||
VMScaleSetName string `json:"vmScaleSetName,omitempty"`
|
||||
}
|
||||
|
||||
// InstanceMetadata represents instance information.
|
||||
type InstanceMetadata struct {
|
||||
baseURL string
|
||||
Compute *ComputeMetadata `json:"compute,omitempty"`
|
||||
Network *NetworkMetadata `json:"network,omitempty"`
|
||||
}
|
||||
|
||||
// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object.
|
||||
func NewInstanceMetadata() *InstanceMetadata {
|
||||
return &InstanceMetadata{
|
||||
baseURL: metadataURL,
|
||||
// InstanceMetadataService knows how to query the Azure instance metadata server.
|
||||
type InstanceMetadataService struct {
|
||||
metadataURL string
|
||||
imsCache *timedCache
|
||||
}
|
||||
|
||||
// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object.
|
||||
func NewInstanceMetadataService(metadataURL string) (*InstanceMetadataService, error) {
|
||||
ims := &InstanceMetadataService{
|
||||
metadataURL: metadataURL,
|
||||
}
|
||||
}
|
||||
|
||||
// makeMetadataURL makes a complete metadata URL from the given path.
|
||||
func (i *InstanceMetadata) makeMetadataURL(path string) string {
|
||||
return i.baseURL + path
|
||||
}
|
||||
|
||||
// Object queries the metadata server and populates the passed in object
|
||||
func (i *InstanceMetadata) Object(path string, obj interface{}) error {
|
||||
data, err := i.queryMetadataBytes(path, "json")
|
||||
imsCache, err := newTimedcache(metadataCacheTTL, ims.getInstanceMetadata)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
return json.Unmarshal(data, obj)
|
||||
|
||||
ims.imsCache = imsCache
|
||||
return ims, nil
|
||||
}
|
||||
|
||||
// Text queries the metadata server and returns the corresponding text
|
||||
func (i *InstanceMetadata) Text(path string) (string, error) {
|
||||
data, err := i.queryMetadataBytes(path, "text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, error) {
|
||||
client := &http.Client{}
|
||||
|
||||
req, err := http.NewRequest("GET", i.makeMetadataURL(path), nil)
|
||||
func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}, error) {
|
||||
req, err := http.NewRequest("GET", ims.metadataURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Metadata", "True")
|
||||
req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
|
||||
|
||||
q := req.URL.Query()
|
||||
q.Add("format", format)
|
||||
q.Add("api-version", "2017-04-02")
|
||||
q.Add("format", "json")
|
||||
q.Add("api-version", "2017-12-01")
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status)
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj := InstanceMetadata{}
|
||||
err = json.Unmarshal(data, &obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &obj, nil
|
||||
}

// GetMetadata gets instance metadata from cache.
func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) {
cache, err := ims.imsCache.Get(metadataCacheKey)
if err != nil {
return nil, err
}

// Cache shouldn't be nil, but add a check in case something goes wrong.
if cache == nil {
return nil, fmt.Errorf("failure of getting instance metadata")
}

if metadata, ok := cache.(*InstanceMetadata); ok {
return metadata, nil
}

return nil, fmt.Errorf("failure of getting instance metadata")
}

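For reference, the same IMDS request can be reproduced outside the cloud provider. The standalone sketch below mirrors what getInstanceMetadata does: a GET against http://169.254.169.254/metadata/instance with the Metadata header and api-version 2017-12-01, decoding a few of the compute fields. It only works from inside an Azure VM, and the subset of fields decoded here is just an illustration.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "http://169.254.169.254/metadata/instance", nil)
	req.Header.Add("Metadata", "True") // required by IMDS, mirrors the code above
	q := req.URL.Query()
	q.Add("format", "json")
	q.Add("api-version", "2017-12-01")
	req.URL.RawQuery = q.Encode()

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only a small, illustrative subset of the compute section.
	var meta struct {
		Compute struct {
			Name          string `json:"name"`
			VMSize        string `json:"vmSize"`
			ResourceGroup string `json:"resourceGroupName"`
		} `json:"compute"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&meta); err != nil {
		panic(err)
	}
	fmt.Printf("name=%s vmSize=%s resourceGroup=%s\n", meta.Compute.Name, meta.Compute.VMSize, meta.Compute.ResourceGroup)
}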
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
@ -18,22 +18,39 @@ package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
vmPowerStatePrefix = "PowerState/"
|
||||
vmPowerStateStopped = "stopped"
|
||||
vmPowerStateDeallocated = "deallocated"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
unmanaged, err := az.IsNodeUnmanaged(string(name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if unmanaged {
|
||||
klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
|
||||
ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
|
||||
klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -51,7 +68,16 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
}
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
metadata, err := az.metadata.GetMetadata()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if metadata.Compute == nil || metadata.Network == nil {
|
||||
return nil, fmt.Errorf("failure of getting instance metadata")
|
||||
}
|
||||
|
||||
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -61,21 +87,38 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
return addressGetter(name)
|
||||
}
|
||||
|
||||
ipAddress := IPAddress{}
|
||||
err = az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if len(metadata.Network.Interface) == 0 {
|
||||
return nil, fmt.Errorf("no interface is found for the instance")
|
||||
}
|
||||
|
||||
// Use ip address got from instance metadata.
|
||||
ipAddress := metadata.Network.Interface[0]
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP},
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}
|
||||
if len(ipAddress.PublicIP) > 0 {
|
||||
addr := v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: ipAddress.PublicIP,
|
||||
for _, address := range ipAddress.IPV4.IPAddress {
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: address.PrivateIP,
|
||||
})
|
||||
if len(address.PublicIP) > 0 {
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: address.PublicIP,
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, address := range ipAddress.IPV6.IPAddress {
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: address.PrivateIP,
|
||||
})
|
||||
if len(address.PublicIP) > 0 {
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: address.PublicIP,
|
||||
})
|
||||
}
|
||||
addresses = append(addresses, addr)
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
@ -87,6 +130,12 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
if az.IsNodeUnmanagedByProviderID(providerID) {
|
||||
klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -98,6 +147,12 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
// Returns true for unmanaged nodes because azure cloud provider always assumes them exists.
|
||||
if az.IsNodeUnmanagedByProviderID(providerID) {
|
||||
klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@ -116,35 +171,60 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
|
||||
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
|
||||
|
||||
return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
|
||||
var err error
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
if az.VMType == vmTypeVMSS {
|
||||
// VMSS vmName is not same with hostname, use hostname instead.
|
||||
metadataName, err = os.Hostname()
|
||||
metadataVMName, err = os.Hostname()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
metadataName = strings.ToLower(metadataName)
|
||||
return (metadataName == nodeName), err
|
||||
metadataVMName = strings.ToLower(metadataVMName)
|
||||
return (metadataVMName == nodeName), err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
unmanaged, err := az.IsNodeUnmanaged(nodeName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if unmanaged {
|
||||
// InstanceID is same with nodeName for unmanaged nodes.
|
||||
klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name)
|
||||
return nodeName, nil
|
||||
}
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
metadata, err := az.metadata.GetMetadata()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if metadata.Compute == nil {
|
||||
return "", fmt.Errorf("failure of getting instance metadata")
|
||||
}
|
||||
|
||||
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -154,26 +234,25 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
// Get resource group name.
|
||||
resourceGroup := metadata.Compute.ResourceGroup
|
||||
|
||||
// Compose instanceID based on nodeName for standard instance.
|
||||
if az.VMType == vmTypeStandard {
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
return az.getStandardMachineID(resourceGroup, nodeName), nil
|
||||
}
|
||||
|
||||
// Get scale set name and instanceID from vmName for vmss.
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ssName, instanceID, err := extractVmssVMName(metadataName)
|
||||
ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name)
|
||||
if err != nil {
|
||||
if err == ErrorNotVmssInstance {
|
||||
// Compose machineID for standard Node.
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
return az.getStandardMachineID(resourceGroup, nodeName), nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
// Compose instanceID based on ssName and instanceID for vmss instance.
|
||||
return az.getVmssMachineID(ssName, instanceID), nil
|
||||
return az.getVmssMachineID(resourceGroup, ssName, instanceID), nil
|
||||
}
|
||||
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
@ -183,6 +262,12 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
|
||||
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
if az.IsNodeUnmanagedByProviderID(providerID) {
|
||||
klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -196,15 +281,33 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
|
||||
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
|
||||
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
|
||||
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
|
||||
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
unmanaged, err := az.IsNodeUnmanaged(string(name))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if unmanaged {
|
||||
klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
metadata, err := az.metadata.GetMetadata()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if metadata.Compute == nil {
|
||||
return "", fmt.Errorf("failure of getting instance metadata")
|
||||
}
|
||||
|
||||
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if isLocalInstance {
|
||||
machineType, err := az.metadata.Text("instance/compute/vmSize")
|
||||
if err == nil {
|
||||
return machineType, nil
|
||||
if metadata.Compute.VMSize != "" {
|
||||
return metadata.Compute.VMSize, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
218
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go
generated
vendored
Normal file
218
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// setTestVirtualMachines sets test virtual machine with powerstate.
|
||||
func setTestVirtualMachines(c *Cloud, vmList map[string]string) {
|
||||
virtualMachineClient := c.VirtualMachinesClient.(*fakeAzureVirtualMachinesClient)
|
||||
store := map[string]map[string]compute.VirtualMachine{
|
||||
"rg": make(map[string]compute.VirtualMachine),
|
||||
}
|
||||
|
||||
for nodeName, powerState := range vmList {
|
||||
instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName)
|
||||
vm := compute.VirtualMachine{
|
||||
Name: &nodeName,
|
||||
ID: &instanceID,
|
||||
Location: &c.Location,
|
||||
}
|
||||
if powerState != "" {
|
||||
status := []compute.InstanceViewStatus{
|
||||
{
|
||||
Code: to.StringPtr(powerState),
|
||||
},
|
||||
{
|
||||
Code: to.StringPtr("ProvisioningState/succeeded"),
|
||||
},
|
||||
}
|
||||
vm.VirtualMachineProperties = &compute.VirtualMachineProperties{
|
||||
InstanceView: &compute.VirtualMachineInstanceView{
|
||||
Statuses: &status,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
store["rg"][nodeName] = vm
|
||||
}
|
||||
|
||||
virtualMachineClient.setFakeStore(store)
|
||||
}
|
||||
|
||||
func TestInstanceID(t *testing.T) {
|
||||
cloud := getTestCloud()
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
vmList []string
|
||||
nodeName string
|
||||
metadataName string
|
||||
expected string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "InstanceID should get instanceID if node's name are equal to metadataName",
|
||||
vmList: []string{"vm1"},
|
||||
nodeName: "vm1",
|
||||
metadataName: "vm1",
|
||||
expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
|
||||
},
|
||||
{
|
||||
name: "InstanceID should get instanceID from Azure API if node is not local instance",
|
||||
vmList: []string{"vm2"},
|
||||
nodeName: "vm2",
|
||||
metadataName: "vm1",
|
||||
expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
|
||||
},
|
||||
{
|
||||
name: "InstanceID should report error if VM doesn't exist",
|
||||
vmList: []string{"vm1"},
|
||||
nodeName: "vm3",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testcases {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, fmt.Sprintf(`{"compute":{"name":"%s"}}`, test.metadataName))
|
||||
}))
|
||||
go func() {
|
||||
http.Serve(listener, mux)
|
||||
}()
|
||||
defer listener.Close()
|
||||
|
||||
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
vmListWithPowerState := make(map[string]string)
|
||||
for _, vm := range test.vmList {
|
||||
vmListWithPowerState[vm] = ""
|
||||
}
|
||||
setTestVirtualMachines(cloud, vmListWithPowerState)
|
||||
instanceID, err := cloud.InstanceID(context.Background(), types.NodeName(test.nodeName))
|
||||
if test.expectError {
|
||||
if err == nil {
|
||||
t.Errorf("Test [%s] unexpected nil err", test.name)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if instanceID != test.expected {
|
||||
t.Errorf("Test [%s] unexpected instanceID: %s, expected %q", test.name, instanceID, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
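As a side note on the test setup above: the fake metadata endpoint is built by hand with net.Listen plus http.Serve. The equivalent using net/http/httptest, which handles listener allocation and cleanup, would look roughly like this; it is an alternative sketch, not what the vendored test does.

// Inside the test loop, replacing the manual listener setup:
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, `{"compute":{"name":"%s"}}`, test.metadataName)
}))
defer ts.Close()

cloud.metadata, err = NewInstanceMetadataService(ts.URL + "/")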
func TestInstanceShutdownByProviderID(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
vmList map[string]string
|
||||
nodeName string
|
||||
expected bool
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Running status",
|
||||
vmList: map[string]string{"vm1": "PowerState/Running"},
|
||||
nodeName: "vm1",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Deallocated status",
|
||||
vmList: map[string]string{"vm2": "PowerState/Deallocated"},
|
||||
nodeName: "vm2",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Deallocating status",
|
||||
vmList: map[string]string{"vm3": "PowerState/Deallocating"},
|
||||
nodeName: "vm3",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Starting status",
|
||||
vmList: map[string]string{"vm4": "PowerState/Starting"},
|
||||
nodeName: "vm4",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Stopped status",
|
||||
vmList: map[string]string{"vm5": "PowerState/Stopped"},
|
||||
nodeName: "vm5",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopping status",
|
||||
vmList: map[string]string{"vm6": "PowerState/Stopping"},
|
||||
nodeName: "vm6",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Unknown status",
|
||||
vmList: map[string]string{"vm7": "PowerState/Unknown"},
|
||||
nodeName: "vm7",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "InstanceShutdownByProviderID should report error if VM doesn't exist",
|
||||
vmList: map[string]string{"vm1": "PowerState/running"},
|
||||
nodeName: "vm8",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testcases {
|
||||
cloud := getTestCloud()
|
||||
setTestVirtualMachines(cloud, test.vmList)
|
||||
providerID := "azure://" + cloud.getStandardMachineID("rg", test.nodeName)
|
||||
hasShutdown, err := cloud.InstanceShutdownByProviderID(context.Background(), providerID)
|
||||
if test.expectError {
|
||||
if err == nil {
|
||||
t.Errorf("Test [%s] unexpected nil err", test.name)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if hasShutdown != test.expected {
|
||||
t.Errorf("Test [%s] unexpected hasShutdown: %v, expected %v", test.name, hasShutdown, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
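The table of cases above boils down to a single comparison, shown in the sketch below. isVMShutDown is a hypothetical helper written only to summarize the behavior: after the PowerState/ prefix has been stripped, only the stopped and deallocated states count as shut down, while transitional states such as stopping or deallocating do not.

func isVMShutDown(powerStatus string) bool {
	s := strings.ToLower(powerStatus)
	return s == vmPowerStateStopped || s == vmPowerStateDeallocated
}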
718
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
718
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
File diff suppressed because it is too large
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
@ -1,77 +0,0 @@
# Azure LoadBalancer

Azure defines load balancers differently from GCE and AWS. An Azure LB can have multiple frontend IP references, while GCE and AWS allow only one per LB (you need another LB if you want more). Because of this, the Public IP is not part of the LB in Azure, and neither is the NSG. They still cannot be deleted in parallel: a Public IP can only be deleted after the LB's frontend IP reference to it has been removed.

Azure resources such as the LB, Public IP and NSG are peers at the same tier. Their ensure loops must not depend on one another; in other words, each of them should eventually be reconciled regardless of the state of the other resources, and should depend only on the service state.

Despite that ideal, reality intrudes: the NSG adjusts its rules based on the LB's frontend IP, so when we reconcile the NSG the LB must already contain the corresponding frontend IP configuration.

Also, for Azure we cannot afford more than one service_controller worker. Different services can operate on the same LB, and concurrent execution could conflict or produce unexpected results. AWS and GCE do not have this problem because they use one LB per service.

There are two load balancers per availability set: internal and external. There is a limit on the number of services that can be associated with a single load balancer.
By default the primary load balancer is selected. Services can be annotated to allow automatic selection of an available load balancer, and annotations can also name specific availability sets that host the load balancers. Note that with automatic selection or a specific availability-set selection, if the availability set is lost due to downtime or cluster scale-down, the services are currently not automatically reassigned to an available load balancer.
Service annotation for automatic and specific load balancer mode:

- service.beta.kubernetes.io/azure-load-balancer-mode (__auto__|as1,as2...)

## Introduced functions

- reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error)
  - Go through the LB's properties and update them based on wantLb
  - If anything on the LB changed, whether or not the LB exists
    - Call the Azure API to CreateOrUpdate the LB, or Delete it if nothing is left
  - return lb, err

- reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error)
  - Go through the NSG's properties and update them based on wantLb
    - Use destinationIPAddress as the target address if possible
    - Consolidate NSG rules if possible
  - If anything on the NSG changed (the NSG should always exist)
    - Call the Azure API to CreateOrUpdate the NSG
  - return sg, err

- reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error)
  - List all public IPs in the resource group
  - Only touch Public IP resources that have tags[service] = "namespace/serviceName"
    - skip for wantLb && !isInternal && pipName == desiredPipName
    - delete any other public IP resources
  - if !isInternal && wantLb
    - ensure a Public IP named desiredPipName exists

- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
  - gets the load balancer for the service if it already exists
  - If wantLb is TRUE it selects a new load balancer; the selection helps distribute services across load balancers
  - If the selected load balancer does not exist it returns a network.LoadBalancer struct with added metadata (such as name and location) and existsLB set to FALSE
  - By default the cluster's default LB is returned

## Interface behaviors

### GetLoadBalancer

- Get LoadBalancer status, return status, error
  - returns the load balancer status for this service
  - does not create, update or delete any resource

### EnsureLoadBalancer

- Reconcile the LB for the flipped service
  - Call reconcileLoadBalancer(clusterName, flipedService, nil, false /* wantLb */)
- Reconcile the Public IP
  - Call reconcilePublicIP(cluster, service, true)
- Reconcile the LB's related and owned resources, such as FrontEndIPConfig, Rules and Probe
  - Call reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
- Reconcile NSG rules; this has to run after reconcileLoadBalancer
  - Call reconcileSecurityGroup(clusterName, service, lbStatus, true /* wantLb */)

### UpdateLoadBalancer

- Same behavior as EnsureLoadBalancer

### EnsureLoadBalancerDeleted

- Reconcile the NSG first, before the LB, because the NSG needs the LB to still exist
  - Call reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */)
- Reconcile the LB's related and owned resources, such as FrontEndIPConfig, Rules and Probe
  - Call reconcileLoadBalancer(clusterName, service, nodes, false)
- Reconcile the Public IP; it needs the related LB reconciled first
  - Call reconcilePublicIP(cluster, service, false)
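The ordering constraints above can be condensed into a short sketch. This is an illustration of the call order only, assuming the reconcile helpers listed earlier; the real implementation in azure_loadbalancer.go carries more state and error handling.

```go
// Deletion path: NSG first (it references the LB frontend IP), then the LB and
// the resources it owns, and the Public IP last, once the LB no longer uses it.
func (az *Cloud) ensureLoadBalancerDeletedSketch(clusterName string, service *v1.Service) error {
	if _, err := az.reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */); err != nil {
		return err
	}
	if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil {
		return err
	}
	if _, err := az.reconcilePublicIP(clusterName, service, false /* wantLb */); err != nil {
		return err
	}
	return nil
}
```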
33
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
33
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
@ -18,11 +18,13 @@ package azure

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/stretchr/testify/assert"
	"k8s.io/api/core/v1"
)

func TestFindProbe(t *testing.T) {
@ -210,3 +212,34 @@ func TestFindRule(t *testing.T) {
		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
	}
}

func TestGetIdleTimeout(t *testing.T) {
	for _, c := range []struct {
		desc        string
		annotations map[string]string
		i           *int32
		err         bool
	}{
		{desc: "no annotation"},
		{desc: "annotation empty value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: ""}, err: true},
		{desc: "annotation not a number", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "cookies"}, err: true},
		{desc: "annotation negative value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "-6"}, err: true},
		{desc: "annotation zero value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "0"}, err: true},
		{desc: "annotation too low value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "3"}, err: true},
		{desc: "annotation too high value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "31"}, err: true},
		{desc: "annotation good value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "24"}, i: to.Int32Ptr(24)},
	} {
		t.Run(c.desc, func(t *testing.T) {
			s := &v1.Service{}
			s.Annotations = c.annotations
			i, err := getIdleTimeout(s)

			if !reflect.DeepEqual(c.i, i) {
				t.Fatalf("got unexpected value: %d", to.Int32(i))
			}
			if (err != nil) != c.err {
				t.Fatalf("expected error=%v, got %v", c.err, err)
			}
		})
	}
}

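The table-driven test above pins down the behaviour expected of getIdleTimeout: a missing annotation returns nil, non-numeric or empty values fail, and the "too low" (3) and "too high" (31) cases imply an accepted range of 4 to 30 minutes. Below is a hedged sketch of validation consistent with those cases; it is not the vendored getIdleTimeout, it takes the annotations map directly rather than a *v1.Service so it stays self-contained, and the annotation string value is an assumption.

```go
// Sketch, not the vendored implementation: validate the idle-timeout
// annotation the way the test cases above require.
package main

import (
	"fmt"
	"strconv"
)

// Assumed: mirrors the constant name used in the test; the string value
// here is illustrative, check the vendored package for the real one.
const ServiceAnnotationLoadBalancerIdleTimeout = "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout"

func getIdleTimeoutSketch(annotations map[string]string) (*int32, error) {
	const (
		minTimeout = 4  // implied by the "too low value" (3) case
		maxTimeout = 30 // implied by the "too high value" (31) case
	)
	val, ok := annotations[ServiceAnnotationLoadBalancerIdleTimeout]
	if !ok {
		// No annotation: the caller falls back to the Azure default.
		return nil, nil
	}
	n, err := strconv.Atoi(val)
	if err != nil {
		return nil, fmt.Errorf("error parsing idle timeout %q: %v", val, err)
	}
	if n < minTimeout || n > maxTimeout {
		return nil, fmt.Errorf("idle timeout %d not within [%d, %d]", n, minTimeout, maxTimeout)
	}
	out := int32(n)
	return &out, nil
}

func main() {
	if v, err := getIdleTimeoutSketch(map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "24"}); err == nil && v != nil {
		fmt.Println(*v) // 24
	}
}
```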
177
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
177
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
@ -17,40 +17,78 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd
|
||||
defaultDiskIOPSReadWrite = 500
|
||||
defaultDiskMBpsReadWrite = 100
|
||||
)
|
||||
|
||||
//ManagedDiskController : managed disk controller struct
|
||||
type ManagedDiskController struct {
|
||||
common *controllerCommon
|
||||
}
|
||||
|
||||
// ManagedDiskOptions specifies the options of managed disks.
|
||||
type ManagedDiskOptions struct {
|
||||
// The name of the disk.
|
||||
DiskName string
|
||||
// The size in GB.
|
||||
SizeGB int
|
||||
// The name of PVC.
|
||||
PVCName string
|
||||
// The name of resource group.
|
||||
ResourceGroup string
|
||||
// The AvailabilityZone to create the disk.
|
||||
AvailabilityZone string
|
||||
// The tags of the disk.
|
||||
Tags map[string]string
|
||||
// The SKU of storage account.
|
||||
StorageAccountType compute.DiskStorageAccountTypes
|
||||
// IOPS Caps for UltraSSD disk
|
||||
DiskIOPSReadWrite string
|
||||
// Throughput Cap (MBps) for UltraSSD disk
|
||||
DiskMBpsReadWrite string
|
||||
}
|
||||
|
||||
func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
|
||||
return &ManagedDiskController{common: common}, nil
|
||||
}
|
||||
|
||||
//CreateManagedDisk : create managed disk
|
||||
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string,
|
||||
sizeGB int, tags map[string]string) (string, error) {
|
||||
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
|
||||
func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) {
|
||||
var err error
|
||||
klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
|
||||
|
||||
var createZones *[]string
|
||||
if len(options.AvailabilityZone) > 0 {
|
||||
zoneList := []string{c.common.cloud.GetZoneID(options.AvailabilityZone)}
|
||||
createZones = &zoneList
|
||||
}
|
||||
|
||||
// insert original tags to newTags
|
||||
newTags := make(map[string]*string)
|
||||
azureDDTag := "kubernetes-azure-dd"
|
||||
newTags["created-by"] = &azureDDTag
|
||||
|
||||
// insert original tags to newTags
|
||||
if tags != nil {
|
||||
for k, v := range tags {
|
||||
if options.Tags != nil {
|
||||
for k, v := range options.Tags {
|
||||
// Azure won't allow / (forward slash) in tags
|
||||
newKey := strings.Replace(k, "/", "-", -1)
|
||||
newValue := strings.Replace(v, "/", "-", -1)
|
||||
@ -58,25 +96,59 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
}
|
||||
}
|
||||
|
||||
diskSizeGB := int32(sizeGB)
|
||||
diskSizeGB := int32(options.SizeGB)
|
||||
diskSku := compute.DiskStorageAccountTypes(options.StorageAccountType)
|
||||
diskProperties := compute.DiskProperties{
|
||||
DiskSizeGB: &diskSizeGB,
|
||||
CreationData: &compute.CreationData{CreateOption: compute.Empty},
|
||||
}
|
||||
|
||||
if diskSku == compute.UltraSSDLRS {
|
||||
diskIOPSReadWrite := int64(defaultDiskIOPSReadWrite)
|
||||
if options.DiskIOPSReadWrite != "" {
|
||||
v, err := strconv.Atoi(options.DiskIOPSReadWrite)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %v", err)
|
||||
}
|
||||
diskIOPSReadWrite = int64(v)
|
||||
}
|
||||
diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
|
||||
|
||||
diskMBpsReadWrite := int32(defaultDiskMBpsReadWrite)
|
||||
if options.DiskMBpsReadWrite != "" {
|
||||
v, err := strconv.Atoi(options.DiskMBpsReadWrite)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %v", err)
|
||||
}
|
||||
diskMBpsReadWrite = int32(v)
|
||||
}
|
||||
diskProperties.DiskMBpsReadWrite = to.Int32Ptr(diskMBpsReadWrite)
|
||||
} else {
|
||||
if options.DiskIOPSReadWrite != "" {
|
||||
return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type")
|
||||
}
|
||||
if options.DiskMBpsReadWrite != "" {
|
||||
return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type")
|
||||
}
|
||||
}
|
||||
|
||||
model := compute.Disk{
|
||||
Location: &c.common.location,
|
||||
Tags: newTags,
|
||||
Zones: createZones,
|
||||
Sku: &compute.DiskSku{
|
||||
Name: compute.StorageAccountTypes(storageAccountType),
|
||||
Name: diskSku,
|
||||
},
|
||||
DiskProperties: &compute.DiskProperties{
|
||||
DiskSizeGB: &diskSizeGB,
|
||||
CreationData: &compute.CreationData{CreateOption: compute.Empty},
|
||||
}}
|
||||
DiskProperties: &diskProperties,
|
||||
}
|
||||
|
||||
if resourceGroup == "" {
|
||||
resourceGroup = c.common.resourceGroup
|
||||
if options.ResourceGroup == "" {
|
||||
options.ResourceGroup = c.common.resourceGroup
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, model)
|
||||
_, err = c.common.cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -84,7 +156,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
diskID := ""
|
||||
|
||||
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
|
||||
provisionState, id, err := c.getDisk(resourceGroup, diskName)
|
||||
provisionState, id, err := c.getDisk(options.ResourceGroup, options.DiskName)
|
||||
diskID = id
|
||||
// We are waiting for provisioningState==Succeeded
|
||||
// We don't want to hand-off managed disks to k8s while they are
|
||||
@ -99,9 +171,9 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
|
||||
klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB)
|
||||
} else {
|
||||
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
|
||||
klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
|
||||
}
|
||||
|
||||
return diskID, nil
|
||||
@ -125,7 +197,7 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
|
||||
// We don't need poll here, k8s will immediately stop referencing the disk
|
||||
// the disk will be eventually deleted - cleanly - by ARM
|
||||
|
||||
glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
|
||||
klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -172,7 +244,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
|
||||
requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024))
|
||||
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
|
||||
|
||||
glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
|
||||
klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
|
||||
// If disk already of greater or equal size than requested we return
|
||||
if *result.DiskProperties.DiskSizeGB >= requestGiB {
|
||||
return newSizeQuant, nil
|
||||
@ -186,7 +258,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
|
||||
klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
|
||||
|
||||
return newSizeQuant, nil
|
||||
}
|
||||
@ -201,3 +273,58 @@ func getResourceGroupFromDiskURI(diskURI string) (string, error) {
|
||||
}
|
||||
return fields[4], nil
|
||||
}
|
||||
|
||||
// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
|
||||
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
|
||||
// Ignore if not AzureDisk.
|
||||
if pv.Spec.AzureDisk == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Ignore any volumes that are being provisioned
|
||||
if pv.Spec.AzureDisk.DiskName == volume.ProvisionedVolumeName {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return c.GetAzureDiskLabels(pv.Spec.AzureDisk.DataDiskURI)
|
||||
}
|
||||
|
||||
// GetAzureDiskLabels gets availability zone labels for Azuredisk.
|
||||
func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
|
||||
// Get disk's resource group.
|
||||
diskName := path.Base(diskURI)
|
||||
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get information of the disk.
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
disk, err := c.DisksClient.Get(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check whether availability zone is specified.
|
||||
if disk.Zones == nil || len(*disk.Zones) == 0 {
|
||||
klog.V(4).Infof("Azure disk %q is not zoned", diskName)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
zones := *disk.Zones
|
||||
zoneID, err := strconv.Atoi(zones[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %v", zones, diskName, err)
|
||||
}
|
||||
|
||||
zone := c.makeZone(zoneID)
|
||||
klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
|
||||
labels := map[string]string{
|
||||
kubeletapis.LabelZoneRegion: c.Location,
|
||||
kubeletapis.LabelZoneFailureDomain: zone,
|
||||
}
|
||||
return labels, nil
|
||||
}
|
||||
|
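The managed-disk hunks above replace CreateManagedDisk's positional arguments with a single ManagedDiskOptions struct and add IOPS/throughput knobs that are only valid for UltraSSD_LRS. The sketch below shows one way the reworked call could be used from inside the same package; the function name, the controller parameter, and all field values are illustrative assumptions, and only the field names and the CreateManagedDisk signature mirror the diff.

```go
package azure

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// exampleCreateUltraSSD is a hedged usage sketch of the reworked
// CreateManagedDisk; c is assumed to be an already-initialized controller.
func exampleCreateUltraSSD(c *ManagedDiskController) (string, error) {
	opts := &ManagedDiskOptions{
		DiskName:           "pvc-disk-01",
		SizeGB:             128,
		PVCName:            "data-pvc",
		ResourceGroup:      "", // empty falls back to the cloud's configured resource group
		AvailabilityZone:   "1",
		Tags:               map[string]string{"created-for": "example"},
		StorageAccountType: compute.UltraSSDLRS,
		// Only meaningful for UltraSSD_LRS; other SKUs must leave these empty,
		// otherwise CreateManagedDisk returns an error.
		DiskIOPSReadWrite: "600",
		DiskMBpsReadWrite: "120",
	}
	return c.CreateManagedDisk(opts)
}
```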
94
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
94
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
@ -20,19 +20,41 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
|
||||
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
|
||||
glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
|
||||
klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
return processRoutes(routeTable, existsRouteTable, err)
|
||||
routes, err := processRoutes(routeTable, existsRouteTable, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Compose routes for unmanaged routes so that node controller won't retry creating routes for them.
|
||||
unmanagedNodes, err := az.GetUnmanagedNodes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
az.routeCIDRsLock.Lock()
|
||||
defer az.routeCIDRsLock.Unlock()
|
||||
for _, nodeName := range unmanagedNodes.List() {
|
||||
if cidr, ok := az.routeCIDRs[nodeName]; ok {
|
||||
routes = append(routes, &cloudprovider.Route{
|
||||
Name: nodeName,
|
||||
TargetNode: mapRouteNameToNodeName(nodeName),
|
||||
DestinationCIDR: cidr,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
// Injectable for testing
|
||||
@ -50,7 +72,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
|
||||
for i, route := range *routeTable.Routes {
|
||||
instance := mapRouteNameToNodeName(*route.Name)
|
||||
cidr := *route.AddressPrefix
|
||||
glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
|
||||
klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
|
||||
|
||||
kubeRoutes[i] = &cloudprovider.Route{
|
||||
Name: *route.Name,
|
||||
@ -60,13 +82,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(10).Info("ListRoutes: FINISH")
|
||||
klog.V(10).Info("ListRoutes: FINISH")
|
||||
return kubeRoutes, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
|
||||
glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return err
|
||||
} else if existsRouteTable {
|
||||
return nil
|
||||
@ -81,17 +103,17 @@ func (az *Cloud) createRouteTable() error {
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
klog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
klog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
klog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -107,7 +129,21 @@ func (az *Cloud) createRouteTable() error {
|
||||
// route.Name will be ignored, although the cloud-provider may use nameHint
|
||||
// to create a more user-meaningful name.
|
||||
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
// Returns for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
nodeName := string(kubeRoute.TargetNode)
|
||||
unmanaged, err := az.IsNodeUnmanaged(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if unmanaged {
|
||||
klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
|
||||
az.routeCIDRsLock.Lock()
|
||||
defer az.routeCIDRsLock.Unlock()
|
||||
az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -126,51 +162,65 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
|
||||
},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
retryErr := az.CreateOrUpdateRouteWithRetry(route)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRoute deletes the specified managed route
|
||||
// Route should be as returned by ListRoutes
|
||||
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
// Returns for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
nodeName := string(kubeRoute.TargetNode)
|
||||
unmanaged, err := az.IsNodeUnmanaged(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if unmanaged {
|
||||
klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
|
||||
az.routeCIDRsLock.Lock()
|
||||
defer az.routeCIDRsLock.Unlock()
|
||||
delete(az.routeCIDRs, nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
|
||||
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
|
||||
glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)
|
||||
klog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)
|
||||
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
retryErr := az.DeleteRouteWithRetry(routeName)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
@ -22,7 +22,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
@ -38,6 +39,8 @@ func TestDeleteRoute(t *testing.T) {
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
unmanagedNodes: sets.NewString(),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
|
||||
routeName := mapNodeNameToRouteName(route.TargetNode)
|
||||
@ -62,6 +65,28 @@ func TestDeleteRoute(t *testing.T) {
|
||||
ob, found := mp[routeName]
|
||||
if found {
|
||||
t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// test delete route for unmanaged nodes.
|
||||
nodeName := "node1"
|
||||
nodeCIDR := "4.3.2.1/24"
|
||||
cloud.unmanagedNodes.Insert(nodeName)
|
||||
cloud.routeCIDRs = map[string]string{
|
||||
nodeName: nodeCIDR,
|
||||
}
|
||||
route1 := cloudprovider.Route{
|
||||
TargetNode: mapRouteNameToNodeName(nodeName),
|
||||
DestinationCIDR: nodeCIDR,
|
||||
}
|
||||
err = cloud.DeleteRoute(context.TODO(), "cluster", &route1)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error deleting route: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
cidr, found := cloud.routeCIDRs[nodeName]
|
||||
if found {
|
||||
t.Errorf("unexpected CIDR item (%q) for %s", cidr, nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -79,6 +104,8 @@ func TestCreateRoute(t *testing.T) {
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
unmanagedNodes: sets.NewString(),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
cache, _ := cloud.newRouteTableCache()
|
||||
cloud.rtCache = cache
|
||||
@ -122,6 +149,29 @@ func TestCreateRoute(t *testing.T) {
|
||||
if *routeInfo.NextHopIPAddress != nodeIP {
|
||||
t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
|
||||
}
|
||||
|
||||
// test create route for unmanaged nodes.
|
||||
nodeName := "node1"
|
||||
nodeCIDR := "4.3.2.1/24"
|
||||
cloud.unmanagedNodes.Insert(nodeName)
|
||||
cloud.routeCIDRs = map[string]string{}
|
||||
route1 := cloudprovider.Route{
|
||||
TargetNode: mapRouteNameToNodeName(nodeName),
|
||||
DestinationCIDR: nodeCIDR,
|
||||
}
|
||||
err = cloud.CreateRoute(context.TODO(), "cluster", "unused", &route1)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error creating route: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
cidr, found := cloud.routeCIDRs[nodeName]
|
||||
if !found {
|
||||
t.Errorf("unexpected missing item for %s", nodeName)
|
||||
t.FailNow()
|
||||
}
|
||||
if cidr != nodeCIDR {
|
||||
t.Errorf("unexpected cidr %s, saw %s", nodeCIDR, cidr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {
|
||||
|
230
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
230
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
@ -26,16 +27,16 @@ import (
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -61,22 +62,24 @@ const (
|
||||
var errNotInVMSet = errors.New("vm is not in the vmset")
|
||||
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
|
||||
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
|
||||
var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
|
||||
var publicIPResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/publicIPAddresses/(?:.*)`)
|
||||
|
||||
// getStandardMachineID returns the full identifier of a virtual machine.
|
||||
func (az *Cloud) getStandardMachineID(machineName string) string {
|
||||
func (az *Cloud) getStandardMachineID(resourceGroup, machineName string) string {
|
||||
return fmt.Sprintf(
|
||||
machineIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
resourceGroup,
|
||||
machineName)
|
||||
}
|
||||
|
||||
// returns the full identifier of an availabilitySet
|
||||
func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
|
||||
func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
|
||||
return fmt.Sprintf(
|
||||
availabilitySetIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
resourceGroup,
|
||||
availabilitySetName)
|
||||
}
|
||||
|
||||
@ -123,7 +126,7 @@ func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (
|
||||
// Thus Azure do not allow mixed type (public and internal) load balancer.
|
||||
// So we'd have a separate name for internal load balancer.
|
||||
// This would be the name for Azure LoadBalancer resource.
|
||||
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
|
||||
func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
|
||||
lbNamePrefix := vmSetName
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
|
||||
lbNamePrefix = clusterName
|
||||
@ -220,20 +223,22 @@ func getBackendPoolName(clusterName string) string {
|
||||
return clusterName
|
||||
}
|
||||
|
||||
func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
|
||||
func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
|
||||
prefix := az.getRulePrefix(service)
|
||||
if subnetName == nil {
|
||||
return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port)
|
||||
return fmt.Sprintf("%s-%s-%d", prefix, port.Protocol, port.Port)
|
||||
}
|
||||
return fmt.Sprintf("%s-%s-%s-%d", getRulePrefix(service), *subnetName, port.Protocol, port.Port)
|
||||
return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, port.Protocol, port.Port)
|
||||
}
|
||||
|
||||
func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
|
||||
func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
|
||||
if useSharedSecurityRule(service) {
|
||||
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
|
||||
return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
|
||||
}
|
||||
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
|
||||
return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix)
|
||||
rulePrefix := az.getRulePrefix(service)
|
||||
return fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix)
|
||||
}
|
||||
|
||||
// This returns a human-readable version of the Service used to tag some resources.
|
||||
@ -243,26 +248,26 @@ func getServiceName(service *v1.Service) string {
|
||||
}
|
||||
|
||||
// This returns a prefix for loadbalancer/security rules.
|
||||
func getRulePrefix(service *v1.Service) string {
|
||||
return cloudprovider.GetLoadBalancerName(service)
|
||||
func (az *Cloud) getRulePrefix(service *v1.Service) string {
|
||||
return az.GetLoadBalancerName(context.TODO(), "", service)
|
||||
}
|
||||
|
||||
func getPublicIPName(clusterName string, service *v1.Service) string {
|
||||
return fmt.Sprintf("%s-%s", clusterName, cloudprovider.GetLoadBalancerName(service))
|
||||
func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) string {
|
||||
return fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service))
|
||||
}
|
||||
|
||||
func serviceOwnsRule(service *v1.Service, rule string) bool {
|
||||
prefix := getRulePrefix(service)
|
||||
func (az *Cloud) serviceOwnsRule(service *v1.Service, rule string) bool {
|
||||
prefix := az.getRulePrefix(service)
|
||||
return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
|
||||
}
|
||||
|
||||
func serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
|
||||
baseName := cloudprovider.GetLoadBalancerName(service)
|
||||
func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
|
||||
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
|
||||
return strings.HasPrefix(*fip.Name, baseName)
|
||||
}
|
||||
|
||||
func getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
|
||||
baseName := cloudprovider.GetLoadBalancerName(service)
|
||||
func (az *Cloud) getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
|
||||
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
|
||||
if subnetName != nil {
|
||||
return fmt.Sprintf("%s-%s", baseName, *subnetName)
|
||||
}
|
||||
@ -303,51 +308,6 @@ func MakeCRC32(str string) string {
|
||||
return strconv.FormatUint(uint64(hash), 10)
|
||||
}
|
||||
|
||||
//ExtractVMData : extract dataDisks, storageProfile from a map struct
|
||||
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
|
||||
storageProfile map[string]interface{},
|
||||
hardwareProfile map[string]interface{}, err error) {
|
||||
props, ok := vmData["properties"].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
|
||||
}
|
||||
|
||||
storageProfile, ok = props["storageProfile"].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
|
||||
}
|
||||
|
||||
hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
|
||||
}
|
||||
|
||||
dataDisks, ok = storageProfile["dataDisks"].([]interface{})
|
||||
if !ok {
|
||||
return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
|
||||
}
|
||||
return dataDisks, storageProfile, hardwareProfile, nil
|
||||
}
|
||||
|
||||
//ExtractDiskData : extract provisioningState, diskState from a map struct
|
||||
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
|
||||
fragment, ok := diskData.(map[string]interface{})
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("convert diskData to map error")
|
||||
}
|
||||
|
||||
properties, ok := fragment["properties"].(map[string]interface{})
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("convert diskData(properties) to map error")
|
||||
}
|
||||
|
||||
provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there
|
||||
if ref, ok := properties["diskState"]; ok {
|
||||
diskState = ref.(string)
|
||||
}
|
||||
return provisioningState, diskState, nil
|
||||
}
|
||||
|
||||
// availabilitySet implements VMSet interface for Azure availability sets.
|
||||
type availabilitySet struct {
|
||||
*Cloud
|
||||
@ -373,10 +333,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
|
||||
}
|
||||
if err != nil {
|
||||
if as.CloudProviderBackoff {
|
||||
glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
|
||||
klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
|
||||
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
|
||||
klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
@ -386,6 +346,26 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
|
||||
return *machine.ID, nil
|
||||
}
|
||||
|
||||
// GetPowerStatusByNodeName returns the power state of the specified node.
|
||||
func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
|
||||
vm, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
return powerState, err
|
||||
}
|
||||
|
||||
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
|
||||
statuses := *vm.InstanceView.Statuses
|
||||
for _, status := range statuses {
|
||||
state := to.String(status.Code)
|
||||
if strings.HasPrefix(state, vmPowerStatePrefix) {
|
||||
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to get power status for node %q", name)
|
||||
}
|
||||
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
// NodeName is part of providerID for standard instances.
|
||||
@ -401,21 +381,36 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
|
||||
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
machine, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(machine.HardwareProfile.VMSize), nil
|
||||
}
|
||||
|
||||
// GetZoneByNodeName gets zone from instance view.
|
||||
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
|
||||
// with availability zone, then it returns fault domain.
|
||||
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
vm, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
|
||||
var failureDomain string
|
||||
if vm.Zones != nil && len(*vm.Zones) > 0 {
|
||||
// Get availability zone for the node.
|
||||
zones := *vm.Zones
|
||||
zoneID, err := strconv.Atoi(zones[0])
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
|
||||
}
|
||||
|
||||
failureDomain = as.makeZone(zoneID)
|
||||
} else {
|
||||
// Availability zone is not used for the node, falling back to fault domain.
|
||||
failureDomain = strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
|
||||
}
|
||||
|
||||
zone := cloudprovider.Zone{
|
||||
FailureDomain: failureDomain,
|
||||
Region: *(vm.Location),
|
||||
@ -438,7 +433,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
klog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
@ -465,9 +460,9 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
|
||||
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
|
||||
// a list of availability sets that match the nodes available to k8s.
|
||||
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
|
||||
vms, err := as.VirtualMachineClientListWithRetry()
|
||||
vms, err := as.VirtualMachineClientListWithRetry(as.ResourceGroup)
|
||||
if err != nil {
|
||||
glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
|
||||
klog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
|
||||
@ -486,7 +481,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
|
||||
}
|
||||
asID, ok := vmNameToAvailabilitySetID[nodeName]
|
||||
if !ok {
|
||||
glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
|
||||
klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
|
||||
return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
|
||||
}
|
||||
if availabilitySetIDs.Has(asID) {
|
||||
@ -495,7 +490,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
|
||||
}
|
||||
asName, err := getLastSegment(asID)
|
||||
if err != nil {
|
||||
glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
|
||||
klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
|
||||
return nil, err
|
||||
}
|
||||
// AvailabilitySet ID is currently upper cased in a indeterministic way
|
||||
@ -521,11 +516,11 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
|
||||
}
|
||||
availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
|
||||
klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*availabilitySetNames) == 0 {
|
||||
glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
// sort the list to have deterministic selection
|
||||
@ -545,7 +540,7 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
|
||||
klog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
|
||||
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
|
||||
}
|
||||
}
|
||||
@ -560,13 +555,33 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interfa
|
||||
return as.getPrimaryInterfaceWithVMSet(nodeName, "")
|
||||
}
|
||||
|
||||
// extractResourceGroupByNicID extracts the resource group name by nicID.
|
||||
func extractResourceGroupByNicID(nicID string) (string, error) {
|
||||
matches := nicResourceGroupRE.FindStringSubmatch(nicID)
|
||||
if len(matches) != 2 {
|
||||
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// extractResourceGroupByPipID extracts the resource group name by publicIP ID.
|
||||
func extractResourceGroupByPipID(pipID string) (string, error) {
|
||||
matches := publicIPResourceGroupRE.FindStringSubmatch(pipID)
|
||||
if len(matches) != 2 {
|
||||
return "", fmt.Errorf("error of extracting resourceGroup from pipID %q", pipID)
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
|
||||
var machine compute.VirtualMachine
|
||||
|
||||
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
|
||||
klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
@ -578,6 +593,10 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
nodeResourceGroup, err := as.GetNodeResourceGroup(nodeName)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check availability set name. Note that vmSetName is empty string when getting
|
||||
// the Node's IP address. While vmSetName is not empty, it should be checked with
|
||||
@ -587,17 +606,22 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
|
||||
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
|
||||
// don't check vmSet for it.
|
||||
if vmSetName != "" && !as.useStandardLoadBalancer() {
|
||||
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
|
||||
expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
|
||||
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
klog.V(3).Infof(
|
||||
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
|
||||
return network.Interface{}, errNotInVMSet
|
||||
}
|
||||
}
|
||||
|
||||
nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
|
||||
nic, err := as.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
@ -607,21 +631,22 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
|
||||
|
||||
// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
serviceName := getServiceName(service)
|
||||
nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
|
||||
if err != nil {
|
||||
if err == errNotInVMSet {
|
||||
glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
|
||||
klog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
|
||||
klog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState {
|
||||
glog.V(3).Infof("ensureHostInPool skips node %s because its primdary nic %s is in Failed state", nodeName, nic.Name)
|
||||
klog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -654,7 +679,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
|
||||
klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -669,17 +694,17 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
|
||||
nicName := *nic.Name
|
||||
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic)
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
|
||||
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
|
||||
retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
|
||||
klog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
|
||||
retryErr := as.CreateOrUpdateInterfaceWithRetry(service, nic)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
|
||||
klog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -691,19 +716,24 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
hostUpdates := make([]func() error, 0, len(nodes))
|
||||
for _, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
|
||||
klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
|
||||
continue
|
||||
}
|
||||
|
||||
if as.ShouldNodeExcludedFromLoadBalancer(node) {
|
||||
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
|
||||
continue
|
||||
}
|
||||
|
||||
f := func() error {
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
|
||||
err := as.ensureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
|
||||
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -719,7 +749,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
// Do nothing for availability set.
|
||||
return nil
|
||||
}
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
@ -171,7 +171,7 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLoadBalancerName(t *testing.T) {
|
||||
func TestGetAzureLoadBalancerName(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
az.PrimaryAvailabilitySetName = "primary"
|
||||
|
||||
@ -247,7 +247,7 @@ func TestGetLoadBalancerName(t *testing.T) {
|
||||
} else {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuBasic
|
||||
}
|
||||
loadbalancerName := az.getLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
|
||||
loadbalancerName := az.getAzureLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
|
||||
assert.Equal(t, c.expected, loadbalancerName, c.description)
|
||||
}
|
||||
}
|
||||
|
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
@ -19,20 +19,26 @@ package azure
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultStorageAccountType = string(storage.StandardLRS)
|
||||
defaultStorageAccountKind = storage.StorageV2
|
||||
fileShareAccountNamePrefix = "f"
|
||||
sharedDiskAccountNamePrefix = "ds"
|
||||
dedicatedDiskAccountNamePrefix = "dd"
|
||||
)
|
||||
|
||||
// CreateFileShare creates a file share, using a matching storage account
|
||||
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
|
||||
account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
|
||||
// CreateFileShare creates a file share, using a matching storage account type, account kind, etc.
|
||||
// storage account will be created if specified account is not found
|
||||
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, accountKind, resourceGroup, location string, requestGiB int) (string, string, error) {
|
||||
if resourceGroup == "" {
|
||||
resourceGroup = az.resourceGroup
|
||||
}
|
||||
|
||||
account, key, err := az.ensureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, fileShareAccountNamePrefix)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
@ -40,7 +46,7 @@ func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location s
|
||||
if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
|
||||
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
|
||||
}
|
||||
glog.V(4).Infof("created share %s in account %s", shareName, account)
|
||||
klog.V(4).Infof("created share %s in account %s", shareName, account)
|
||||
return account, key, nil
|
||||
}
|
||||
|
||||
@ -49,7 +55,7 @@ func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) erro
|
||||
if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("share %s deleted", shareName)
|
||||
klog.V(4).Infof("share %s deleted", shareName)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
@ -19,7 +19,7 @@ package azure
import (
"testing"

"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func TestCreateFileShare(t *testing.T) {
@ -30,6 +30,7 @@ func TestCreateFileShare(t *testing.T) {

name := "baz"
sku := "sku"
kind := "StorageV2"
location := "centralus"
value := "foo key"
bogus := "bogus"
@ -38,6 +39,7 @@ func TestCreateFileShare(t *testing.T) {
name string
acct string
acctType string
acctKind string
loc string
gb int
accounts storage.AccountListResult
@ -52,6 +54,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "bar",
acctType: "type",
acctKind: "StorageV2",
loc: "eastus",
gb: 10,
expectErr: true,
@ -60,6 +63,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: "type",
acctKind: "StorageV2",
loc: "eastus",
gb: 10,
expectErr: true,
@ -68,11 +72,12 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
Value: &[]storage.Account{
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Kind: storage.Kind(kind), Location: &location},
},
},
keys: storage.AccountListKeysResult{
@ -87,6 +92,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
@ -100,6 +106,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
@ -116,7 +123,7 @@ func TestCreateFileShare(t *testing.T) {
fake.Keys = test.keys
fake.Err = test.err

account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.loc, test.gb)
account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.acctKind, "rg", test.loc, test.gb)
if test.expectErr && err == nil {
t.Errorf("unexpected non-error")
continue
46
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
@ -20,9 +20,9 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
type accountWithLocation struct {
|
||||
@ -30,15 +30,15 @@ type accountWithLocation struct {
|
||||
}
|
||||
|
||||
// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation
|
||||
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
|
||||
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingAccountKind, resourceGroup, matchingLocation string) ([]accountWithLocation, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, az.ResourceGroup)
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Value == nil {
|
||||
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", az.ResourceGroup)
|
||||
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", resourceGroup)
|
||||
}
|
||||
|
||||
accounts := []accountWithLocation{}
|
||||
@ -49,6 +49,10 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
|
||||
continue
|
||||
}
|
||||
|
||||
if matchingAccountKind != "" && !strings.EqualFold(matchingAccountKind, string(acct.Kind)) {
|
||||
continue
|
||||
}
|
||||
|
||||
location := *acct.Location
|
||||
if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) {
|
||||
continue
|
||||
@ -61,11 +65,11 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
|
||||
}
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
func (az *Cloud) getStorageAccesskey(account, resourceGroup string) (string, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := az.StorageAccountClient.ListKeys(ctx, az.ResourceGroup, account)
|
||||
result, err := az.StorageAccountClient.ListKeys(ctx, resourceGroup, account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -86,17 +90,17 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
}
|
||||
|
||||
// ensureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey
|
||||
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
|
||||
func (az *Cloud) ensureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, genAccountNamePrefix string) (string, string, error) {
|
||||
if len(accountName) == 0 {
|
||||
// find a storage account that matches accountType
|
||||
accounts, err := az.getStorageAccounts(accountType, location)
|
||||
accounts, err := az.getStorageAccounts(accountType, accountKind, resourceGroup, location)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
|
||||
}
|
||||
|
||||
if len(accounts) > 0 {
|
||||
accountName = accounts[0].Name
|
||||
glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
|
||||
klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
|
||||
}
|
||||
|
||||
if len(accountName) == 0 {
|
||||
@ -109,19 +113,23 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
|
||||
accountType = defaultStorageAccountType
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
|
||||
accountName, az.ResourceGroup, location, accountType)
|
||||
// use StorageV2 by default per https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
|
||||
kind := defaultStorageAccountKind
|
||||
if accountKind != "" {
|
||||
kind = storage.Kind(accountKind)
|
||||
}
|
||||
klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s",
|
||||
accountName, resourceGroup, location, accountType, kind)
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
|
||||
Kind: storage.StorageV2,
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
Kind: kind,
|
||||
AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)},
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Location: &location}
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Location: &location}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := az.StorageAccountClient.Create(ctx, az.ResourceGroup, accountName, cp)
|
||||
_, err := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
|
||||
}
|
||||
@ -129,7 +137,7 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
|
||||
}
|
||||
|
||||
// find the access key with this account
|
||||
accountKey, err := az.getStorageAccesskey(accountName)
|
||||
accountKey, err := az.getStorageAccesskey(accountName, resourceGroup)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
|
||||
)
|
||||
|
||||
func TestGetStorageAccessKeys(t *testing.T) {
|
||||
@ -64,7 +64,7 @@ func TestGetStorageAccessKeys(t *testing.T) {
|
||||
expectedKey := test.expectedKey
|
||||
fake.Keys = test.results
|
||||
fake.Err = test.err
|
||||
key, err := cloud.getStorageAccesskey("acct")
|
||||
key, err := cloud.getStorageAccesskey("acct", "rg")
|
||||
if test.expectErr && err == nil {
|
||||
t.Errorf("Unexpected non-error")
|
||||
continue
319
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
@ -19,23 +19,22 @@ package azure
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@ -859,13 +858,13 @@ func TestReconcilePublicIPWithNewService(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
|
||||
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
validatePublicIP(t, pip, &svc, true)
|
||||
|
||||
pip2, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb */)
|
||||
pip2, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb */)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -880,7 +879,7 @@ func TestReconcilePublicIPRemoveService(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
|
||||
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -888,7 +887,7 @@ func TestReconcilePublicIPRemoveService(t *testing.T) {
|
||||
validatePublicIP(t, pip, &svc, true)
|
||||
|
||||
// Remove the service
|
||||
pip, err = az.reconcilePublicIP(testClusterName, &svc, false /* wantLb */)
|
||||
pip, err = az.reconcilePublicIP(testClusterName, &svc, nil, false /* wantLb */)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -900,7 +899,7 @@ func TestReconcilePublicIPWithInternalService(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getInternalTestService("servicea", 80, 443)
|
||||
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -912,7 +911,7 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getInternalTestService("servicea", 80, 443)
|
||||
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
|
||||
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -920,14 +919,14 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
|
||||
|
||||
// Update to external service
|
||||
svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80)
|
||||
pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, true /* wantLb*/)
|
||||
pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, nil, true /* wantLb*/)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
validatePublicIP(t, pip, &svcUpdated, true)
|
||||
|
||||
// Update to internal service again
|
||||
pip, err = az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
|
||||
pip, err = az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -950,8 +949,13 @@ func getTestCloud() (az *Cloud) {
|
||||
RouteTableName: "rt",
|
||||
PrimaryAvailabilitySetName: "as",
|
||||
MaximumLoadBalancerRuleCount: 250,
|
||||
VMType: vmTypeStandard,
|
||||
VMType: vmTypeStandard,
|
||||
},
|
||||
nodeZones: map[string]sets.String{},
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
nodeResourceGroups: map[string]string{},
|
||||
unmanagedNodes: sets.NewString(),
|
||||
routeCIDRs: map[string]string{},
|
||||
}
|
||||
az.DisksClient = newFakeDisksClient()
|
||||
az.InterfacesClient = newFakeAzureInterfacesClient()
|
||||
@ -1061,7 +1065,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
|
||||
az.InterfacesClient.CreateOrUpdate(ctx, az.Config.ResourceGroup, nicName, newNIC)
|
||||
|
||||
// create vm
|
||||
asID := az.getAvailabilitySetID(asName)
|
||||
asID := az.getAvailabilitySetID(az.Config.ResourceGroup, asName)
|
||||
newVM := compute.VirtualMachine{
|
||||
Name: &vmName,
|
||||
Location: &az.Config.Location,
|
||||
@ -1159,7 +1163,7 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
|
||||
for _, port := range service.Spec.Ports {
|
||||
sources := getServiceSourceRanges(&service)
|
||||
for _, src := range sources {
|
||||
ruleName := getSecurityRuleName(&service, port, src)
|
||||
ruleName := az.getSecurityRuleName(&service, port, src)
|
||||
rules = append(rules, network.SecurityRule{
|
||||
Name: to.StringPtr(ruleName),
|
||||
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
|
||||
@ -1190,6 +1194,7 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
|
||||
}
|
||||
|
||||
func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, services ...v1.Service) {
|
||||
az := getTestCloud()
|
||||
expectedRuleCount := 0
|
||||
expectedFrontendIPCount := 0
|
||||
expectedProbeCount := 0
|
||||
@ -1198,14 +1203,14 @@ func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, serv
|
||||
if len(svc.Spec.Ports) > 0 {
|
||||
expectedFrontendIPCount++
|
||||
expectedFrontendIP := ExpectedFrontendIPInfo{
|
||||
Name: getFrontendIPConfigName(&svc, subnet(&svc)),
|
||||
Name: az.getFrontendIPConfigName(&svc, subnet(&svc)),
|
||||
Subnet: subnet(&svc),
|
||||
}
|
||||
expectedFrontendIPs = append(expectedFrontendIPs, expectedFrontendIP)
|
||||
}
|
||||
for _, wantedRule := range svc.Spec.Ports {
|
||||
expectedRuleCount++
|
||||
wantedRuleName := getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
|
||||
wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
|
||||
foundRule := false
|
||||
for _, actualRule := range *loadBalancer.LoadBalancingRules {
|
||||
if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
|
||||
@ -1396,12 +1401,13 @@ func securityRuleMatches(serviceSourceRange string, servicePort v1.ServicePort,
|
||||
}
|
||||
|
||||
func validateSecurityGroup(t *testing.T, securityGroup *network.SecurityGroup, services ...v1.Service) {
|
||||
az := getTestCloud()
|
||||
seenRules := make(map[string]string)
|
||||
for _, svc := range services {
|
||||
for _, wantedRule := range svc.Spec.Ports {
|
||||
sources := getServiceSourceRanges(&svc)
|
||||
for _, source := range sources {
|
||||
wantedRuleName := getSecurityRuleName(&svc, wantedRule, source)
|
||||
wantedRuleName := az.getSecurityRuleName(&svc, wantedRule, source)
|
||||
seenRules[wantedRuleName] = wantedRuleName
|
||||
foundRule := false
|
||||
for _, actualRule := range *securityGroup.SecurityRules {
|
||||
@ -1667,60 +1673,67 @@ func validateEmptyConfig(t *testing.T, config string) {
|
||||
t.Errorf("got incorrect value for CloudProviderRateLimit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetZone(t *testing.T) {
|
||||
data := `{"ID":"_azdev","UD":"0","FD":"99"}`
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, data)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
cloud := &Cloud{}
|
||||
cloud.Location = "eastus"
|
||||
|
||||
zone, err := cloud.getZoneFromURL(ts.URL)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
cloud := &Cloud{
|
||||
Config: Config{
|
||||
Location: "eastus",
|
||||
},
|
||||
}
|
||||
if zone.FailureDomain != "99" {
|
||||
t.Errorf("Unexpected value: %s, expected '99'", zone.FailureDomain)
|
||||
}
|
||||
if zone.Region != cloud.Location {
|
||||
t.Errorf("Expected: %s, saw: %s", cloud.Location, zone.Region)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchFaultDomain(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, `{"ID":"_azdev","UD":"0","FD":"99"}`)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
faultDomain, err := fetchFaultDomain(ts.URL)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if faultDomain == nil {
|
||||
t.Errorf("Unexpected nil fault domain")
|
||||
}
|
||||
if *faultDomain != "99" {
|
||||
t.Errorf("Expected '99', saw '%s'", *faultDomain)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeInstanceInfo(t *testing.T) {
|
||||
response := `{"ID":"_azdev","UD":"0","FD":"99"}`
|
||||
|
||||
faultDomain, err := readFaultDomain(strings.NewReader(response))
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error in ReadFaultDomain: %v", err)
|
||||
testcases := []struct {
|
||||
name string
|
||||
zone string
|
||||
faultDomain string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "GetZone should get real zone if only node's zone is set",
|
||||
zone: "1",
|
||||
expected: "eastus-1",
|
||||
},
|
||||
{
|
||||
name: "GetZone should get real zone if both node's zone and FD are set",
|
||||
zone: "1",
|
||||
faultDomain: "99",
|
||||
expected: "eastus-1",
|
||||
},
|
||||
{
|
||||
name: "GetZone should get faultDomain if node's zone isn't set",
|
||||
faultDomain: "99",
|
||||
expected: "99",
|
||||
},
|
||||
}
|
||||
|
||||
if faultDomain == nil {
|
||||
t.Error("Fault domain was unexpectedly nil")
|
||||
}
|
||||
for _, test := range testcases {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
if *faultDomain != "99" {
|
||||
t.Error("got incorrect fault domain")
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, fmt.Sprintf(`{"compute":{"zone":"%s", "platformFaultDomain":"%s"}}`, test.zone, test.faultDomain))
|
||||
}))
|
||||
go func() {
|
||||
http.Serve(listener, mux)
|
||||
}()
|
||||
defer listener.Close()
|
||||
|
||||
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
|
||||
zone, err := cloud.GetZone(context.Background())
|
||||
if err != nil {
|
||||
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
|
||||
}
|
||||
if zone.FailureDomain != test.expected {
|
||||
t.Errorf("Test [%s] unexpected zone: %s, expected %q", test.name, zone.FailureDomain, test.expected)
|
||||
}
|
||||
if zone.Region != cloud.Location {
|
||||
t.Errorf("Test [%s] unexpected region: %s, expected: %s", test.name, zone.Region, cloud.Location)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1776,73 +1789,6 @@ func TestGetNodeNameByProviderID(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataURLGeneration(t *testing.T) {
|
||||
metadata := NewInstanceMetadata()
|
||||
fullPath := metadata.makeMetadataURL("some/path")
|
||||
if fullPath != "http://169.254.169.254/metadata/some/path" {
|
||||
t.Errorf("Expected http://169.254.169.254/metadata/some/path saw %s", fullPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataParsing(t *testing.T) {
|
||||
data := `
|
||||
{
|
||||
"interface": [
|
||||
{
|
||||
"ipv4": {
|
||||
"ipAddress": [
|
||||
{
|
||||
"privateIpAddress": "10.0.1.4",
|
||||
"publicIpAddress": "X.X.X.X"
|
||||
}
|
||||
],
|
||||
"subnet": [
|
||||
{
|
||||
"address": "10.0.1.0",
|
||||
"prefix": "24"
|
||||
}
|
||||
]
|
||||
},
|
||||
"ipv6": {
|
||||
"ipAddress": [
|
||||
|
||||
]
|
||||
},
|
||||
"macAddress": "002248020E1E"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
network := NetworkMetadata{}
|
||||
if err := json.Unmarshal([]byte(data), &network); err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
ip := network.Interface[0].IPV4.IPAddress[0].PrivateIP
|
||||
if ip != "10.0.1.4" {
|
||||
t.Errorf("Unexpected value: %s, expected 10.0.1.4", ip)
|
||||
}
|
||||
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, data)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metadata := &InstanceMetadata{
|
||||
baseURL: server.URL,
|
||||
}
|
||||
|
||||
networkJSON := NetworkMetadata{}
|
||||
if err := metadata.Object("/some/path", &networkJSON); err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(network, networkJSON) {
|
||||
t.Errorf("Unexpected inequality:\n%#v\nvs\n%#v", network, networkJSON)
|
||||
}
|
||||
}
|
||||
|
||||
func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) {
|
||||
if svc.Annotations[ServiceAnnotationLoadBalancerInternal] != "true" {
|
||||
t.Error("Subnet added to non-internal service")
|
||||
@ -1929,8 +1875,8 @@ func TestIfServiceSpecifiesSharedRuleAndRuleExistsThenTheServicesPortAndAddressA
|
||||
SourceAddressPrefix: to.StringPtr("Internet"),
|
||||
DestinationPortRange: to.StringPtr("80"),
|
||||
DestinationAddressPrefix: to.StringPtr("192.168.33.44"),
|
||||
Access: network.SecurityRuleAccessAllow,
|
||||
Direction: network.SecurityRuleDirectionInbound,
|
||||
Access: network.SecurityRuleAccessAllow,
|
||||
Direction: network.SecurityRuleDirectionInbound,
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -2537,8 +2483,8 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
|
||||
|
||||
expectedRuleName13 := "shared-TCP-4444-Internet"
|
||||
expectedRuleName2 := "shared-TCP-8888-Internet"
|
||||
expectedRuleName4 := getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
|
||||
expectedRuleName5 := getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")
|
||||
expectedRuleName4 := az.getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
|
||||
expectedRuleName5 := az.getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")
|
||||
|
||||
sg := getTestSecurityGroup(az)
|
||||
|
||||
@ -2735,3 +2681,100 @@ func TestGetResourceGroupFromDiskURI(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetResourceGroups(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeResourceGroups map[string]string
|
||||
expected sets.String
|
||||
informerSynced bool
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "cloud provider configured RG should be returned by default",
|
||||
nodeResourceGroups: map[string]string{},
|
||||
informerSynced: true,
|
||||
expected: sets.NewString("rg"),
|
||||
},
|
||||
{
|
||||
name: "cloud provider configured RG and node RGs should be returned",
|
||||
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
|
||||
informerSynced: true,
|
||||
expected: sets.NewString("rg", "rg1", "rg2"),
|
||||
},
|
||||
{
|
||||
name: "error should be returned if informer hasn't synced yet",
|
||||
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
|
||||
informerSynced: false,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
az := getTestCloud()
|
||||
for _, test := range tests {
|
||||
az.nodeResourceGroups = test.nodeResourceGroups
|
||||
if test.informerSynced {
|
||||
az.nodeInformerSynced = func() bool { return true }
|
||||
} else {
|
||||
az.nodeInformerSynced = func() bool { return false }
|
||||
}
|
||||
actual, err := az.GetResourceGroups()
|
||||
if test.expectError {
|
||||
assert.NotNil(t, err, test.name)
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Nil(t, err, test.name)
|
||||
assert.Equal(t, test.expected, actual, test.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeResourceGroup(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeResourceGroups map[string]string
|
||||
node string
|
||||
expected string
|
||||
informerSynced bool
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "cloud provider configured RG should be returned by default",
|
||||
nodeResourceGroups: map[string]string{},
|
||||
informerSynced: true,
|
||||
node: "node1",
|
||||
expected: "rg",
|
||||
},
|
||||
{
|
||||
name: "node RGs should be returned",
|
||||
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
|
||||
informerSynced: true,
|
||||
node: "node1",
|
||||
expected: "rg1",
|
||||
},
|
||||
{
|
||||
name: "error should be returned if informer hasn't synced yet",
|
||||
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
|
||||
informerSynced: false,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
az := getTestCloud()
|
||||
for _, test := range tests {
|
||||
az.nodeResourceGroups = test.nodeResourceGroups
|
||||
if test.informerSynced {
|
||||
az.nodeInformerSynced = func() bool { return true }
|
||||
} else {
|
||||
az.nodeInformerSynced = func() bool { return false }
|
||||
}
|
||||
actual, err := az.GetNodeResourceGroup(test.node)
|
||||
if test.expectError {
|
||||
assert.NotNil(t, err, test.name)
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Nil(t, err, test.name)
|
||||
assert.Equal(t, test.expected, actual, test.name)
|
||||
}
|
||||
}
11
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
@ -17,12 +17,12 @@ limitations under the License.
package azure

import (
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

// VMSet defines functions all vmsets (including scale set and availability
@ -54,9 +54,9 @@ type VMSet interface {
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error

// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
@ -64,4 +64,7 @@ type VMSet interface {
DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
// GetDataDisks gets a list of data disks attached to the node.
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)

// GetPowerStatusByNodeName returns the power state of the specified node.
GetPowerStatusByNodeName(name string) (string, error)
}
321
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
@ -24,24 +24,26 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
|
||||
ErrorNotVmssInstance = errors.New("not a vmss instance")
|
||||
|
||||
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
|
||||
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
|
||||
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
|
||||
resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
|
||||
vmssNicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines/(?:.*)/networkInterfaces/(?:.*)`)
|
||||
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
|
||||
)
|
||||
|
||||
// scaleSet implements VMSet interface for Azure scale set.
|
||||
@ -106,31 +108,58 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co
|
||||
return "", "", vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
|
||||
cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
|
||||
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
|
||||
if err != nil {
|
||||
return "", "", vm, err
|
||||
}
|
||||
|
||||
klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
|
||||
key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID))
|
||||
cachedVM, err := ss.vmssVMCache.Get(key)
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
}
|
||||
|
||||
if cachedVM == nil {
|
||||
glog.Errorf("Can't find node (%q) in any scale sets", nodeName)
|
||||
klog.Errorf("Can't find node (%q) in any scale sets", nodeName)
|
||||
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// GetPowerStatusByNodeName returns the power state of the specified node.
|
||||
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
|
||||
_, _, vm, err := ss.getVmssVM(name)
|
||||
if err != nil {
|
||||
return powerState, err
|
||||
}
|
||||
|
||||
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
|
||||
statuses := *vm.InstanceView.Statuses
|
||||
for _, status := range statuses {
|
||||
state := to.String(status.Code)
|
||||
if strings.HasPrefix(state, vmPowerStatePrefix) {
|
||||
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to get power status for node %q", name)
|
||||
}
|
||||
|
||||
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
|
||||
// The node must belong to one of scale sets.
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
|
||||
cachedVM, err := ss.vmssVMCache.Get(vmName)
|
||||
key := buildVmssCacheKey(resourceGroup, vmName)
|
||||
cachedVM, err := ss.vmssVMCache.Get(key)
|
||||
if err != nil {
|
||||
return vm, err
|
||||
}
|
||||
|
||||
if cachedVM == nil {
|
||||
glog.Errorf("cound't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID)
|
||||
klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID)
|
||||
return vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
@ -143,7 +172,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm c
|
||||
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return "", err
|
||||
}
|
||||
if managedByAS {
|
||||
@ -164,17 +193,22 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
|
||||
// NodeName is not part of providerID for vmss instances.
|
||||
scaleSetName, err := extractScaleSetNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
klog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
|
||||
}
|
||||
|
||||
resourceGroup, err := extractResourceGroupByProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error of extracting resource group for node %q", providerID)
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(providerID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
klog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
|
||||
}
|
||||
|
||||
vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
|
||||
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -191,7 +225,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
|
||||
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return "", err
|
||||
}
|
||||
if managedByAS {
|
||||
@ -211,11 +245,12 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// GetZoneByNodeName gets cloudprovider.Zone by node name.
|
||||
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
|
||||
// with availability zone, then it returns fault domain.
|
||||
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
if managedByAS {
|
||||
@ -228,14 +263,25 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
|
||||
return cloudprovider.Zone{
|
||||
FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
|
||||
Region: *vm.Location,
|
||||
}, nil
|
||||
var failureDomain string
|
||||
if vm.Zones != nil && len(*vm.Zones) > 0 {
|
||||
// Get availability zone for the node.
|
||||
zones := *vm.Zones
|
||||
zoneID, err := strconv.Atoi(zones[0])
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
|
||||
}
|
||||
|
||||
failureDomain = ss.makeZone(zoneID)
|
||||
} else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
|
||||
// Availability zone is not used for the node, falling back to fault domain.
|
||||
failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
|
||||
}
|
||||
|
||||
return cloudprovider.Zone{}, nil
|
||||
return cloudprovider.Zone{
|
||||
FailureDomain: failureDomain,
|
||||
Region: *vm.Location,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
|
||||
@ -245,23 +291,43 @@ func (ss *scaleSet) GetPrimaryVMSetName() string {
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet,
|
||||
// fix this after it is supported.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
|
||||
klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
klog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, "", nil
|
||||
internalIP := *ipConfig.PrivateIPAddress
|
||||
publicIP := ""
|
||||
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
|
||||
pipID := *ipConfig.PublicIPAddress.ID
|
||||
pipName, err := getLastSegment(pipID)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", nodeName, pipID)
|
||||
}
|
||||
|
||||
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
pip, existsPip, err := ss.getPublicIPAddress(resourceGroup, pipName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if existsPip {
|
||||
publicIP = *pip.IPAddress
|
||||
}
|
||||
}
|
||||
|
||||
return internalIP, publicIP, nil
|
||||
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
@ -296,7 +362,7 @@ func getScaleSetVMInstanceID(machineName string) (string, error) {
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
|
||||
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by node's ProviderID.
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by vmss node's ProviderID.
|
||||
func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
@ -306,15 +372,25 @@ func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// extractResourceGroupByProviderID extracts the resource group name by vmss node's ProviderID.
|
||||
func extractResourceGroupByProviderID(providerID string) (string, error) {
|
||||
matches := resourceGroupRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// listScaleSets lists all scale sets.
|
||||
func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, ss.ResourceGroup)
|
||||
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
|
||||
klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -327,14 +403,14 @@ func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
}
|
||||
|
||||
// listScaleSetVMs lists VMs belonging to the specified scale set.
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) {
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compute.VirtualMachineScaleSetVM, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView))
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView))
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
|
||||
klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -350,6 +426,10 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
|
||||
continue
|
||||
}
|
||||
|
||||
nodeName := nodes[nx].Name
|
||||
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
|
||||
if err != nil {
|
||||
@ -357,7 +437,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
|
||||
}
|
||||
|
||||
if ssName == "" {
|
||||
glog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
|
||||
klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -381,11 +461,11 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
|
||||
|
||||
scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
|
||||
klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*scaleSetNames) == 0 {
|
||||
glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
|
||||
@ -407,7 +487,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
|
||||
klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
|
||||
return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx])
|
||||
}
|
||||
}
|
||||
@ -417,11 +497,21 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
|
||||
return vmSetNames, nil
|
||||
}
|
||||
|
||||
// extractResourceGroupByVMSSNicID extracts the resource group name by vmss nicID.
|
||||
func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
|
||||
matches := vmssNicResourceGroupRE.FindStringSubmatch(nicID)
|
||||
if len(matches) != 2 {
|
||||
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
if managedByAS {
|
||||
@ -431,27 +521,36 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
|
||||
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
|
||||
// VM is availability set, but not cached yet in availabilitySetNodesCache.
|
||||
if err == ErrorNotVmssInstance {
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName)
|
||||
}
|
||||
|
||||
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
|
||||
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
nicName, err := getLastSegment(primaryInterfaceID)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
|
||||
klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "")
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
|
||||
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
@ -465,17 +564,18 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
|
||||
}
|
||||
|
||||
// getScaleSetWithRetry gets scale set with exponential backoff retry
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
var result compute.VirtualMachineScaleSet
|
||||
var exists bool
|
||||
|
||||
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
cached, retryErr := ss.vmssCache.Get(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
|
||||
ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error())
|
||||
klog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(4).Info("backoff: success for scale set %q", name)
|
||||
klog.V(4).Infof("backoff: success for scale set %q", name)
|
||||
|
||||
if cached != nil {
|
||||
exists = true
|
||||
@ -522,24 +622,24 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine
|
||||
}
|
||||
|
||||
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
|
||||
return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
|
||||
return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
@ -550,13 +650,18 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
|
||||
|
||||
for _, curNode := range nodes {
|
||||
if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
|
||||
klog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
if ss.ShouldNodeExcludedFromLoadBalancer(curNode) {
|
||||
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
|
||||
klog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
|
||||
standardNodes = append(standardNodes, curNode)
|
||||
continue
|
||||
}
|
||||
@ -567,7 +672,7 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
|
||||
|
||||
instanceID, err := getLastSegment(curNode.Spec.ProviderID)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
|
||||
klog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@ -579,16 +684,17 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
|
||||
|
||||
// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are
|
||||
// participating in the vmSet's LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
|
||||
glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
|
||||
klog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
|
||||
serviceName := getServiceName(service)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
|
||||
klog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
errorMessage := fmt.Errorf("Scale set %q not found", vmSetName)
|
||||
glog.Errorf("%v", errorMessage)
|
||||
klog.Errorf("%v", errorMessage)
|
||||
return errorMessage
|
||||
}
|
||||
|
||||
@ -629,7 +735,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
|
||||
klog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -644,15 +750,15 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
|
||||
klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -667,13 +773,13 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -685,10 +791,11 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
serviceName := getServiceName(service)
|
||||
scalesets, standardNodes, err := ss.getNodesScaleSets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
|
||||
klog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -700,22 +807,22 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
|
||||
if instanceIDs.Len() == 0 {
|
||||
// This may happen when scaling a vmss capacity to 0.
|
||||
glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
|
||||
klog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
|
||||
// InstanceIDs is required to update vmss, use * instead here since there are no nodes actually.
|
||||
instanceIDs.Insert("*")
|
||||
}
|
||||
|
||||
err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal)
|
||||
err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal)
|
||||
if err != nil {
|
||||
glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
|
||||
klog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if ss.useStandardLoadBalancer() && len(standardNodes) > 0 {
|
||||
err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal)
|
||||
err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal)
|
||||
if err != nil {
|
||||
glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
|
||||
klog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -724,15 +831,15 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
}
|
||||
|
||||
// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset.
|
||||
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
|
||||
glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
|
||||
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error {
|
||||
klog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
|
||||
klog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
|
||||
klog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -759,7 +866,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
|
||||
for i := len(existingBackendPools) - 1; i >= 0; i-- {
|
||||
curPool := existingBackendPools[i]
|
||||
if strings.EqualFold(poolID, *curPool.ID) {
|
||||
glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
|
||||
klog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
|
||||
foundPool = true
|
||||
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
|
||||
}
|
||||
@ -771,17 +878,17 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
|
||||
|
||||
// Update scale set with backoff.
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
|
||||
klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -796,13 +903,13 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
|
||||
instanceCtx, instanceCancel := getContextWithCancel()
|
||||
defer instanceCancel()
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -814,14 +921,14 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
|
||||
if len(newBackendPools) == 0 {
|
||||
updateCtx, updateCancel := getContextWithCancel()
|
||||
defer updateCancel()
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
|
||||
klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
|
||||
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -830,7 +937,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
if backendAddressPools == nil {
|
||||
return nil
|
||||
}
|
||||
@ -845,7 +952,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
|
||||
|
||||
ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
|
||||
klog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -861,9 +968,9 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
|
||||
continue
|
||||
}
|
||||
|
||||
err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName)
|
||||
err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
|
||||
klog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -872,11 +979,11 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
|
||||
}
|
||||
|
||||
// getVmssMachineID returns the full identifier of a vmss virtual machine.
|
||||
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
|
||||
func (az *Cloud) getVmssMachineID(resourceGroup, scaleSetName, instanceID string) string {
|
||||
return fmt.Sprintf(
|
||||
vmssMachineIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
resourceGroup,
|
||||
scaleSetName,
|
||||
instanceID)
|
||||
}
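Editor's note: a minimal, self-contained sketch of the machine ID produced above. The vmssMachineIDTemplate constant is not shown in this hunk; the literal below mirrors the usual Azure resource-ID layout and is an assumption.
package main

import "fmt"

// Assumed layout of vmssMachineIDTemplate; the real constant lives elsewhere in this package.
const vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"

func main() {
	// With the new signature the resource group is a parameter rather than always az.ResourceGroup.
	fmt.Println(fmt.Sprintf(vmssMachineIDTemplate, "sub-0000", "node-rg", "agentpool-vmss", "3"))
	// /subscriptions/sub-0000/resourceGroups/node-rg/providers/Microsoft.Compute/virtualMachineScaleSets/agentpool-vmss/virtualMachines/3
}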
111
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
@ -21,21 +21,22 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
var (
|
||||
vmssNameSeparator = "_"
|
||||
vmssNameSeparator = "_"
|
||||
vmssCacheSeparator = "#"
|
||||
|
||||
nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
|
||||
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
|
||||
|
||||
vmssCacheTTL = time.Minute
|
||||
vmssVMCacheTTL = time.Minute
|
||||
availabilitySetNodesCacheTTL = 15 * time.Minute
|
||||
nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
|
||||
availabilitySetNodesCacheTTL = 5 * time.Minute
|
||||
nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute
|
||||
)
|
||||
|
||||
// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
|
||||
@ -49,19 +50,19 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
|
||||
func extractVmssVMName(name string) (string, string, error) {
|
||||
split := strings.SplitAfter(name, vmssNameSeparator)
|
||||
if len(split) < 2 {
|
||||
glog.Errorf("Failed to extract vmssVMName %q", name)
|
||||
klog.V(3).Infof("Failed to extract vmssVMName %q", name)
|
||||
return "", "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
ssName := strings.Join(split[0:len(split)-1], "")
|
||||
// removing the trailing `vmssNameSeparator` since we used SplitAfter
|
||||
ssName = ssName[:len(ssName)-1]
|
||||
|
||||
instanceID := split[len(split)-1]
|
||||
|
||||
return ssName, instanceID, nil
|
||||
}
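Editor's note: a standalone sketch of the name parsing above, using a local error value in place of the package's ErrorNotVmssInstance; illustrative only.
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNotVmssInstance = errors.New("not a vmss instance")

// Same SplitAfter-based parsing as extractVmssVMName: everything before the
// last "_" is the scale set name, the remainder is the instance ID.
func extractVmssVMName(name string) (string, string, error) {
	split := strings.SplitAfter(name, "_")
	if len(split) < 2 {
		return "", "", errNotVmssInstance
	}
	ssName := strings.Join(split[0:len(split)-1], "")
	ssName = ssName[:len(ssName)-1] // drop the trailing separator kept by SplitAfter
	return ssName, split[len(split)-1], nil
}

func main() {
	fmt.Println(extractVmssVMName("agentpool-vmss_3")) // agentpool-vmss 3 <nil>
	fmt.Println(extractVmssVMName("my_scale_set_12"))  // my_scale_set 12 <nil>
	fmt.Println(extractVmssVMName("not-a-vmss-name"))  // error: not a vmss instance
}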
// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups
|
||||
// will be excluded from LB backends.
|
||||
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
@ -73,7 +74,7 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
|
||||
klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -85,26 +86,34 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {
|
||||
|
||||
func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
scaleSetNames, err := ss.listScaleSets()
|
||||
localCache := make(nodeNameToScaleSetMapping)
|
||||
|
||||
allResourceGroups, err := ss.GetResourceGroups()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localCache := make(nodeNameToScaleSetMapping)
|
||||
for _, ssName := range scaleSetNames {
|
||||
vms, err := ss.listScaleSetVMs(ssName)
|
||||
for _, resourceGroup := range allResourceGroups.List() {
|
||||
scaleSetNames, err := ss.listScaleSets(resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, vm := range vms {
|
||||
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
|
||||
glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
|
||||
continue
|
||||
for _, ssName := range scaleSetNames {
|
||||
vms, err := ss.listScaleSetVMs(ssName, resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
|
||||
localCache[computerName] = ssName
|
||||
for _, vm := range vms {
|
||||
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
|
||||
klog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
|
||||
continue
|
||||
}
|
||||
|
||||
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
|
||||
localCache[computerName] = ssName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -116,14 +125,23 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
|
||||
|
||||
func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
|
||||
localCache := sets.NewString()
|
||||
resourceGroups, err := ss.GetResourceGroups()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localCache := sets.NewString()
|
||||
for _, vm := range vmList {
|
||||
localCache.Insert(*vm.Name)
|
||||
for _, resourceGroup := range resourceGroups.List() {
|
||||
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry(resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, vm := range vmList {
|
||||
if vm.Name != nil {
|
||||
localCache.Insert(*vm.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return localCache, nil
|
||||
@ -132,10 +150,33 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
|
||||
return newTimedcache(availabilitySetNodesCacheTTL, getter)
|
||||
}
|
||||
|
||||
func buildVmssCacheKey(resourceGroup, name string) string {
|
||||
// key is composed of <resourceGroup>#<vmName>
|
||||
return fmt.Sprintf("%s%s%s", resourceGroup, vmssCacheSeparator, name)
|
||||
}
|
||||
|
||||
func extractVmssCacheKey(key string) (string, string, error) {
|
||||
// key is composed of <resourceGroup>#<vmName>
|
||||
keyItems := strings.Split(key, vmssCacheSeparator)
|
||||
if len(keyItems) != 2 {
|
||||
return "", "", fmt.Errorf("key %q is not in format '<resouceGroup>#<vmName>'", key)
|
||||
}
|
||||
|
||||
resourceGroup := keyItems[0]
|
||||
vmName := keyItems[1]
|
||||
return resourceGroup, vmName, nil
|
||||
}
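Editor's note: a small round-trip sketch of the new cache key format introduced above; self-contained and illustrative only.
package main

import (
	"fmt"
	"strings"
)

const vmssCacheSeparator = "#"

// Mirrors buildVmssCacheKey / extractVmssCacheKey above: keys are "<resourceGroup>#<vmName>".
func buildVmssCacheKey(resourceGroup, name string) string {
	return fmt.Sprintf("%s%s%s", resourceGroup, vmssCacheSeparator, name)
}

func extractVmssCacheKey(key string) (string, string, error) {
	keyItems := strings.Split(key, vmssCacheSeparator)
	if len(keyItems) != 2 {
		return "", "", fmt.Errorf("key %q is not in format '<resourceGroup>#<vmName>'", key)
	}
	return keyItems[0], keyItems[1], nil
}

func main() {
	key := buildVmssCacheKey("node-rg", "agentpool-vmss_3")
	rg, vmName, err := extractVmssCacheKey(key)
	fmt.Println(key, rg, vmName, err) // node-rg#agentpool-vmss_3 node-rg agentpool-vmss_3 <nil>
}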
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
// vmssVM name's format is 'scaleSetName_instanceID'
|
||||
ssName, instanceID, err := extractVmssVMName(key)
|
||||
// key is composed of <resourceGroup>#<vmName>
|
||||
resourceGroup, vmName, err := extractVmssCacheKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// vmName's format is 'scaleSetName_instanceID'
|
||||
ssName, instanceID, err := extractVmssVMName(vmName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -147,17 +188,35 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
|
||||
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID)
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
|
||||
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get instanceView for vmssVM.
|
||||
if result.InstanceView == nil {
|
||||
viewCtx, viewCancel := getContextWithCancel()
|
||||
defer viewCancel()
|
||||
view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID)
|
||||
// It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again.
|
||||
exists, message, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
if !exists {
|
||||
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
result.InstanceView = &view
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
|
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
@ -20,13 +20,20 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
|
||||
const (
|
||||
fakePrivateIP = "10.240.0.10"
|
||||
fakePublicIP = "10.10.10.10"
|
||||
)
|
||||
|
||||
func newTestScaleSet(scaleSetName, zone string, faultDomain int32, vmList []string) (*scaleSet, error) {
|
||||
cloud := getTestCloud()
|
||||
setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
|
||||
setTestVirtualMachineCloud(cloud, scaleSetName, zone, faultDomain, vmList)
|
||||
ss, err := newScaleSet(cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -35,8 +42,13 @@ func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
|
||||
return ss.(*scaleSet), nil
|
||||
}
|
||||
|
||||
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
|
||||
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomain int32, vmList []string) {
|
||||
virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
|
||||
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
|
||||
publicIPAddressesClient := newFakeAzurePIPClient("rg")
|
||||
interfaceClient := newFakeAzureInterfacesClient()
|
||||
|
||||
// set test scale sets.
|
||||
scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
|
||||
scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
|
||||
scaleSetName: {
|
||||
@ -45,20 +57,30 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
|
||||
}
|
||||
virtualMachineScaleSetsClient.setFakeStore(scaleSets)
|
||||
|
||||
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
|
||||
ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
|
||||
ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
|
||||
testInterfaces := map[string]map[string]network.Interface{
|
||||
"rg": make(map[string]network.Interface),
|
||||
}
|
||||
testPIPs := map[string]map[string]network.PublicIPAddress{
|
||||
"rg": make(map[string]network.PublicIPAddress),
|
||||
}
|
||||
ssVMs := map[string]map[string]compute.VirtualMachineScaleSetVM{
|
||||
"rg": make(map[string]compute.VirtualMachineScaleSetVM),
|
||||
}
|
||||
for i := range vmList {
|
||||
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
|
||||
nodeName := vmList[i]
|
||||
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
|
||||
interfaceID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s", scaleSetName, i, nodeName)
|
||||
publicAddressID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s/ipConfigurations/ipconfig1/publicIPAddresses/%s", scaleSetName, i, nodeName, nodeName)
|
||||
instanceID := fmt.Sprintf("%d", i)
|
||||
vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
|
||||
|
||||
// set vmss virtual machine.
|
||||
networkInterfaces := []compute.NetworkInterfaceReference{
|
||||
{
|
||||
ID: &nodeName,
|
||||
ID: &interfaceID,
|
||||
},
|
||||
}
|
||||
ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
|
||||
vmssVM := compute.VirtualMachineScaleSetVM{
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
OsProfile: &compute.OSProfile{
|
||||
ComputerName: &nodeName,
|
||||
@ -66,17 +88,55 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
|
||||
NetworkProfile: &compute.NetworkProfile{
|
||||
NetworkInterfaces: &networkInterfaces,
|
||||
},
|
||||
InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{
|
||||
PlatformFaultDomain: &faultDomain,
|
||||
},
|
||||
},
|
||||
ID: &ID,
|
||||
InstanceID: &instanceID,
|
||||
Name: &vmName,
|
||||
Location: &ss.Location,
|
||||
}
|
||||
if zone != "" {
|
||||
zones := []string{zone}
|
||||
vmssVM.Zones = &zones
|
||||
}
|
||||
ssVMs["rg"][vmName] = vmssVM
|
||||
|
||||
// set interfaces.
|
||||
testInterfaces["rg"][nodeName] = network.Interface{
|
||||
ID: &interfaceID,
|
||||
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
|
||||
IPConfigurations: &[]network.InterfaceIPConfiguration{
|
||||
{
|
||||
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
|
||||
Primary: to.BoolPtr(true),
|
||||
PrivateIPAddress: to.StringPtr(fakePrivateIP),
|
||||
PublicIPAddress: &network.PublicIPAddress{
|
||||
ID: to.StringPtr(publicAddressID),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// set public IPs.
|
||||
testPIPs["rg"][nodeName] = network.PublicIPAddress{
|
||||
ID: to.StringPtr(publicAddressID),
|
||||
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
|
||||
IPAddress: to.StringPtr(fakePublicIP),
|
||||
},
|
||||
}
|
||||
}
|
||||
virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)
|
||||
interfaceClient.setFakeStore(testInterfaces)
|
||||
publicIPAddressesClient.setFakeStore(testPIPs)
|
||||
|
||||
ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
|
||||
ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
|
||||
ss.InterfacesClient = interfaceClient
|
||||
ss.PublicIPAddressesClient = publicIPAddressesClient
|
||||
}
|
||||
|
||||
func TestGetScaleSetVMInstanceID(t *testing.T) {
|
||||
@ -141,7 +201,7 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
ss, err := newTestScaleSet(test.scaleSet, test.vmList)
|
||||
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
|
||||
assert.NoError(t, err, test.description)
|
||||
|
||||
real, err := ss.GetInstanceIDByNodeName(test.nodeName)
|
||||
@ -154,3 +214,96 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
|
||||
assert.Equal(t, test.expected, real, test.description)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetZoneByNodeName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
scaleSet string
|
||||
vmList []string
|
||||
nodeName string
|
||||
zone string
|
||||
faultDomain int32
|
||||
expected string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
description: "scaleSet should get faultDomain for non-zoned nodes",
|
||||
scaleSet: "ss",
|
||||
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
|
||||
nodeName: "vmssee6c2000000",
|
||||
faultDomain: 3,
|
||||
expected: "3",
|
||||
},
|
||||
{
|
||||
description: "scaleSet should get availability zone for zoned nodes",
|
||||
scaleSet: "ss",
|
||||
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
|
||||
nodeName: "vmssee6c2000000",
|
||||
zone: "2",
|
||||
faultDomain: 3,
|
||||
expected: "westus-2",
|
||||
},
|
||||
{
|
||||
description: "scaleSet should return error for non-exist nodes",
|
||||
scaleSet: "ss",
|
||||
faultDomain: 3,
|
||||
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
|
||||
nodeName: "agente6c2000005",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
ss, err := newTestScaleSet(test.scaleSet, test.zone, test.faultDomain, test.vmList)
|
||||
assert.NoError(t, err, test.description)
|
||||
|
||||
real, err := ss.GetZoneByNodeName(test.nodeName)
|
||||
if test.expectError {
|
||||
assert.Error(t, err, test.description)
|
||||
continue
|
||||
}
|
||||
|
||||
assert.NoError(t, err, test.description)
|
||||
assert.Equal(t, test.expected, real.FailureDomain, test.description)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIPByNodeName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
scaleSet string
|
||||
vmList []string
|
||||
nodeName string
|
||||
expected []string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
description: "GetIPByNodeName should get node's privateIP and publicIP",
|
||||
scaleSet: "ss",
|
||||
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
|
||||
nodeName: "vmssee6c2000000",
|
||||
expected: []string{fakePrivateIP, fakePublicIP},
|
||||
},
|
||||
{
|
||||
description: "GetIPByNodeName should return error for non-exist nodes",
|
||||
scaleSet: "ss",
|
||||
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
|
||||
nodeName: "agente6c2000005",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
|
||||
assert.NoError(t, err, test.description)
|
||||
|
||||
privateIP, publicIP, err := ss.GetIPByNodeName(test.nodeName)
|
||||
if test.expectError {
|
||||
assert.Error(t, err, test.description)
|
||||
continue
|
||||
}
|
||||
|
||||
assert.NoError(t, err, test.description)
|
||||
assert.Equal(t, test.expected, []string{privateIP, publicIP}, test.description)
|
||||
}
|
||||
}
|
||||
|
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
@ -19,16 +19,16 @@ package azure
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -36,6 +36,8 @@ var (
|
||||
lbCacheTTL = 2 * time.Minute
|
||||
nsgCacheTTL = 2 * time.Minute
|
||||
rtCacheTTL = 2 * time.Minute
|
||||
|
||||
azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)
|
||||
)
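Editor's note: a quick sketch of how this regular expression classifies provider IDs (any non-matching ID is treated as an unmanaged node); illustrative only.
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as azureNodeProviderIDRE above.
var azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)

func main() {
	for _, id := range []string{
		"azure:///subscriptions/0000/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm-0", // managed
		"aws:///subscriptions/0000/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm-0",   // unmanaged
		"vm-0", // unmanaged
	} {
		fmt.Printf("unmanaged=%v  %s\n", !azureNodeProviderIDRE.MatchString(id), id)
	}
}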
// checkExistsFromError inspects an error and returns a true if err is nil,
|
||||
@ -115,7 +117,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
|
||||
klog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
|
||||
return pip, false, nil
|
||||
}
|
||||
|
||||
@ -142,7 +144,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
|
||||
klog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
|
||||
return subnet, false, nil
|
||||
}
|
||||
|
||||
@ -189,14 +191,20 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
|
||||
// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)
|
||||
|
||||
resourceGroup, err := az.GetNodeResourceGroup(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vm, err := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
|
||||
klog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -218,7 +226,7 @@ func (az *Cloud) newLBCache() (*timedCache, error) {
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
|
||||
klog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -239,7 +247,7 @@ func (az *Cloud) newNSGCache() (*timedCache, error) {
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Security group %q not found with message: %q", key, message)
|
||||
klog.V(2).Infof("Security group %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -260,7 +268,7 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) {
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Route table %q not found with message: %q", key, message)
|
||||
klog.V(2).Infof("Route table %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -277,3 +285,21 @@ func (az *Cloud) useStandardLoadBalancer() bool {
|
||||
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
|
||||
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
|
||||
}
|
||||
|
||||
// IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider.
|
||||
// Those nodes includes on-prem or VMs from other clouds. They will not be added to load balancer
|
||||
// backends. Azure routes and managed disks are also not supported for them.
|
||||
func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) {
|
||||
unmanagedNodes, err := az.GetUnmanagedNodes()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return unmanagedNodes.Has(nodeName), nil
|
||||
}
|
||||
|
||||
// IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider.
|
||||
// All managed node's providerIDs are in format 'azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/.*'
|
||||
func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
|
||||
return !azureNodeProviderIDRE.Match([]byte(providerID))
|
||||
}
|
||||
|
91
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
@ -23,6 +23,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
func TestExtractNotFound(t *testing.T) {
|
||||
@ -51,3 +53,92 @@ func TestExtractNotFound(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsNodeUnmanaged(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
unmanagedNodes sets.String
|
||||
node string
|
||||
expected bool
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "unmanaged node should return true",
|
||||
unmanagedNodes: sets.NewString("node1", "node2"),
|
||||
node: "node1",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "managed node should return false",
|
||||
unmanagedNodes: sets.NewString("node1", "node2"),
|
||||
node: "node3",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "empty unmanagedNodes should return true",
|
||||
unmanagedNodes: sets.NewString(),
|
||||
node: "node3",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "no synced informer should report error",
|
||||
unmanagedNodes: sets.NewString(),
|
||||
node: "node1",
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
az := getTestCloud()
|
||||
for _, test := range tests {
|
||||
az.unmanagedNodes = test.unmanagedNodes
|
||||
if test.expectErr {
|
||||
az.nodeInformerSynced = func() bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
real, err := az.IsNodeUnmanaged(test.node)
|
||||
if test.expectErr {
|
||||
assert.Error(t, err, test.name)
|
||||
continue
|
||||
}
|
||||
|
||||
assert.NoError(t, err, test.name)
|
||||
assert.Equal(t, test.expected, real, test.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsNodeUnmanagedByProviderID(t *testing.T) {
|
||||
tests := []struct {
|
||||
providerID string
|
||||
expected bool
|
||||
name string
|
||||
}{
|
||||
{
|
||||
providerID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
providerID: CloudProviderName + "://",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
providerID: ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
providerID: "aws:///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
providerID: "k8s-agent-AAAAAAAA-0",
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
az := getTestCloud()
|
||||
for _, test := range tests {
|
||||
isUnmanagedNode := az.IsNodeUnmanagedByProviderID(test.providerID)
|
||||
assert.Equal(t, test.expected, isUnmanagedNode, test.providerID)
|
||||
}
|
||||
}
|
||||
|
112
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
112
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
@ -18,54 +18,74 @@ package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"
|
||||
|
||||
var faultMutex = &sync.Mutex{}
|
||||
var faultDomain *string
|
||||
|
||||
type instanceInfo struct {
|
||||
ID string `json:"ID"`
|
||||
UpdateDomain string `json:"UD"`
|
||||
FaultDomain string `json:"FD"`
|
||||
// makeZone returns the zone value in format of <region>-<zone-id>.
|
||||
func (az *Cloud) makeZone(zoneID int) string {
|
||||
return fmt.Sprintf("%s-%d", strings.ToLower(az.Location), zoneID)
|
||||
}
|
||||
|
||||
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
|
||||
// isAvailabilityZone returns true if the zone is in format of <region>-<zone-id>.
|
||||
func (az *Cloud) isAvailabilityZone(zone string) bool {
|
||||
return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
|
||||
}
|
||||
|
||||
// GetZoneID returns the ID of zone from node's zone label.
|
||||
func (az *Cloud) GetZoneID(zoneLabel string) string {
|
||||
if !az.isAvailabilityZone(zoneLabel) {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
|
||||
}
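Editor's note: self-contained copies of the three zone helpers above, with a stub struct standing in for Cloud; illustrative only.
package main

import (
	"fmt"
	"strings"
)

type cloud struct{ Location string }

// Zone labels are "<region>-<zone-id>"; anything else (for example a bare
// fault domain such as "1") is not treated as an availability zone.
func (az *cloud) makeZone(zoneID int) string {
	return fmt.Sprintf("%s-%d", strings.ToLower(az.Location), zoneID)
}

func (az *cloud) isAvailabilityZone(zone string) bool {
	return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
}

func (az *cloud) GetZoneID(zoneLabel string) string {
	if !az.isAvailabilityZone(zoneLabel) {
		return ""
	}
	return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
}

func main() {
	az := &cloud{Location: "westus2"}
	fmt.Println(az.makeZone(2))                     // westus2-2
	fmt.Println(az.isAvailabilityZone("westus2-2")) // true
	fmt.Println(az.isAvailabilityZone("1"))         // false (fault domain, not a zone)
	fmt.Println(az.GetZoneID("westus2-2"))          // 2
}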
// GetZone returns the Zone containing the current availability zone and locality region that the program is running in.
|
||||
// If the node is not running with availability zones, then it will fall back to fault domain.
|
||||
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
|
||||
return az.getZoneFromURL(instanceInfoURL)
|
||||
}
|
||||
metadata, err := az.metadata.GetMetadata()
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
// This is injectable for testing.
|
||||
func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
|
||||
faultMutex.Lock()
|
||||
defer faultMutex.Unlock()
|
||||
if faultDomain == nil {
|
||||
var err error
|
||||
faultDomain, err = fetchFaultDomain(url)
|
||||
if metadata.Compute == nil {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata")
|
||||
}
|
||||
|
||||
zone := ""
|
||||
if metadata.Compute.Zone != "" {
|
||||
zoneID, err := strconv.Atoi(metadata.Compute.Zone)
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err)
|
||||
}
|
||||
zone = az.makeZone(zoneID)
|
||||
} else {
|
||||
klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain")
|
||||
zone = metadata.Compute.FaultDomain
|
||||
}
|
||||
zone := cloudprovider.Zone{
|
||||
FailureDomain: *faultDomain,
|
||||
|
||||
return cloudprovider.Zone{
|
||||
FailureDomain: zone,
|
||||
Region: az.Location,
|
||||
}
|
||||
return zone, nil
|
||||
}, nil
|
||||
}
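Editor's note: a minimal sketch of the fallback decision in the new GetZone, with plain string parameters standing in for the instance-metadata fields; illustrative only.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Prefer the availability zone reported by instance metadata; when the node is
// not zoned, fall back to its fault domain, as GetZone does above.
func failureDomain(location, metadataZone, faultDomain string) (string, error) {
	if metadataZone != "" {
		zoneID, err := strconv.Atoi(metadataZone)
		if err != nil {
			return "", fmt.Errorf("failed to parse zone ID %q: %v", metadataZone, err)
		}
		return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID), nil
	}
	return faultDomain, nil
}

func main() {
	fmt.Println(failureDomain("westus2", "2", "1")) // westus2-2 <nil>  (zoned node)
	fmt.Println(failureDomain("westus2", "", "1"))  // 1 <nil>          (fault-domain fallback)
}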
// GetZoneByProviderID implements Zones.GetZoneByProviderID
|
||||
// This is particularly useful in external cloud providers where the kubelet
|
||||
// does not initialize node data.
|
||||
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
|
||||
// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
if az.IsNodeUnmanagedByProviderID(providerID) {
|
||||
klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID)
|
||||
return cloudprovider.Zone{}, nil
|
||||
}
|
||||
|
||||
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
@ -78,27 +98,15 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl
|
||||
// This is particularly useful in external cloud providers where the kubelet
|
||||
// does not initialize node data.
|
||||
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
|
||||
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
|
||||
unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
if unmanaged {
|
||||
klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
|
||||
return cloudprovider.Zone{}, nil
|
||||
}
|
||||
|
||||
return az.vmSet.GetZoneByNodeName(string(nodeName))
|
||||
}
|
||||
|
||||
func fetchFaultDomain(url string) (*string, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return readFaultDomain(resp.Body)
|
||||
}
|
||||
|
||||
func readFaultDomain(reader io.Reader) (*string, error) {
|
||||
var instanceInfo instanceInfo
|
||||
body, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(body, &instanceInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &instanceInfo.FaultDomain, nil
|
||||
}
|
||||
|
73
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones_test.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsAvailabilityZone(t *testing.T) {
|
||||
location := "eastus"
|
||||
az := &Cloud{
|
||||
Config: Config{
|
||||
Location: location,
|
||||
},
|
||||
}
|
||||
tests := []struct {
|
||||
desc string
|
||||
zone string
|
||||
expected bool
|
||||
}{
|
||||
{"empty string should return false", "", false},
|
||||
{"wrong farmat should return false", "123", false},
|
||||
{"wrong location should return false", "chinanorth-1", false},
|
||||
{"correct zone should return true", "eastus-1", true},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
actual := az.isAvailabilityZone(test.zone)
|
||||
if actual != test.expected {
|
||||
t.Errorf("test [%q] get unexpected result: %v != %v", test.desc, actual, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetZoneID(t *testing.T) {
|
||||
location := "eastus"
|
||||
az := &Cloud{
|
||||
Config: Config{
|
||||
Location: location,
|
||||
},
|
||||
}
|
||||
tests := []struct {
|
||||
desc string
|
||||
zone string
|
||||
expected string
|
||||
}{
|
||||
{"empty string should return empty string", "", ""},
|
||||
{"wrong farmat should return empty string", "123", ""},
|
||||
{"wrong location should return empty string", "chinanorth-1", ""},
|
||||
{"correct zone should return zone ID", "eastus-1", "1"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
actual := az.GetZoneID(test.zone)
|
||||
if actual != test.expected {
|
||||
t.Errorf("test [%q] get unexpected result: %q != %q", test.desc, actual, test.expected)
|
||||
}
|
||||
}
|
||||
}