mirror of
https://github.com/ceph/ceph-csi.git
vendor updates

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD (generated, vendored, 42 lines changed)
@@ -12,18 +12,25 @@ go_library(
        "azure.go",
        "azure_backoff.go",
        "azure_blobDiskController.go",
        "azure_controllerCommon.go",
        "azure_cache.go",
        "azure_client.go",
        "azure_controller_common.go",
        "azure_controller_standard.go",
        "azure_controller_vmss.go",
        "azure_fakes.go",
        "azure_file.go",
        "azure_instance_metadata.go",
        "azure_instances.go",
        "azure_loadbalancer.go",
        "azure_managedDiskController.go",
        "azure_metrics.go",
        "azure_routes.go",
        "azure_standard.go",
        "azure_storage.go",
        "azure_storageaccount.go",
        "azure_util.go",
        "azure_util_vmss.go",
        "azure_vmsets.go",
        "azure_vmss.go",
        "azure_vmss_cache.go",
        "azure_wrap.go",
        "azure_zones.go",
    ],
@@ -31,6 +38,7 @@ go_library(
    deps = [
        "//pkg/api/v1/service:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/azure/auth:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/version:go_default_library",
        "//pkg/volume:go_default_library",
@@ -38,6 +46,7 @@ go_library(
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
@@ -45,13 +54,15 @@ go_library(
        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
        "//vendor/github.com/ghodss/yaml:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
        "//vendor/golang.org/x/crypto/pkcs12:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
    ],
)
@@ -59,25 +70,35 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "azure_backoff_test.go",
        "azure_cache_test.go",
        "azure_loadbalancer_test.go",
        "azure_metrics_test.go",
        "azure_routes_test.go",
        "azure_standard_test.go",
        "azure_storage_test.go",
        "azure_storageaccount_test.go",
        "azure_test.go",
        "azure_util_test.go",
        "azure_vmss_cache_test.go",
        "azure_vmss_test.go",
        "azure_wrap_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/v1/service:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/azure/auth:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
    ],
)

@@ -90,6 +111,9 @@ filegroup(

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers/azure/auth:all-srcs",
    ],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS (generated, vendored, 10 lines changed)
@@ -1,6 +1,16 @@
approvers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- karataliu
- khenidak
reviewers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- karataliu
- khenidak

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD (generated, vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["azure_auth.go"],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/golang.org/x/crypto/pkcs12:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go (generated, vendored, new file, 124 lines)
@@ -0,0 +1,124 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
    "crypto/rsa"
    "crypto/x509"
    "fmt"
    "io/ioutil"

    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/golang/glog"
    "golang.org/x/crypto/pkcs12"
)

// AzureAuthConfig holds auth related part of cloud config
type AzureAuthConfig struct {
    // The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
    Cloud string `json:"cloud" yaml:"cloud"`
    // The AAD Tenant ID for the Subscription that the cluster is deployed in
    TenantID string `json:"tenantId" yaml:"tenantId"`
    // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientID string `json:"aadClientId" yaml:"aadClientId"`
    // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
    // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
    // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
    // Use managed service identity for the virtual machine to access Azure ARM APIs
    UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
    // The ID of the Azure Subscription that the cluster is deployed in
    SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
}

// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
    oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
    if err != nil {
        return nil, fmt.Errorf("creating the OAuth config: %v", err)
    }

    if config.UseManagedIdentityExtension {
        glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
        msiEndpoint, err := adal.GetMSIVMEndpoint()
        if err != nil {
            return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
        }
        return adal.NewServicePrincipalTokenFromMSI(
            msiEndpoint,
            env.ServiceManagementEndpoint)
    }

    if len(config.AADClientSecret) > 0 {
        glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
        return adal.NewServicePrincipalToken(
            *oauthConfig,
            config.AADClientID,
            config.AADClientSecret,
            env.ServiceManagementEndpoint)
    }

    if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
        glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
        certData, err := ioutil.ReadFile(config.AADClientCertPath)
        if err != nil {
            return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
        }
        certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
        if err != nil {
            return nil, fmt.Errorf("decoding the client certificate: %v", err)
        }
        return adal.NewServicePrincipalTokenFromCertificate(
            *oauthConfig,
            config.AADClientID,
            certificate,
            privateKey,
            env.ServiceManagementEndpoint)
    }

    return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
}

// ParseAzureEnvironment returns azure environment by name
func ParseAzureEnvironment(cloudName string) (*azure.Environment, error) {
    var env azure.Environment
    var err error
    if cloudName == "" {
        env = azure.PublicCloud
    } else {
        env, err = azure.EnvironmentFromName(cloudName)
    }
    return &env, err
}

// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
// the private RSA key
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
    privateKey, certificate, err := pkcs12.Decode(pkcs, password)
    if err != nil {
        return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
    }
    rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
    if !isRsaKey {
        return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
    }

    return certificate, rsaPrivateKey, nil
}
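A minimal caller-side sketch (not part of the vendored code) of how this new auth package fits together: resolve the cloud environment first, then hand the config to GetServicePrincipalToken, which tries managed identity, then client secret, then client certificate, in that order. All credential values below are placeholders.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
)

func main() {
    cfg := &auth.AzureAuthConfig{
        Cloud:           "AzurePublicCloud",                     // resolved by ParseAzureEnvironment
        TenantID:        "00000000-0000-0000-0000-000000000000", // placeholder
        AADClientID:     "11111111-1111-1111-1111-111111111111", // placeholder
        AADClientSecret: "not-a-real-secret",                    // placeholder
    }

    env, err := auth.ParseAzureEnvironment(cfg.Cloud)
    if err != nil {
        panic(err)
    }

    // With only AADClientSecret set, this takes the client_id+client_secret path.
    token, err := auth.GetServicePrincipalToken(cfg, env)
    if err != nil {
        panic(err)
    }
    fmt.Println("got service principal token:", token != nil)
}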

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go (generated, vendored, 424 lines changed)
@@ -17,30 +17,23 @@ limitations under the License.
package azure

import (
    "crypto/rsa"
    "crypto/x509"
    "fmt"
    "io"
    "io/ioutil"
    "strings"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/util/flowcontrol"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/version"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/Azure/azure-sdk-for-go/arm/disk"
    "github.com/Azure/azure-sdk-for-go/arm/network"
    "github.com/Azure/azure-sdk-for-go/arm/storage"
    "github.com/Azure/go-autorest/autorest"
    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/ghodss/yaml"
    "github.com/golang/glog"
    "golang.org/x/crypto/pkcs12"
    "k8s.io/apimachinery/pkg/util/wait"
)

const (
@@ -61,12 +54,8 @@ const (
// Config holds the configuration parsed from the --cloud-config flag
// All fields are required unless otherwise specified
type Config struct {
    // The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
    Cloud string `json:"cloud" yaml:"cloud"`
    // The AAD Tenant ID for the Subscription that the cluster is deployed in
    TenantID string `json:"tenantId" yaml:"tenantId"`
    // The ID of the Azure Subscription that the cluster is deployed in
    SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
    auth.AzureAuthConfig

    // The name of the resource group that the cluster is deployed in
    ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
    // The location of the resource group that the cluster is deployed in
@@ -87,7 +76,7 @@ type Config struct {
    // the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
    // In other words, if you use multiple agent pools (availability sets), you MUST set this field.
    PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
    // The type of azure nodes. Candidate valudes are: vmss and standard.
    // The type of azure nodes. Candidate values are: vmss and standard.
    // If not set, it will be default to standard.
    VMType string `json:"vmType" yaml:"vmType"`
    // The name of the scale set that should be used as the load balancer backend.
@@ -96,15 +85,6 @@ type Config struct {
    // the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
    // In other words, if you use multiple agent pools (scale sets), you MUST set this field.
    PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"`

    // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientID string `json:"aadClientId" yaml:"aadClientId"`
    // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
    // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
    // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
    AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
    // Enable exponential backoff to manage resource request retries
    CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"`
    // Backoff retry limit
@@ -117,10 +97,14 @@ type Config struct {
    CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter" yaml:"cloudProviderBackoffJitter"`
    // Enable rate limiting
    CloudProviderRateLimit bool `json:"cloudProviderRateLimit" yaml:"cloudProviderRateLimit"`
    // Rate limit QPS
    // Rate limit QPS (Read)
    CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"`
    // Rate limit Bucket Size
    CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"`
    // Rate limit QPS (Write)
    CloudProviderRateLimitQPSWrite float32 `json:"cloudProviderRateLimitQPSWrite" yaml:"cloudProviderRateLimitQPSWrite"`
    // Rate limit Bucket Size
    CloudProviderRateLimitBucketWrite int `json:"cloudProviderRateLimitBucketWrite" yaml:"cloudProviderRateLimitBucketWrite"`

    // Use instance metadata service where possible
    UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
@@ -132,76 +116,33 @@ type Config struct {
    MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"`
}

// VirtualMachinesClient defines needed functions for azure network.VirtualMachinesClient
type VirtualMachinesClient interface {
    CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error)
    Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error)
    List(resourceGroupName string) (result compute.VirtualMachineListResult, err error)
    ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error)
}

// InterfacesClient defines needed functions for azure network.InterfacesClient
type InterfacesClient interface {
    CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error)
    Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error)
    GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error)
}

// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient
type LoadBalancersClient interface {
    CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error)
    Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
    Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error)
    List(resourceGroupName string) (result network.LoadBalancerListResult, err error)
    ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error)
}

// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient
type PublicIPAddressesClient interface {
    CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error)
    Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
    Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error)
    List(resourceGroupName string) (result network.PublicIPAddressListResult, err error)
    ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error)
}

// SubnetsClient defines needed functions for azure network.SubnetsClient
type SubnetsClient interface {
    CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error)
    Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
    Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error)
    List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error)
}

// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient
type SecurityGroupsClient interface {
    CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error)
    Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
    Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error)
    List(resourceGroupName string) (result network.SecurityGroupListResult, err error)
}

// Cloud holds the config and clients
type Cloud struct {
    Config
    Environment             azure.Environment
    RoutesClient            network.RoutesClient
    SubnetsClient           SubnetsClient
    InterfacesClient        InterfacesClient
    RouteTablesClient       network.RouteTablesClient
    LoadBalancerClient      LoadBalancersClient
    PublicIPAddressesClient PublicIPAddressesClient
    SecurityGroupsClient    SecurityGroupsClient
    VirtualMachinesClient   VirtualMachinesClient
    StorageAccountClient    storage.AccountsClient
    DisksClient             disk.DisksClient
    operationPollRateLimiter flowcontrol.RateLimiter
    resourceRequestBackoff   wait.Backoff
    metadata                 *InstanceMetadata
    Environment             azure.Environment
    RoutesClient            RoutesClient
    SubnetsClient           SubnetsClient
    InterfacesClient        InterfacesClient
    RouteTablesClient       RouteTablesClient
    LoadBalancerClient      LoadBalancersClient
    PublicIPAddressesClient PublicIPAddressesClient
    SecurityGroupsClient    SecurityGroupsClient
    VirtualMachinesClient   VirtualMachinesClient
    StorageAccountClient    StorageAccountClient
    DisksClient             DisksClient
    FileClient              FileClient
    resourceRequestBackoff  wait.Backoff
    metadata                *InstanceMetadata
    vmSet                   VMSet

    // Clients for vmss.
    VirtualMachineScaleSetsClient   compute.VirtualMachineScaleSetsClient
    VirtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient
    VirtualMachineScaleSetsClient   VirtualMachineScaleSetsClient
    VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient

    vmCache  *timedCache
    lbCache  *timedCache
    nsgCache *timedCache
    rtCache  *timedCache

    *BlobDiskController
    *ManagedDiskController
@@ -212,179 +153,86 @@ func init() {
    cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
}

// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
// the private RSA key
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
    privateKey, certificate, err := pkcs12.Decode(pkcs, password)
    if err != nil {
        return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
    }
    rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
    if !isRsaKey {
        return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
    }

    return certificate, rsaPrivateKey, nil
}

// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
    oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
    if err != nil {
        return nil, fmt.Errorf("creating the OAuth config: %v", err)
    }

    if config.UseManagedIdentityExtension {
        glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
        msiEndpoint, err := adal.GetMSIVMEndpoint()
        if err != nil {
            return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
        }
        return adal.NewServicePrincipalTokenFromMSI(
            msiEndpoint,
            env.ServiceManagementEndpoint)
    }

    if len(config.AADClientSecret) > 0 {
        glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
        return adal.NewServicePrincipalToken(
            *oauthConfig,
            config.AADClientID,
            config.AADClientSecret,
            env.ServiceManagementEndpoint)
    }

    if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
        glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
        certData, err := ioutil.ReadFile(config.AADClientCertPath)
        if err != nil {
            return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
        }
        certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
        if err != nil {
            return nil, fmt.Errorf("decoding the client certificate: %v", err)
        }
        return adal.NewServicePrincipalTokenFromCertificate(
            *oauthConfig,
            config.AADClientID,
            certificate,
            privateKey,
            env.ServiceManagementEndpoint)
    }

    return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
}

// NewCloud returns a Cloud with initialized clients
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
    config, env, err := ParseConfig(configReader)
    config, err := parseConfig(configReader)
    if err != nil {
        return nil, err
    }

    env, err := auth.ParseAzureEnvironment(config.Cloud)
    if err != nil {
        return nil, err
    }

    servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env)
    if err != nil {
        return nil, err
    }

    // operationPollRateLimiter.Accept() is a no-op if rate limits are configured off.
    operationPollRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
    operationPollRateLimiterWrite := flowcontrol.NewFakeAlwaysRateLimiter()

    // If reader is provided (and no writer) we will
    // use the same value for both.
    if config.CloudProviderRateLimit {
        // Assign rate limit defaults if no configuration was passed in
        if config.CloudProviderRateLimitQPS == 0 {
            config.CloudProviderRateLimitQPS = rateLimitQPSDefault
        }
        if config.CloudProviderRateLimitBucket == 0 {
            config.CloudProviderRateLimitBucket = rateLimitBucketDefault
        }
        if config.CloudProviderRateLimitQPSWrite == 0 {
            config.CloudProviderRateLimitQPSWrite = rateLimitQPSDefault
        }
        if config.CloudProviderRateLimitBucketWrite == 0 {
            config.CloudProviderRateLimitBucketWrite = rateLimitBucketDefault
        }

        operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
            config.CloudProviderRateLimitQPS,
            config.CloudProviderRateLimitBucket)

        operationPollRateLimiterWrite = flowcontrol.NewTokenBucketRateLimiter(
            config.CloudProviderRateLimitQPSWrite,
            config.CloudProviderRateLimitBucketWrite)

        glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
            config.CloudProviderRateLimitQPS,
            config.CloudProviderRateLimitBucket)

        glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
            config.CloudProviderRateLimitQPSWrite,
            config.CloudProviderRateLimitBucketWrite)

    }
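The read/write split above is built from plain client-go token buckets. A standalone sketch (not vendored code) of how those limiters behave; the QPS and bucket literals stand in for rateLimitQPSDefault and rateLimitBucketDefault, whose values are defined outside this diff:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    // Stand-ins for rateLimitQPSDefault / rateLimitBucketDefault.
    reader := flowcontrol.NewTokenBucketRateLimiter(1.0, 5)   // read ops: QPS, bucket size
    writer := flowcontrol.NewTokenBucketRateLimiter(10.0, 50) // write ops

    start := time.Now()
    for i := 0; i < 3; i++ {
        reader.Accept() // blocks when the bucket is empty, as the ARM clients do
    }
    writer.Accept()
    fmt.Printf("4 rate-limited ops took %v\n", time.Since(start))
}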
    azClientConfig := &azClientConfig{
        subscriptionID:          config.SubscriptionID,
        resourceManagerEndpoint: env.ResourceManagerEndpoint,
        servicePrincipalToken:   servicePrincipalToken,
        rateLimiterReader:       operationPollRateLimiter,
        rateLimiterWriter:       operationPollRateLimiterWrite,
    }
    az := Cloud{
        Config:      *config,
        Environment: *env,
    }

    servicePrincipalToken, err := GetServicePrincipalToken(config, env)
    if err != nil {
        return nil, err
    }

    subnetsClient := network.NewSubnetsClient(az.SubscriptionID)
    subnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
    subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    subnetsClient.PollingDelay = 5 * time.Second
    configureUserAgent(&subnetsClient.Client)
    az.SubnetsClient = subnetsClient

    az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID)
    az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint
    az.RouteTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    az.RouteTablesClient.PollingDelay = 5 * time.Second
    configureUserAgent(&az.RouteTablesClient.Client)

    az.RoutesClient = network.NewRoutesClient(az.SubscriptionID)
    az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint
    az.RoutesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    az.RoutesClient.PollingDelay = 5 * time.Second
    configureUserAgent(&az.RoutesClient.Client)

    interfacesClient := network.NewInterfacesClient(az.SubscriptionID)
    interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint
    interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    interfacesClient.PollingDelay = 5 * time.Second
    configureUserAgent(&interfacesClient.Client)
    az.InterfacesClient = interfacesClient

    loadBalancerClient := network.NewLoadBalancersClient(az.SubscriptionID)
    loadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint
    loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    loadBalancerClient.PollingDelay = 5 * time.Second
    configureUserAgent(&loadBalancerClient.Client)
    az.LoadBalancerClient = loadBalancerClient

    virtualMachinesClient := compute.NewVirtualMachinesClient(az.SubscriptionID)
    virtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
    virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    virtualMachinesClient.PollingDelay = 5 * time.Second
    configureUserAgent(&virtualMachinesClient.Client)
    az.VirtualMachinesClient = virtualMachinesClient

    publicIPAddressClient := network.NewPublicIPAddressesClient(az.SubscriptionID)
    publicIPAddressClient.BaseURI = az.Environment.ResourceManagerEndpoint
    publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    publicIPAddressClient.PollingDelay = 5 * time.Second
    configureUserAgent(&publicIPAddressClient.Client)
    az.PublicIPAddressesClient = publicIPAddressClient

    securityGroupsClient := network.NewSecurityGroupsClient(az.SubscriptionID)
    securityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint
    securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    securityGroupsClient.PollingDelay = 5 * time.Second
    configureUserAgent(&securityGroupsClient.Client)
    az.SecurityGroupsClient = securityGroupsClient

    virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID)
    az.VirtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint
    az.VirtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    az.VirtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second
    configureUserAgent(&virtualMachineScaleSetVMsClient.Client)
    az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient

    virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID)
    az.VirtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
    az.VirtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    az.VirtualMachineScaleSetsClient.PollingDelay = 5 * time.Second
    configureUserAgent(&virtualMachineScaleSetsClient.Client)
    az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient

    az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
    az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    configureUserAgent(&az.StorageAccountClient.Client)

    az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
    az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
    configureUserAgent(&az.DisksClient.Client)

    // Conditionally configure rate limits
    if az.CloudProviderRateLimit {
        // Assign rate limit defaults if no configuration was passed in
        if az.CloudProviderRateLimitQPS == 0 {
            az.CloudProviderRateLimitQPS = rateLimitQPSDefault
        }
        if az.CloudProviderRateLimitBucket == 0 {
            az.CloudProviderRateLimitBucket = rateLimitBucketDefault
        }
        az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
            az.CloudProviderRateLimitQPS,
            az.CloudProviderRateLimitBucket)
        glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%g, bucket=%d",
            az.CloudProviderRateLimitQPS,
            az.CloudProviderRateLimitBucket)
    } else {
        // if rate limits are configured off, az.operationPollRateLimiter.Accept() is a no-op
        az.operationPollRateLimiter = flowcontrol.NewFakeAlwaysRateLimiter()
        DisksClient:                     newAzDisksClient(azClientConfig),
        RoutesClient:                    newAzRoutesClient(azClientConfig),
        SubnetsClient:                   newAzSubnetsClient(azClientConfig),
        InterfacesClient:                newAzInterfacesClient(azClientConfig),
        RouteTablesClient:               newAzRouteTablesClient(azClientConfig),
        LoadBalancerClient:              newAzLoadBalancersClient(azClientConfig),
        SecurityGroupsClient:            newAzSecurityGroupsClient(azClientConfig),
        StorageAccountClient:            newAzStorageAccountClient(azClientConfig),
        VirtualMachinesClient:           newAzVirtualMachinesClient(azClientConfig),
        PublicIPAddressesClient:         newAzPublicIPAddressesClient(azClientConfig),
        VirtualMachineScaleSetsClient:   newAzVirtualMachineScaleSetsClient(azClientConfig),
        VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
        FileClient:                      &azureFileClient{env: *env},
    }

    // Conditionally configure resource request backoff
@@ -421,44 +269,59 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
        az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
    }

    if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
        az.vmSet, err = newScaleSet(&az)
        if err != nil {
            return nil, err
        }
    } else {
        az.vmSet = newAvailabilitySet(&az)
    }

    az.vmCache, err = az.newVMCache()
    if err != nil {
        return nil, err
    }

    az.lbCache, err = az.newLBCache()
    if err != nil {
        return nil, err
    }

    az.nsgCache, err = az.newNSGCache()
    if err != nil {
        return nil, err
    }

    az.rtCache, err = az.newRouteTableCache()
    if err != nil {
        return nil, err
    }

    if err := initDiskControllers(&az); err != nil {
        return nil, err
    }
    return &az, nil
}

// ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file
func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) {
// parseConfig returns a parsed configuration for an Azure cloudprovider config file
func parseConfig(configReader io.Reader) (*Config, error) {
    var config Config
    var env azure.Environment

    if configReader == nil {
        return &config, &env, nil
        return &config, nil
    }

    configContents, err := ioutil.ReadAll(configReader)
    if err != nil {
        return nil, nil, err
        return nil, err
    }
    err = yaml.Unmarshal(configContents, &config)
    if err != nil {
        return nil, nil, err
        return nil, err
    }

    if config.Cloud == "" {
        env = azure.PublicCloud
    } else {
        env, err = azure.EnvironmentFromName(config.Cloud)
        if err != nil {
            return nil, nil, err
        }
    }

    if config.VMType != "" {
        config.VMType = strings.ToLower(config.VMType)
    }

    return &config, &env, nil
    return &config, nil
}
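For reference, a sketch of the YAML shape parseConfig consumes. The struct here is a trimmed, hypothetical stand-in for the vendored Config; ghodss/yaml converts YAML to JSON and unmarshals through the json tags, which is why the vendored struct carries both tag sets:

package main

import (
    "fmt"

    "github.com/ghodss/yaml"
)

// trimmedConfig is a hypothetical subset of the vendored Config.
type trimmedConfig struct {
    Cloud          string `json:"cloud" yaml:"cloud"`
    TenantID       string `json:"tenantId" yaml:"tenantId"`
    SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
    VMType         string `json:"vmType" yaml:"vmType"`
}

func main() {
    data := []byte(`
cloud: AzurePublicCloud
tenantId: 00000000-0000-0000-0000-000000000000
subscriptionId: 11111111-1111-1111-1111-111111111111
vmType: VMSS
`)
    var c trimmedConfig
    if err := yaml.Unmarshal(data, &c); err != nil {
        panic(err)
    }
    // parseConfig would additionally lower-case VMType ("VMSS" -> "vmss").
    fmt.Printf("%+v\n", c)
}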
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
@@ -489,11 +352,6 @@ func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
    return az, true
}

// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
    return nameservers, searches
}

// HasClusterID returns true if the cluster has a clusterID
func (az *Cloud) HasClusterID() bool {
    return true
@@ -518,15 +376,9 @@ func initDiskControllers(az *Cloud) error {
    // needed by both blob disk and managed disk controllers

    common := &controllerCommon{
        aadResourceEndPoint:   az.Environment.ServiceManagementEndpoint,
        clientID:              az.AADClientID,
        clientSecret:          az.AADClientSecret,
        location:              az.Location,
        storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
        managementEndpoint:    az.Environment.ResourceManagerEndpoint,
        resourceGroup:         az.ResourceGroup,
        tenantID:              az.TenantID,
        tokenEndPoint:         az.Environment.ActiveDirectoryEndpoint,
        subscriptionID:        az.SubscriptionID,
        cloud:                 az,
    }

vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go (generated, vendored, 193 lines changed)
@@ -17,13 +17,17 @@ limitations under the License.
package azure

import (
    "k8s.io/apimachinery/pkg/util/wait"
    "context"
    "net/http"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/Azure/azure-sdk-for-go/arm/network"
    computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
    "github.com/Azure/go-autorest/autorest"
    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
)

// requestBackoff if backoff is disabled in cloud provider it
@@ -42,53 +46,22 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
}

// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, bool, error) {
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
    var machine compute.VirtualMachine
    var exists bool
    var retryErr error
    err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        var retryErr error
        machine, exists, retryErr = az.getVirtualMachine(name)
        machine, retryErr = az.getVirtualMachine(name)
        if retryErr != nil {
            glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
            return false, nil
        }
        glog.V(2).Infof("backoff: success")
        glog.V(2).Info("backoff: success")
        return true, nil
    })
    return machine, exists, err
}
    if err == wait.ErrWaitTimeout {
        err = retryErr
    }

// GetScaleSetsVMWithRetry invokes az.getScaleSetsVM with exponential backoff retry
func (az *Cloud) GetScaleSetsVMWithRetry(name types.NodeName) (compute.VirtualMachineScaleSetVM, bool, error) {
    var machine compute.VirtualMachineScaleSetVM
    var exists bool
    err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) {
        var retryErr error
        machine, exists, retryErr = az.getVmssVirtualMachine(name)
        if retryErr != nil {
            glog.Errorf("GetScaleSetsVMWithRetry backoff: failure, will retry,err=%v", retryErr)
            return false, nil
        }
        glog.V(10).Infof("GetScaleSetsVMWithRetry backoff: success")
        return true, nil
    })
    return machine, exists, err
}

// VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry
func (az *Cloud) VirtualMachineClientGetWithRetry(resourceGroup, vmName string, types compute.InstanceViewTypes) (compute.VirtualMachine, error) {
    var machine compute.VirtualMachine
    err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        var retryErr error
        az.operationPollRateLimiter.Accept()
        machine, retryErr = az.VirtualMachinesClient.Get(resourceGroup, vmName, types)
        if retryErr != nil {
            glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
            return false, nil
        }
        glog.V(2).Infof("backoff: success")
        return true, nil
    })
    return machine, err
}

@@ -98,10 +71,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine,
    var result compute.VirtualMachineListResult
    err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        var retryErr error
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("VirtualMachinesClient.List(%v): start", az.ResourceGroup)
        result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup)
        glog.V(10).Infof("VirtualMachinesClient.List(%v): end", az.ResourceGroup)
        if retryErr != nil {
            glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
                az.ResourceGroup,
@@ -123,10 +93,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine,
    if result.NextLink != nil {
        err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
            var retryErr error
            az.operationPollRateLimiter.Accept()
            glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): start", az.ResourceGroup)
            result, retryErr = az.VirtualMachinesClient.ListNextResults(result)
            glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): end", az.ResourceGroup)
            result, retryErr = az.VirtualMachinesClient.ListNextResults(az.ResourceGroup, result)
            if retryErr != nil {
                glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
                    az.ResourceGroup, retryErr)
@@ -155,7 +122,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
            glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
            return false, nil
        }
        glog.V(2).Infof("backoff: success")
        glog.V(2).Info("backoff: success")
        return true, nil
    })
    return ip, err
@@ -164,26 +131,32 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): start", *sg.Name)
        respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
        resp := <-respChan
        err := <-errChan
        glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
        return processRetryResponse(resp.Response, err)
        done, err := processRetryResponse(resp.Response, err)
        if done && err == nil {
            // Invalidate the cache right after updating
            az.nsgCache.Delete(*sg.Name)
        }
        return done, err
    })
}

// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): start", *lb.Name)
        respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
        resp := <-respChan
        err := <-errChan
        glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
        return processRetryResponse(resp.Response, err)
        done, err := processRetryResponse(resp.Response, err)
        if done && err == nil {
            // Invalidate the cache right after updating
            az.lbCache.Delete(*lb.Name)
        }
        return done, err
    })
}

@@ -194,10 +167,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {

    err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        var retryErr error
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("LoadBalancerClient.List(%v): start", az.ResourceGroup)
        result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup)
        glog.V(10).Infof("LoadBalancerClient.List(%v): end", az.ResourceGroup)
        if retryErr != nil {
            glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
                az.ResourceGroup,
@@ -220,10 +190,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
    if result.NextLink != nil {
        err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
            var retryErr error
            az.operationPollRateLimiter.Accept()
            glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): start", az.ResourceGroup)
            result, retryErr = az.LoadBalancerClient.ListNextResults(result)
            glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): end", az.ResourceGroup)
            result, retryErr = az.LoadBalancerClient.ListNextResults(az.ResourceGroup, result)
            if retryErr != nil {
                glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
                    az.ResourceGroup,
@@ -243,23 +210,20 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
    return allLBs, nil
}

// ListPIPWithRetry list the PIP resources in az.ResourceGroup
func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
// ListPIPWithRetry list the PIP resources in the given resource group
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
    allPIPs := []network.PublicIPAddress{}
    var result network.PublicIPAddressListResult
    err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        var retryErr error
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", az.ResourceGroup)
        result, retryErr = az.PublicIPAddressesClient.List(az.ResourceGroup)
        glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", az.ResourceGroup)
        result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup)
        if retryErr != nil {
            glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
                az.ResourceGroup,
                pipResourceGroup,
                retryErr)
            return false, retryErr
        }
        glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", az.ResourceGroup)
        glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
        return true, nil
    })
    if err != nil {
@@ -271,21 +235,18 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
    allPIPs = append(allPIPs, *result.Value...)
    appendResults = false

    // follow the next link to get all the vms for resource group
    // follow the next link to get all the pip resources for resource group
    if result.NextLink != nil {
        err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
            var retryErr error
            az.operationPollRateLimiter.Accept()
            glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", az.ResourceGroup)
            result, retryErr = az.PublicIPAddressesClient.ListNextResults(result)
            glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", az.ResourceGroup)
            result, retryErr = az.PublicIPAddressesClient.ListNextResults(az.ResourceGroup, result)
            if retryErr != nil {
                glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
                    az.ResourceGroup,
                    pipResourceGroup,
                    retryErr)
                return false, retryErr
            }
            glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", az.ResourceGroup)
            glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", pipResourceGroup)
            return true, nil
        })
        if err != nil {
@@ -299,14 +260,12 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
}

// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error {
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): start", *pip.Name)
        respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil)
        respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil)
        resp := <-respChan
        err := <-errChan
        glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): end", *pip.Name)
        glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
        return processRetryResponse(resp.Response, err)
    })
}
@@ -314,8 +273,6 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error {
// CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): start", *nic.Name)
        respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
        resp := <-respChan
        err := <-errChan
@@ -325,14 +282,11 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
}

// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIPWithRetry(pipName string) error {
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): start", pipName)
        respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil)
        respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil)
        resp := <-respChan
        err := <-errChan
        glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): end", pipName)
        return processRetryResponse(resp, err)
    })
}
@@ -340,25 +294,24 @@ func (az *Cloud) DeletePublicIPWithRetry(pipName string) error {
// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("LoadBalancerClient.Delete(%s): start", lbName)
        respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil)
        resp := <-respChan
        err := <-errChan
        glog.V(10).Infof("LoadBalancerClient.Delete(%s): end", lbName)
        return processRetryResponse(resp, err)
        done, err := processRetryResponse(resp, err)
        if done && err == nil {
            // Invalidate the cache right after deleting
            az.lbCache.Delete(lbName)
        }
        return done, err
    })
}

// CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", *routeTable.Name)
        respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
        resp := <-respChan
        err := <-errChan
        glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", *routeTable.Name)
        return processRetryResponse(resp.Response, err)
    })
}
@@ -366,8 +319,6 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable
// CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): start", *route.Name)
        respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
        resp := <-respChan
        err := <-errChan
@@ -379,8 +330,6 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
// DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry
func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("RoutesClient.Delete(%s): start", az.RouteTableName)
        respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
        resp := <-respChan
        err := <-errChan
@@ -392,8 +341,6 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        az.operationPollRateLimiter.Accept()
        glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): start", vmName)
        respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
        resp := <-respChan
        err := <-errChan
@@ -402,6 +349,15 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualM
    })
}

// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters computepreview.VirtualMachineScaleSetVM) error {
    return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
        resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
        glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
        return processHTTPRetryResponse(resp, err)
    })
}
|
||||
// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
|
||||
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
|
||||
if isSuccessHTTPResponse(resp) {
|
||||
@ -413,8 +369,8 @@ func processRetryResponse(resp autorest.Response, err error) (bool, error) {
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
// Fall-through: stop periodic backoff, return error object from most recent request
|
||||
return true, err
|
||||
// Fall-through: stop periodic backoff
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
|
||||
@ -437,3 +393,36 @@ func isSuccessHTTPResponse(resp autorest.Response) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
// HTTP 4xx or 5xx suggests we should retry
|
||||
if 399 < resp.StatusCode && resp.StatusCode < 600 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
|
||||
if resp != nil {
|
||||
// HTTP 2xx suggests a successful response
|
||||
if 199 < resp.StatusCode && resp.StatusCode < 300 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
if shouldRetryHTTPRequest(resp, err) {
|
||||
glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Fall-through: stop periodic backoff
|
||||
return true, nil
|
||||
}
|
||||
|
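All of the *WithRetry helpers above share one shape: a wait.ExponentialBackoff loop whose condition function returns (false, nil) to keep retrying and (true, err) to stop, with processRetryResponse or processHTTPRetryResponse deciding which case applies. A minimal, self-contained sketch of that contract (not part of the vendored code; the simulated status codes and attempt counter are hypothetical):

package main

import (
    "fmt"
    "net/http"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    attempts := 0
    backoff := wait.Backoff{Steps: 5, Duration: 100 * time.Millisecond, Factor: 2.0}
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        attempts++
        // Simulated HTTP outcome: two 500s, then a 200.
        status := http.StatusInternalServerError
        if attempts >= 3 {
            status = http.StatusOK
        }
        // Mirrors processHTTPRetryResponse: a 2xx stops the loop, while a
        // retryable failure suppresses the error so backoff continues.
        if status >= 200 && status < 300 {
            return true, nil
        }
        return false, nil
    })
    fmt.Printf("attempts=%d err=%v\n", attempts, err) // attempts=3 err=<nil>
}

If all Steps are exhausted without the condition returning true, wait.ExponentialBackoff returns wait.ErrWaitTimeout, which is why the vendored helpers surface the last response through the condition's error value when they want the caller to see it.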
148
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff_test.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "net/http"
    "testing"

    "github.com/Azure/go-autorest/autorest"
)

func TestShouldRetry(t *testing.T) {
    tests := []struct {
        code     int
        err      error
        expected bool
    }{
        {
            code:     http.StatusBadRequest,
            expected: true,
        },
        {
            code:     http.StatusInternalServerError,
            expected: true,
        },
        {
            code:     http.StatusOK,
            err:      fmt.Errorf("some error"),
            expected: true,
        },
        {
            code:     http.StatusOK,
            expected: false,
        },
        {
            code:     399,
            expected: false,
        },
    }

    for _, test := range tests {
        resp := autorest.Response{
            Response: &http.Response{
                StatusCode: test.code,
            },
        }
        res := shouldRetryAPIRequest(resp, test.err)
        if res != test.expected {
            t.Errorf("expected: %v, saw: %v", test.expected, res)
        }
    }
}

func TestIsSuccessResponse(t *testing.T) {
    tests := []struct {
        code     int
        expected bool
    }{
        {
            code:     http.StatusNotFound,
            expected: false,
        },
        {
            code:     http.StatusInternalServerError,
            expected: false,
        },
        {
            code:     http.StatusOK,
            expected: true,
        },
    }

    for _, test := range tests {
        resp := autorest.Response{
            Response: &http.Response{
                StatusCode: test.code,
            },
        }
        res := isSuccessHTTPResponse(resp)
        if res != test.expected {
            t.Errorf("expected: %v, saw: %v", test.expected, res)
        }
    }
}

func TestProcessRetryResponse(t *testing.T) {
    tests := []struct {
        code int
        err  error
        stop bool
    }{
        {
            code: http.StatusBadRequest,
            stop: false,
        },
        {
            code: http.StatusInternalServerError,
            stop: false,
        },
        {
            code: http.StatusSeeOther,
            err:  fmt.Errorf("some error"),
            stop: false,
        },
        {
            code: http.StatusSeeOther,
            stop: true,
        },
        {
            code: http.StatusOK,
            stop: true,
        },
        {
            code: 399,
            stop: true,
        },
    }

    for _, test := range tests {
        resp := autorest.Response{
            Response: &http.Response{
                StatusCode: test.code,
            },
        }
        res, err := processRetryResponse(resp, test.err)
        if res != test.stop {
            t.Errorf("expected: %v, saw: %v", test.stop, res)
        }
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
    }
}
186
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
@@ -22,10 +22,8 @@ import (
    "fmt"
    "net/url"
    "regexp"
    "sync"

    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

@@ -60,15 +58,11 @@ type BlobDiskController struct {
}

var (
    defaultContainerName     = ""
    storageAccountNamePrefix = ""
    storageAccountNameMatch  = ""
    accountsLock = &sync.Mutex{}
    accountsLock = &sync.Mutex{}
)

func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
    c := BlobDiskController{common: common}
    c.setUniqueStrings()

    // get accounts
    accounts, err := c.getAllStorageAccounts()
@@ -84,46 +78,26 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (c *BlobDiskController) CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) {
    var err error
    accounts := []accountWithLocation{}
    if len(storageAccount) > 0 {
        accounts = append(accounts, accountWithLocation{Name: storageAccount})
    } else {
        // find a storage account
        accounts, err = c.common.cloud.getStorageAccounts()
        if err != nil {
            // TODO: create a storage account and container
            return "", "", 0, err
        }
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
    account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
    if err != nil {
        return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
    }
    for _, account := range accounts {
        glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
        if (storageAccountType == "" || account.StorageType == storageAccountType) && (location == "" || account.Location == location) || len(storageAccount) > 0 {
            // find the access key with this account
            key, err := c.common.cloud.getStorageAccesskey(account.Name)
            if err != nil {
                glog.V(2).Infof("no key found for storage account %s", account.Name)
                continue
            }

            client, err := azstorage.NewBasicClientOnSovereignCloud(account.Name, key, c.common.cloud.Environment)
            if err != nil {
                return "", "", 0, err
            }
            blobClient := client.GetBlobService()

            // create a page blob in this account's vhd container
            diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account.Name, name, vhdContainerName, int64(requestGB))
            if err != nil {
                return "", "", 0, err
            }

            glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
            return diskName, diskURI, requestGB, err
        }
    client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment)
    if err != nil {
        return "", "", 0, err
    }
    return "", "", 0, fmt.Errorf("failed to find a matching storage account")
    blobClient := client.GetBlobService()

    // create a page blob in this account's vhd container
    diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB))
    if err != nil {
        return "", "", 0, err
    }

    glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
    return diskName, diskURI, requestGB, err
}

// DeleteVolume deletes a VHD blob
@@ -252,7 +226,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT
        return "", err
    }

    _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB))
    _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB))
    if err != nil {
        return "", err
    }
@@ -281,9 +255,9 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
        return err
    }

    glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName)
    glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)

    container := blobSvc.GetContainerReference(defaultContainerName)
    container := blobSvc.GetContainerReference(vhdContainerName)
    blob := container.GetBlobReference(vhdName)
    _, err = blob.DeleteIfExists(nil)

@@ -292,26 +266,13 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
            c.accounts[storageAccountName].diskCount = int32(diskCount)
        } else {
            glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
            return nil // we have failed to aquire a new count. not an error condition
            return nil // we have failed to acquire a new count. not an error condition
        }
    }
    atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
    return err
}

//Sets unique strings to be used as accountnames && || blob containers names
func (c *BlobDiskController) setUniqueStrings() {
    uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID
    hash := MakeCRC32(uniqueString)
    //used to generate a unqie container name used by this cluster PVC
    defaultContainerName = hash

    storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash)
    // Used to filter relevant accounts (accounts used by shared PVC)
    storageAccountNameMatch = storageAccountNamePrefix
    // Used as a template to create new names for relevant accounts
    storageAccountNamePrefix = storageAccountNamePrefix + "%s"
}
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
    if account, exists := c.accounts[SAName]; exists && account.key != "" {
        return c.accounts[SAName].key, nil
@@ -359,13 +320,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
    var err error
    var blobSvc azstorage.BlobStorageClient

    // short circut the check via local cache
    // short circuit the check via local cache
    // we are forgiving the fact that account may not be in cache yet
    if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
        return nil
    }

    // not cached, check existance and readiness
    // not cached, check existence and readiness
    bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)

    // account does not exist
@@ -383,7 +344,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
            counter = counter + 1
            // check if we passed the max sleep
            if counter >= 20 {
                return fmt.Errorf("azureDisk - timeout waiting to aquire lock to validate account:%s readiness", storageAccountName)
                return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName)
            }
        }

@@ -392,7 +353,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
            c.accounts[storageAccountName].isValidating = 0
        }()

        // short circut the check again.
        // short circuit the check again.
        if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
            return nil
        }
@@ -426,13 +387,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
        return err
    }

    container := blobSvc.GetContainerReference(defaultContainerName)
    container := blobSvc.GetContainerReference(vhdContainerName)
    bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
    if err != nil {
        return err
    }
    if bCreated {
        glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName)
        glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
    }

    // flag so we no longer have to check on ARM
@@ -459,7 +420,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
    }
    params := azstorage.ListBlobsParameters{}

    container := blobSvc.GetContainerReference(defaultContainerName)
    container := blobSvc.GetContainerReference(vhdContainerName)
    response, err := container.ListBlobs(params)
    if err != nil {
        return 0, err
@@ -471,7 +432,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
}

func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
    accountListResult, err := c.common.cloud.StorageAccountClient.List()
    accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(c.common.resourceGroup)
    if err != nil {
        return nil, err
    }
@@ -481,11 +442,11 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount

    accounts := make(map[string]*storageAccountState)
    for _, v := range *accountListResult.Value {
        if strings.Index(*v.Name, storageAccountNameMatch) != 0 {
        if v.Name == nil || v.Sku == nil {
            glog.Info("azureDisk - accountListResult Name or Sku is nil")
            continue
        }
        if v.Name == nil || v.Sku == nil {
            glog.Infof("azureDisk - accountListResult Name or Sku is nil")
        if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
            continue
        }
        glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
@@ -519,7 +480,7 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto
        return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
    }

    glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType))
    glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))

    cp := storage.AccountCreateParameters{
        Sku: &storage.Sku{Name: storageAccountType},
@@ -555,13 +516,13 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
    countAccounts := 0 // account of this type.
    for _, v := range c.accounts {
        // filter out any stand-alone disks/accounts
        if strings.Index(v.name, storageAccountNameMatch) != 0 {
        if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) {
            continue
        }

        // note: we compute avge stratified by type.
        // this to enable user to grow per SA type to avoid low
        //avg utilization on one account type skewing all data.
        // note: we compute avg stratified by type.
        // this is to enable user to grow per SA type to avoid low
        // avg utilization on one account type skewing all data.

        if v.saType == storageAccountType {
            // compute average
@@ -574,7 +535,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
            // empty account
            if dCount == 0 {
                glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
                return v.name, nil // shortcircut, avg is good and no need to adjust
                return v.name, nil // short circuit, avg is good and no need to adjust
            }
            // if this account is less allocated
            if dCount < maxDiskCount {
@@ -587,7 +548,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
    // if we failed to find storageaccount
    if SAName == "" {
        glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
        SAName = getAccountNameForNum(c.getNextAccountNum())
        SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
        err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
        if err != nil {
            return "", err
@@ -600,10 +561,10 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
    avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
    aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)

    // avg are not create and we should craete more accounts if we can
    // avg are not create and we should create more accounts if we can
    if aboveAvg && countAccounts < maxStorageAccounts {
        glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
        SAName = getAccountNameForNum(c.getNextAccountNum())
        glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
        SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
        err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
        if err != nil {
            return "", err
@@ -611,43 +572,15 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
        return SAName, nil
    }

    // avergates are not ok and we are at capacity(max storage accounts allowed)
    // averages are not ok and we are at capacity (max storage accounts allowed)
    if aboveAvg && countAccounts == maxStorageAccounts {
        glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
        glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
            avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
    }

    // we found a storage accounts && [ avg are ok || we reached max sa count ]
    return SAName, nil
}
func (c *BlobDiskController) getNextAccountNum() int {
    max := 0

    for k := range c.accounts {
        // filter out accounts that are for standalone
        if strings.Index(k, storageAccountNameMatch) != 0 {
            continue
        }
        num := getAccountNumFromName(k)
        if num > max {
            max = num
        }
    }

    return max + 1
}

func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error {
    resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName)
    if err != nil {
        return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err)
    }

    c.removeAccountState(storageAccountName)

    glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName)
    return nil
}

//Gets storage account exist, provisionStatus, Error if any
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
@@ -667,33 +600,6 @@ func (c *BlobDiskController) addAccountState(key string, state *storageAccountSt
    }
}

func (c *BlobDiskController) removeAccountState(key string) {
    accountsLock.Lock()
    defer accountsLock.Unlock()
    delete(c.accounts, key)
}

// pads account num with zeros as needed
func getAccountNameForNum(num int) string {
    sNum := strconv.Itoa(num)
    missingZeros := 3 - len(sNum)
    strZero := ""
    for missingZeros > 0 {
        strZero = strZero + "0"
        missingZeros = missingZeros - 1
    }

    sNum = strZero + sNum
    return fmt.Sprintf(storageAccountNamePrefix, sNum)
}

func getAccountNumFromName(accountName string) int {
    nameLen := len(accountName)
    num, _ := strconv.Atoi(accountName[nameLen-3:])

    return num
}

func createVHDHeader(size uint64) ([]byte, error) {
    h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
    b := new(bytes.Buffer)
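The growth heuristic in findSANameForDisk above comes down to one ratio: disks allocated across the shared accounts of a SKU, divided by total capacity (accounts x maxDisksPerStorageAccounts), compared against storageAccountUtilizationBeforeGrowing. A small illustrative sketch using the vendored constants (shouldGrow itself is hypothetical and not part of the code; capacity and max-account checks are omitted):

package main

import "fmt"

// Constants match the vendored values in this commit.
const (
    maxDisksPerStorageAccounts             = 60
    storageAccountUtilizationBeforeGrowing = 0.5
)

// shouldGrow mirrors the avgUtilization check in findSANameForDisk.
func shouldGrow(totalDisks, accounts int) bool {
    avg := float64(totalDisks) / float64(accounts*maxDisksPerStorageAccounts)
    return avg > storageAccountUtilizationBeforeGrowing
}

func main() {
    // Two shared accounts holding 70 disks: 70/120 = 0.58 > 0.5, so a new
    // account would be created (assuming maxStorageAccounts is not reached).
    fmt.Println(shouldGrow(70, 2)) // true
    fmt.Println(shouldGrow(50, 2)) // false: 50/120 = 0.42
}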
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "sync"
    "time"

    "k8s.io/client-go/tools/cache"
)

// getFunc defines a getter function for timedCache.
type getFunc func(key string) (interface{}, error)

// cacheEntry is the internal structure stores inside TTLStore.
type cacheEntry struct {
    key  string
    data interface{}

    // The lock to ensure not updating same entry simultaneously.
    lock sync.Mutex
}

// cacheKeyFunc defines the key function required in TTLStore.
func cacheKeyFunc(obj interface{}) (string, error) {
    return obj.(*cacheEntry).key, nil
}

// timedCache is a cache with TTL.
type timedCache struct {
    store  cache.Store
    lock   sync.Mutex
    getter getFunc
}

// newTimedcache creates a new timedCache.
func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {
    if getter == nil {
        return nil, fmt.Errorf("getter is not provided")
    }

    return &timedCache{
        getter: getter,
        store:  cache.NewTTLStore(cacheKeyFunc, ttl),
    }, nil
}

// getInternal returns cacheEntry by key. If the key is not cached yet,
// it returns a cacheEntry with nil data.
func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
    entry, exists, err := t.store.GetByKey(key)
    if err != nil {
        return nil, err
    }
    if exists {
        return entry.(*cacheEntry), nil
    }

    t.lock.Lock()
    defer t.lock.Unlock()
    entry, exists, err = t.store.GetByKey(key)
    if err != nil {
        return nil, err
    }
    if exists {
        return entry.(*cacheEntry), nil
    }

    // Still not found, add new entry with nil data.
    // Note the data will be filled later by getter.
    newEntry := &cacheEntry{
        key:  key,
        data: nil,
    }
    t.store.Add(newEntry)
    return newEntry, nil
}

// Get returns the requested item by key.
func (t *timedCache) Get(key string) (interface{}, error) {
    entry, err := t.getInternal(key)
    if err != nil {
        return nil, err
    }

    // Data is still not cached yet, cache it by getter.
    if entry.data == nil {
        entry.lock.Lock()
        defer entry.lock.Unlock()

        if entry.data == nil {
            data, err := t.getter(key)
            if err != nil {
                return nil, err
            }

            entry.data = data
        }
    }

    return entry.data, nil
}

// Delete removes an item from the cache.
func (t *timedCache) Delete(key string) error {
    return t.store.Delete(&cacheEntry{
        key: key,
    })
}
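A short illustration of how the cache above behaves, written as a hypothetical Go example test against the unexported newTimedcache (it is not part of the vendored file; compare the real tests in azure_cache_test.go below): the getter runs only on a miss or after the TTL lapses, and Delete forces the next Get to refetch, which is exactly how the lb/vm caches are invalidated right after writes elsewhere in this commit.

package azure

import (
    "fmt"
    "time"
)

func ExampleTimedCache() {
    calls := 0
    c, _ := newTimedcache(2*time.Second, func(key string) (interface{}, error) {
        calls++ // counts how often the data source is actually hit
        return "value-for-" + key, nil
    })

    v, _ := c.Get("lb1") // miss: getter runs
    c.Get("lb1")         // hit: served from the TTL store
    fmt.Println(v, calls)

    c.Delete("lb1") // invalidate, as DeleteLBWithRetry does after deleting
    c.Get("lb1")    // miss again: getter runs a second time
    fmt.Println(calls)
    // Output:
    // value-for-lb1 1
    // 2
}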
160
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache_test.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

var (
    fakeCacheTTL = 2 * time.Second
)

type fakeDataObj struct{}

type fakeDataSource struct {
    called int
    data   map[string]*fakeDataObj
    lock   sync.Mutex
}

func (fake *fakeDataSource) get(key string) (interface{}, error) {
    fake.lock.Lock()
    defer fake.lock.Unlock()

    fake.called = fake.called + 1
    if v, ok := fake.data[key]; ok {
        return v, nil
    }

    return nil, nil
}

func (fake *fakeDataSource) set(data map[string]*fakeDataObj) {
    fake.lock.Lock()
    defer fake.lock.Unlock()

    fake.data = data
    fake.called = 0
}

func newFakeCache(t *testing.T) (*fakeDataSource, *timedCache) {
    dataSource := &fakeDataSource{
        data: make(map[string]*fakeDataObj),
    }
    getter := dataSource.get
    cache, err := newTimedcache(fakeCacheTTL, getter)
    assert.NoError(t, err)
    return dataSource, cache
}

func TestCacheGet(t *testing.T) {
    val := &fakeDataObj{}
    cases := []struct {
        name     string
        data     map[string]*fakeDataObj
        key      string
        expected interface{}
    }{
        {
            name:     "cache should return nil for empty data source",
            key:      "key1",
            expected: nil,
        },
        {
            name:     "cache should return nil for non exist key",
            data:     map[string]*fakeDataObj{"key2": val},
            key:      "key1",
            expected: nil,
        },
        {
            name:     "cache should return data for existing key",
            data:     map[string]*fakeDataObj{"key1": val},
            key:      "key1",
            expected: val,
        },
    }

    for _, c := range cases {
        dataSource, cache := newFakeCache(t)
        dataSource.set(c.data)
        val, err := cache.Get(c.key)
        assert.NoError(t, err, c.name)
        assert.Equal(t, c.expected, val, c.name)
    }
}

func TestCacheGetError(t *testing.T) {
    getError := fmt.Errorf("getError")
    getter := func(key string) (interface{}, error) {
        return nil, getError
    }
    cache, err := newTimedcache(fakeCacheTTL, getter)
    assert.NoError(t, err)

    val, err := cache.Get("key")
    assert.Error(t, err)
    assert.Equal(t, getError, err)
    assert.Nil(t, val)
}

func TestCacheDelete(t *testing.T) {
    key := "key1"
    val := &fakeDataObj{}
    data := map[string]*fakeDataObj{
        key: val,
    }
    dataSource, cache := newFakeCache(t)
    dataSource.set(data)

    v, err := cache.Get(key)
    assert.NoError(t, err)
    assert.Equal(t, val, v, "cache should get correct data")

    dataSource.set(nil)
    cache.Delete(key)
    v, err = cache.Get(key)
    assert.NoError(t, err)
    assert.Equal(t, 1, dataSource.called)
    assert.Equal(t, nil, v, "cache should get nil after data is removed")
}

func TestCacheExpired(t *testing.T) {
    key := "key1"
    val := &fakeDataObj{}
    data := map[string]*fakeDataObj{
        key: val,
    }
    dataSource, cache := newFakeCache(t)
    dataSource.set(data)

    v, err := cache.Get(key)
    assert.NoError(t, err)
    assert.Equal(t, 1, dataSource.called)
    assert.Equal(t, val, v, "cache should get correct data")

    time.Sleep(fakeCacheTTL)
    v, err = cache.Get(key)
    assert.NoError(t, err)
    assert.Equal(t, 2, dataSource.called)
    assert.Equal(t, val, v, "cache should get correct data even after expired")
}
1316
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go
generated
vendored
Normal file
File diff suppressed because it is too large
192
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "time"

    "github.com/Azure/azure-sdk-for-go/arm/compute"

    "k8s.io/apimachinery/pkg/types"
    kwait "k8s.io/apimachinery/pkg/util/wait"
)

const (
    storageAccountNameTemplate = "pvc%s"

    // for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
    maxStorageAccounts                     = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
    maxDisksPerStorageAccounts             = 60
    storageAccountUtilizationBeforeGrowing = 0.5

    maxLUN               = 64 // max number of LUNs per VM
    errLeaseFailed       = "AcquireDiskLeaseFailed"
    errLeaseIDMissing    = "LeaseIdMissing"
    errContainerNotFound = "ContainerNotFound"
    errDiskBlobNotFound  = "DiskBlobNotFound"
)

var defaultBackOff = kwait.Backoff{
    Steps:    20,
    Duration: 2 * time.Second,
    Factor:   1.5,
    Jitter:   0.0,
}

type controllerCommon struct {
    subscriptionID        string
    location              string
    storageEndpointSuffix string
    resourceGroup         string
    cloud                 *Cloud
}

// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
    // 1. vmType is standard, attach with availabilitySet.AttachDisk.
    if c.cloud.VMType == vmTypeStandard {
        return c.cloud.vmSet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
    }

    // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
    ss, ok := c.cloud.vmSet.(*scaleSet)
    if !ok {
        return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
    }

    // 3. If the node is managed by availability set, then attach with availabilitySet.AttachDisk.
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
    if err != nil {
        return err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
    }

    // 4. Node is managed by vmss, attach with scaleSet.AttachDisk.
    return ss.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}

// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
    // 1. vmType is standard, detach with availabilitySet.DetachDiskByName.
    if c.cloud.VMType == vmTypeStandard {
        return c.cloud.vmSet.DetachDiskByName(diskName, diskURI, nodeName)
    }

    // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
    ss, ok := c.cloud.vmSet.(*scaleSet)
    if !ok {
        return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
    }

    // 3. If the node is managed by availability set, then detach with availabilitySet.DetachDiskByName.
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
    if err != nil {
        return err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.DetachDiskByName(diskName, diskURI, nodeName)
    }

    // 4. Node is managed by vmss, detach with scaleSet.DetachDiskByName.
    return ss.DetachDiskByName(diskName, diskURI, nodeName)
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
    // 1. vmType is standard, get with availabilitySet.GetDiskLun.
    if c.cloud.VMType == vmTypeStandard {
        return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
    }

    // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
    ss, ok := c.cloud.vmSet.(*scaleSet)
    if !ok {
        return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
    }

    // 3. If the node is managed by availability set, then get with availabilitySet.GetDiskLun.
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
    if err != nil {
        return -1, err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
    }

    // 4. Node is managed by vmss, get with scaleSet.GetDiskLun.
    return ss.GetDiskLun(diskName, diskURI, nodeName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
    // 1. vmType is standard, get with availabilitySet.GetNextDiskLun.
    if c.cloud.VMType == vmTypeStandard {
        return c.cloud.vmSet.GetNextDiskLun(nodeName)
    }

    // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
    ss, ok := c.cloud.vmSet.(*scaleSet)
    if !ok {
        return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
    }

    // 3. If the node is managed by availability set, then get with availabilitySet.GetNextDiskLun.
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
    if err != nil {
        return -1, err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.GetNextDiskLun(nodeName)
    }

    // 4. Node is managed by vmss, get with scaleSet.GetNextDiskLun.
    return ss.GetNextDiskLun(nodeName)
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
    // 1. vmType is standard, check with availabilitySet.DisksAreAttached.
    if c.cloud.VMType == vmTypeStandard {
        return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
    }

    // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
    ss, ok := c.cloud.vmSet.(*scaleSet)
    if !ok {
        return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
    }

    // 3. If the node is managed by availability set, then check with availabilitySet.DisksAreAttached.
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
    if err != nil {
        return nil, err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
    }

    // 4. Node is managed by vmss, check with scaleSet.DisksAreAttached.
    return ss.DisksAreAttached(diskNames, nodeName)
}
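Each of the five methods above repeats the same four-step dispatch. A hypothetical consolidation sketch, not part of the vendored code: it assumes the VMSet interface from azure_vmsets.go and that both the scale set and its embedded availabilitySet satisfy it.

package azure

import (
    "fmt"

    "k8s.io/apimachinery/pkg/types"
)

// diskVMSet is a hypothetical helper capturing the repeated dispatch:
// standard clusters use the configured vmSet; vmss clusters route
// availability-set-managed nodes to the availability set and everything
// else to the scale set itself.
func (c *controllerCommon) diskVMSet(nodeName types.NodeName) (VMSet, error) {
    // 1. vmType is standard: always the configured vmSet.
    if c.cloud.VMType == vmTypeStandard {
        return c.cloud.vmSet, nil
    }

    // 2. Otherwise vmSet must be the scale set implementation.
    ss, ok := c.cloud.vmSet.(*scaleSet)
    if !ok {
        return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
    }

    // 3. Nodes that really live in an availability set fall back to it.
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
    if err != nil {
        return nil, err
    }
    if managedByAS {
        return ss.availabilitySet, nil
    }

    // 4. The node is managed by vmss.
    return ss, nil
}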
@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,64 +19,22 @@ package azure
import (
    "fmt"
    "strings"
    "time"

    "k8s.io/apimachinery/pkg/types"
    kwait "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/cloudprovider"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

const (
    defaultDataDiskCount int = 16 // which will allow you to work with most medium size VMs (if not found in map)
    storageAccountNameTemplate = "pvc%s"

    // for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
    maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
    maxDisksPerStorageAccounts = 60
    storageAccountUtilizationBeforeGrowing = 0.5
    storageAccountsCountInit = 2 // When the plug-in is init-ed, 2 storage accounts will be created to allow fast pvc create/attach/mount

    maxLUN = 64 // max number of LUNs per VM
    errLeaseFailed = "AcquireDiskLeaseFailed"
    errLeaseIDMissing = "LeaseIdMissing"
    errContainerNotFound = "ContainerNotFound"
)

var defaultBackOff = kwait.Backoff{
    Steps:    20,
    Duration: 2 * time.Second,
    Factor:   1.5,
    Jitter:   0.0,
}

type controllerCommon struct {
    tenantID string
    subscriptionID string
    location string
    storageEndpointSuffix string
    resourceGroup string
    clientID string
    clientSecret string
    managementEndpoint string
    tokenEndPoint string
    aadResourceEndPoint string
    aadToken string
    expiresOn time.Time
    cloud *Cloud
}

// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
    vm, exists, err := c.cloud.getVirtualMachine(nodeName)
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
    vm, err := as.getVirtualMachine(nodeName)
    if err != nil {
        return err
    } else if !exists {
        return cloudprovider.InstanceNotFound
    }

    disks := *vm.StorageProfile.DataDisks
    if isManagedDisk {
        disks = append(disks,
@@ -111,38 +69,39 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
        },
    }
    vmName := mapNodeNameToVMName(nodeName)
    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName)
    c.cloud.operationPollRateLimiter.Accept()
    respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
    respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
    resp := <-respChan
    err = <-errChan
    if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
        retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
    if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
        retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
        }
    }
    if err != nil {
        glog.Errorf("azureDisk - azure attach failed, err: %v", err)
        detail := err.Error()
        if strings.Contains(detail, errLeaseFailed) {
            // if lease cannot be acquired, immediately detach the disk and return the original error
            glog.Infof("azureDisk - failed to acquire disk lease, try detach")
            c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
        if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
            // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
            glog.Infof("azureDisk - err %s, try detach", detail)
            as.DetachDiskByName(diskName, diskURI, nodeName)
        }
    } else {
        glog.V(4).Infof("azureDisk - azure attach succeeded")
        glog.V(4).Info("azureDisk - azure attach succeeded")
        // Invalidate the cache right after updating
        as.cloud.vmCache.Delete(vmName)
    }
    return err
}

// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
    vm, exists, err := c.cloud.getVirtualMachine(nodeName)
    if err != nil || !exists {
func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
    vm, err := as.getVirtualMachine(nodeName)
    if err != nil {
        // if host doesn't exist, no need to detach
        glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
        return nil
@@ -175,34 +134,33 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t
        },
    }
    vmName := mapNodeNameToVMName(nodeName)
    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName)
    c.cloud.operationPollRateLimiter.Accept()
    respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
    respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
    resp := <-respChan
    err = <-errChan
    if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
        retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
    if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
        retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.cloud.ResourceGroup, vmName)
            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
        }
    }
    if err != nil {
        glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
    } else {
        glog.V(4).Infof("azureDisk - azure disk detach succeeded")
        glog.V(4).Info("azureDisk - azure disk detach succeeded")
        // Invalidate the cache right after updating
        as.cloud.vmCache.Delete(vmName)
    }
    return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
    vm, exists, err := c.cloud.getVirtualMachine(nodeName)
func (as *availabilitySet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
    vm, err := as.getVirtualMachine(nodeName)
    if err != nil {
        return -1, err
    } else if !exists {
        return -1, cloudprovider.InstanceNotFound
    }
    disks := *vm.StorageProfile.DataDisks
    for _, disk := range disks {
@@ -219,12 +177,10 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
    vm, exists, err := c.cloud.getVirtualMachine(nodeName)
func (as *availabilitySet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
    vm, err := as.getVirtualMachine(nodeName)
    if err != nil {
        return -1, err
    } else if !exists {
        return -1, cloudprovider.InstanceNotFound
    }
    used := make([]bool, maxLUN)
    disks := *vm.StorageProfile.DataDisks
@@ -242,13 +198,13 @@ func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
func (as *availabilitySet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
    attached := make(map[string]bool)
    for _, diskName := range diskNames {
        attached[diskName] = false
    }
    vm, exists, err := c.cloud.getVirtualMachine(nodeName)
    if !exists {
    vm, err := as.getVirtualMachine(nodeName)
    if err == cloudprovider.InstanceNotFound {
        // if host doesn't exist, no need to detach
        glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
            nodeName, diskNames)
214
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "strings"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
    ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
    if err != nil {
        return err
    }

    disks := *vm.StorageProfile.DataDisks
    if isManagedDisk {
        disks = append(disks,
            computepreview.DataDisk{
                Name:         &diskName,
                Lun:          &lun,
                Caching:      computepreview.CachingTypes(cachingMode),
                CreateOption: "attach",
                ManagedDisk: &computepreview.ManagedDiskParameters{
                    ID: &diskURI,
                },
            })
    } else {
        disks = append(disks,
            computepreview.DataDisk{
                Name: &diskName,
                Vhd: &computepreview.VirtualHardDisk{
                    URI: &diskURI,
                },
                Lun:          &lun,
                Caching:      computepreview.CachingTypes(cachingMode),
                CreateOption: "attach",
            })
    }
    vm.StorageProfile.DataDisks = &disks

    ctx, cancel := getContextWithCancel()
    defer cancel()
    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", ss.resourceGroup, nodeName)
    resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
    if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
        retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
        }
    }
    if err != nil {
        detail := err.Error()
        if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
            // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
            glog.Infof("azureDisk - err %s, try detach", detail)
            ss.DetachDiskByName(diskName, diskURI, nodeName)
        }
    } else {
        glog.V(4).Info("azureDisk - azure attach succeeded")
        // Invalidate the cache right after updating
        ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
    }
    return err
}

// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
    ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
    if err != nil {
        return err
    }

    disks := *vm.StorageProfile.DataDisks
    bFoundDisk := false
    for i, disk := range disks {
        if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
            (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
            (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
            // found the disk
            glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
            disks = append(disks[:i], disks[i+1:]...)
            bFoundDisk = true
            break
        }
    }

    if !bFoundDisk {
        return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
    }

    vm.StorageProfile.DataDisks = &disks
    ctx, cancel := getContextWithCancel()
    defer cancel()
    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", ss.resourceGroup, nodeName)
    resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
    if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
        retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
        }
    }
    if err != nil {
        glog.Errorf("azureDisk - azure disk detach %q from %s failed, err: %v", diskName, nodeName, err)
    } else {
        glog.V(4).Info("azureDisk - azure detach succeeded")
        // Invalidate the cache right after updating
        ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
    }

    return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
    _, _, vm, err := ss.getVmssVM(string(nodeName))
    if err != nil {
        return -1, err
    }

    disks := *vm.StorageProfile.DataDisks
    for _, disk := range disks {
        if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
            (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
            (disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
            // found the disk
            glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
            return *disk.Lun, nil
        }
    }
    return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (ss *scaleSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
    _, _, vm, err := ss.getVmssVM(string(nodeName))
    if err != nil {
        return -1, err
    }

    used := make([]bool, maxLUN)
    disks := *vm.StorageProfile.DataDisks
    for _, disk := range disks {
        if disk.Lun != nil {
            used[*disk.Lun] = true
        }
    }
    for k, v := range used {
        if !v {
            return int32(k), nil
        }
    }
    return -1, fmt.Errorf("All Luns are used")
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (ss *scaleSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
    attached := make(map[string]bool)
    for _, diskName := range diskNames {
        attached[diskName] = false
    }

    _, _, vm, err := ss.getVmssVM(string(nodeName))
    if err != nil {
        if err == cloudprovider.InstanceNotFound {
            // if host doesn't exist, no need to detach
            glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
                nodeName, diskNames)
            return attached, nil
        }

        return attached, err
    }

    disks := *vm.StorageProfile.DataDisks
    for _, disk := range disks {
        for _, diskName := range diskNames {
            if disk.Name != nil && diskName != "" && *disk.Name == diskName {
                attached[diskName] = true
            }
        }
    }

    return attached, nil
}
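Editor's note (not part of the vendored diff): the LUN bookkeeping in GetDiskLun/GetNextDiskLun above is the heart of the attach flow, so a minimal standalone sketch may help. Everything in it is illustrative — nextFreeLun and usedLuns are invented names, and maxLUN stands in for the package constant the real code reads from the VM's StorageProfile.

// a minimal sketch of the free-LUN scan, under the assumptions above
func nextFreeLun(usedLuns []int32, maxLUN int32) (int32, error) {
    used := make([]bool, maxLUN)
    for _, lun := range usedLuns {
        used[lun] = true // mark every lun already occupied by a data disk
    }
    for k, v := range used {
        if !v {
            return int32(k), nil // lowest unused lun wins
        }
    }
    return -1, fmt.Errorf("all luns are used")
}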
638
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package azure

import (
    "context"
    "fmt"
    "math/rand"
    "net/http"
@ -24,11 +25,17 @@ import (
    "sync"
    "time"

    "github.com/Azure/go-autorest/autorest/to"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/cloudprovider"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/Azure/azure-sdk-for-go/arm/disk"
    "github.com/Azure/azure-sdk-for-go/arm/network"
    "github.com/Azure/azure-sdk-for-go/arm/storage"
    computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
    "github.com/Azure/go-autorest/autorest"
    "github.com/Azure/go-autorest/autorest/to"
)

type fakeAzureLBClient struct {
@ -36,14 +43,14 @@ type fakeAzureLBClient struct {
    FakeStore map[string]map[string]network.LoadBalancer
}

func newFakeAzureLBClient() fakeAzureLBClient {
    fLBC := fakeAzureLBClient{}
func newFakeAzureLBClient() *fakeAzureLBClient {
    fLBC := &fakeAzureLBClient{}
    fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer)
    fLBC.mutex = &sync.Mutex{}
    return fLBC
}

func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) {
func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) {
    fLBC.mutex.Lock()
    defer fLBC.mutex.Unlock()
    resultChan := make(chan network.LoadBalancer, 1)
@ -65,7 +72,7 @@ func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalan
    for idx, config := range *parameters.FrontendIPConfigurations {
        if config.PrivateIPAllocationMethod == network.Dynamic {
            // Here we randomly assign an ip as private ip
            // It dosen't smart enough to know whether it is in the subnet's range
            // It doesn't smart enough to know whether it is in the subnet's range
            (*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
        }
    }
@ -79,7 +86,7 @@ func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalan
    return resultChan, errChan
}

func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
func (fLBC *fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
    fLBC.mutex.Lock()
    defer fLBC.mutex.Unlock()
    respChan := make(chan autorest.Response, 1)
@ -112,7 +119,7 @@ func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName
    return respChan, errChan
}

func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
func (fLBC *fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
    fLBC.mutex.Lock()
    defer fLBC.mutex.Unlock()
    if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
@ -126,7 +133,7 @@ func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName str
    }
}

func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) {
func (fLBC *fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) {
    fLBC.mutex.Lock()
    defer fLBC.mutex.Unlock()
    var value []network.LoadBalancer
@ -143,7 +150,7 @@ func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.Loa
    return result, nil
}

func (fLBC fakeAzureLBClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) {
func (fLBC *fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) {
    fLBC.mutex.Lock()
    defer fLBC.mutex.Unlock()
    result.Response.Response = &http.Response{
@ -171,15 +178,15 @@ func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName stri
        pipName)
}

func newFakeAzurePIPClient(subscriptionID string) fakeAzurePIPClient {
    fAPC := fakeAzurePIPClient{}
func newFakeAzurePIPClient(subscriptionID string) *fakeAzurePIPClient {
    fAPC := &fakeAzurePIPClient{}
    fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress)
    fAPC.SubscriptionID = subscriptionID
    fAPC.mutex = &sync.Mutex{}
    return fAPC
}

func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) {
func (fAPC *fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) {
    fAPC.mutex.Lock()
    defer fAPC.mutex.Unlock()
    resultChan := make(chan network.PublicIPAddress, 1)
@ -216,7 +223,7 @@ func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIP
    return resultChan, errChan
}

func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
func (fAPC *fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
    fAPC.mutex.Lock()
    defer fAPC.mutex.Unlock()
    respChan := make(chan autorest.Response, 1)
@ -249,7 +256,7 @@ func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressN
    return respChan, errChan
}

func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
func (fAPC *fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
    fAPC.mutex.Lock()
    defer fAPC.mutex.Unlock()
    if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
@ -263,13 +270,13 @@ func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName
    }
}

func (fAPC fakeAzurePIPClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) {
func (fAPC *fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) {
    fAPC.mutex.Lock()
    defer fAPC.mutex.Unlock()
    return network.PublicIPAddressListResult{}, nil
}

func (fAPC fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) {
func (fAPC *fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) {
    fAPC.mutex.Lock()
    defer fAPC.mutex.Unlock()
    var value []network.PublicIPAddress
@ -291,15 +298,15 @@ type fakeAzureInterfacesClient struct {
    FakeStore map[string]map[string]network.Interface
}

func newFakeAzureInterfacesClient() fakeAzureInterfacesClient {
    fIC := fakeAzureInterfacesClient{}
func newFakeAzureInterfacesClient() *fakeAzureInterfacesClient {
    fIC := &fakeAzureInterfacesClient{}
    fIC.FakeStore = make(map[string]map[string]network.Interface)
    fIC.mutex = &sync.Mutex{}

    return fIC
}

func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) {
func (fIC *fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) {
    fIC.mutex.Lock()
    defer fIC.mutex.Unlock()
    resultChan := make(chan network.Interface, 1)
@ -325,7 +332,7 @@ func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, ne
    return resultChan, errChan
}

func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
func (fIC *fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
    fIC.mutex.Lock()
    defer fIC.mutex.Unlock()
    if _, ok := fIC.FakeStore[resourceGroupName]; ok {
@ -339,7 +346,7 @@ func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterf
    }
}

func (fIC fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
    return result, nil
}

@ -348,14 +355,14 @@ type fakeAzureVirtualMachinesClient struct {
    FakeStore map[string]map[string]compute.VirtualMachine
}

func newFakeAzureVirtualMachinesClient() fakeAzureVirtualMachinesClient {
    fVMC := fakeAzureVirtualMachinesClient{}
func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient {
    fVMC := &fakeAzureVirtualMachinesClient{}
    fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine)
    fVMC.mutex = &sync.Mutex{}
    return fVMC
}

func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) {
func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()
    resultChan := make(chan compute.VirtualMachine, 1)
@ -380,7 +387,7 @@ func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName stri
    return resultChan, errChan
}

func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
func (fVMC *fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()
    if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
@ -394,7 +401,7 @@ func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName
    }
}

func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) {
func (fVMC *fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()
    var value []compute.VirtualMachine
@ -410,7 +417,7 @@ func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (resul
    result.Value = &value
    return result, nil
}
func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
func (fVMC *fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()
    return compute.VirtualMachineListResult{}, nil
@ -421,14 +428,14 @@ type fakeAzureSubnetsClient struct {
    FakeStore map[string]map[string]network.Subnet
}

func newFakeAzureSubnetsClient() fakeAzureSubnetsClient {
    fASC := fakeAzureSubnetsClient{}
func newFakeAzureSubnetsClient() *fakeAzureSubnetsClient {
    fASC := &fakeAzureSubnetsClient{}
    fASC.FakeStore = make(map[string]map[string]network.Subnet)
    fASC.mutex = &sync.Mutex{}
    return fASC
}

func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) {
func (fASC *fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) {
    fASC.mutex.Lock()
    defer fASC.mutex.Unlock()
    resultChan := make(chan network.Subnet, 1)
@ -454,7 +461,7 @@ func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virt
    return resultChan, errChan
}

func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
func (fASC *fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
    fASC.mutex.Lock()
    defer fASC.mutex.Unlock()
    respChan := make(chan autorest.Response, 1)
@ -488,7 +495,7 @@ func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetwo
    }
    return respChan, errChan
}
func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
func (fASC *fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
    fASC.mutex.Lock()
    defer fASC.mutex.Unlock()
    rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
@ -502,7 +509,7 @@ func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkN
        Message: "Not such Subnet",
    }
}
func (fASC fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) {
func (fASC *fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) {
    fASC.mutex.Lock()
    defer fASC.mutex.Unlock()
    rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
@ -525,14 +532,14 @@ type fakeAzureNSGClient struct {
    FakeStore map[string]map[string]network.SecurityGroup
}

func newFakeAzureNSGClient() fakeAzureNSGClient {
    fNSG := fakeAzureNSGClient{}
func newFakeAzureNSGClient() *fakeAzureNSGClient {
    fNSG := &fakeAzureNSGClient{}
    fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup)
    fNSG.mutex = &sync.Mutex{}
    return fNSG
}

func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) {
func (fNSG *fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) {
    fNSG.mutex.Lock()
    defer fNSG.mutex.Unlock()
    resultChan := make(chan network.SecurityGroup, 1)
@ -557,7 +564,7 @@ func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkS
    return resultChan, errChan
}

func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
func (fNSG *fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
    fNSG.mutex.Lock()
    defer fNSG.mutex.Unlock()
    respChan := make(chan autorest.Response, 1)
@ -590,7 +597,7 @@ func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityG
    return respChan, errChan
}

func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
func (fNSG *fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
    fNSG.mutex.Lock()
    defer fNSG.mutex.Unlock()
    if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
@ -604,7 +611,7 @@ func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGrou
    }
}

func (fNSG fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) {
func (fNSG *fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) {
    fNSG.mutex.Lock()
    defer fNSG.mutex.Unlock()
    var value []network.SecurityGroup
@ -625,3 +632,556 @@ func getRandomIPPtr() *string {
    rand.Seed(time.Now().UnixNano())
    return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)))
}

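Editor's note: the sweep above from value receivers (fLBC fakeAzureLBClient) to pointer receivers (fLBC *fakeAzureLBClient) matters once a fake carries slice state, such as fakeRouteTablesClient.Calls further below: appending through a value receiver only mutates a copy. A tiny illustrative sketch with invented names:

type recorder struct{ calls []string }

func (r recorder) recordByValue(call string) {
    r.calls = append(r.calls, call) // appends to a copy; lost when the method returns
}

func (r *recorder) recordByPointer(call string) {
    r.calls = append(r.calls, call) // visible to the caller's recorder
}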
type fakeVirtualMachineScaleSetVMsClient struct {
    mutex     *sync.Mutex
    FakeStore map[string]map[string]computepreview.VirtualMachineScaleSetVM
}

func newFakeVirtualMachineScaleSetVMsClient() *fakeVirtualMachineScaleSetVMsClient {
    fVMC := &fakeVirtualMachineScaleSetVMsClient{}
    fVMC.FakeStore = make(map[string]map[string]computepreview.VirtualMachineScaleSetVM)
    fVMC.mutex = &sync.Mutex{}

    return fVMC
}

func (fVMC *fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]map[string]computepreview.VirtualMachineScaleSetVM) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()

    fVMC.FakeStore = store
}

func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []computepreview.VirtualMachineScaleSetVM, err error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()

    result = []computepreview.VirtualMachineScaleSetVM{}
    if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
        for _, v := range fVMC.FakeStore[resourceGroupName] {
            result = append(result, v)
        }
    }

    return result, nil
}

func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result computepreview.VirtualMachineScaleSetVM, err error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()

    vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
    if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
        if entity, ok := scaleSetMap[vmKey]; ok {
            return entity, nil
        }
    }

    return result, autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "No such VirtualMachineScaleSetVM",
    }
}

func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result computepreview.VirtualMachineScaleSetVMInstanceView, err error) {
    _, err = fVMC.Get(ctx, resourceGroupName, VMScaleSetName, instanceID)
    if err != nil {
        return result, err
    }

    return result, nil
}

func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters computepreview.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
    fVMC.mutex.Lock()
    defer fVMC.mutex.Unlock()

    vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
    if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
        if _, ok := scaleSetMap[vmKey]; ok {
            scaleSetMap[vmKey] = parameters
        }
    }

    return nil, nil
}

type fakeVirtualMachineScaleSetsClient struct {
    mutex     *sync.Mutex
    FakeStore map[string]map[string]computepreview.VirtualMachineScaleSet
}

func newFakeVirtualMachineScaleSetsClient() *fakeVirtualMachineScaleSetsClient {
    fVMSSC := &fakeVirtualMachineScaleSetsClient{}
    fVMSSC.FakeStore = make(map[string]map[string]computepreview.VirtualMachineScaleSet)
    fVMSSC.mutex = &sync.Mutex{}

    return fVMSSC
}

func (fVMSSC *fakeVirtualMachineScaleSetsClient) setFakeStore(store map[string]map[string]computepreview.VirtualMachineScaleSet) {
    fVMSSC.mutex.Lock()
    defer fVMSSC.mutex.Unlock()

    fVMSSC.FakeStore = store
}

func (fVMSSC *fakeVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters computepreview.VirtualMachineScaleSet) (resp *http.Response, err error) {
    fVMSSC.mutex.Lock()
    defer fVMSSC.mutex.Unlock()

    if _, ok := fVMSSC.FakeStore[resourceGroupName]; !ok {
        fVMSSC.FakeStore[resourceGroupName] = make(map[string]computepreview.VirtualMachineScaleSet)
    }
    fVMSSC.FakeStore[resourceGroupName][VMScaleSetName] = parameters

    return nil, nil
}

func (fVMSSC *fakeVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result computepreview.VirtualMachineScaleSet, err error) {
    fVMSSC.mutex.Lock()
    defer fVMSSC.mutex.Unlock()

    if scaleSetMap, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
        if entity, ok := scaleSetMap[VMScaleSetName]; ok {
            return entity, nil
        }
    }

    return result, autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "No such ScaleSet",
    }
}

func (fVMSSC *fakeVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []computepreview.VirtualMachineScaleSet, err error) {
    fVMSSC.mutex.Lock()
    defer fVMSSC.mutex.Unlock()

    result = []computepreview.VirtualMachineScaleSet{}
    if _, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
        for _, v := range fVMSSC.FakeStore[resourceGroupName] {
            result = append(result, v)
        }
    }

    return result, nil
}

func (fVMSSC *fakeVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
    return nil, nil
}

type fakeRoutesClient struct {
    mutex     *sync.Mutex
    FakeStore map[string]map[string]network.Route
}

func newFakeRoutesClient() *fakeRoutesClient {
    fRC := &fakeRoutesClient{}
    fRC.FakeStore = make(map[string]map[string]network.Route)
    fRC.mutex = &sync.Mutex{}
    return fRC
}

func (fRC *fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) {
    fRC.mutex.Lock()
    defer fRC.mutex.Unlock()

    resultChan := make(chan network.Route, 1)
    errChan := make(chan error, 1)
    var result network.Route
    var err error
    defer func() {
        resultChan <- result
        errChan <- err
        close(resultChan)
        close(errChan)
    }()

    if _, ok := fRC.FakeStore[routeTableName]; !ok {
        fRC.FakeStore[routeTableName] = make(map[string]network.Route)
    }
    fRC.FakeStore[routeTableName][routeName] = routeParameters
    result = fRC.FakeStore[routeTableName][routeName]
    result.Response.Response = &http.Response{
        StatusCode: http.StatusOK,
    }
    err = nil
    return resultChan, errChan
}

func (fRC *fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
    fRC.mutex.Lock()
    defer fRC.mutex.Unlock()

    respChan := make(chan autorest.Response, 1)
    errChan := make(chan error, 1)
    var resp autorest.Response
    var err error
    defer func() {
        respChan <- resp
        errChan <- err
        close(respChan)
        close(errChan)
    }()
    if routes, ok := fRC.FakeStore[routeTableName]; ok {
        if _, ok := routes[routeName]; ok {
            delete(routes, routeName)
            resp.Response = &http.Response{
                StatusCode: http.StatusAccepted,
            }

            err = nil
            return respChan, errChan
        }
    }
    resp.Response = &http.Response{
        StatusCode: http.StatusNotFound,
    }
    err = autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "Not such Route",
    }
    return respChan, errChan
}

type fakeRouteTablesClient struct {
    mutex     *sync.Mutex
    FakeStore map[string]map[string]network.RouteTable
    Calls     []string
}

func newFakeRouteTablesClient() *fakeRouteTablesClient {
    fRTC := &fakeRouteTablesClient{}
    fRTC.FakeStore = make(map[string]map[string]network.RouteTable)
    fRTC.mutex = &sync.Mutex{}
    return fRTC
}

func (fRTC *fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) {
    fRTC.mutex.Lock()
    defer fRTC.mutex.Unlock()

    fRTC.Calls = append(fRTC.Calls, "CreateOrUpdate")

    resultChan := make(chan network.RouteTable, 1)
    errChan := make(chan error, 1)
    var result network.RouteTable
    var err error
    defer func() {
        resultChan <- result
        errChan <- err
        close(resultChan)
        close(errChan)
    }()

    if _, ok := fRTC.FakeStore[resourceGroupName]; !ok {
        fRTC.FakeStore[resourceGroupName] = make(map[string]network.RouteTable)
    }
    fRTC.FakeStore[resourceGroupName][routeTableName] = parameters
    result = fRTC.FakeStore[resourceGroupName][routeTableName]
    result.Response.Response = &http.Response{
        StatusCode: http.StatusOK,
    }
    err = nil
    return resultChan, errChan
}

func (fRTC *fakeRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
    fRTC.mutex.Lock()
    defer fRTC.mutex.Unlock()

    fRTC.Calls = append(fRTC.Calls, "Get")

    if _, ok := fRTC.FakeStore[resourceGroupName]; ok {
        if entity, ok := fRTC.FakeStore[resourceGroupName][routeTableName]; ok {
            return entity, nil
        }
    }
    return result, autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "Not such RouteTable",
    }
}

type fakeFileClient struct {
}

func (fFC *fakeFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
    return nil
}

func (fFC *fakeFileClient) deleteFileShare(accountName, accountKey, name string) error {
    return nil
}

func (fFC *fakeFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
    return nil
}

type fakeStorageAccountClient struct {
    mutex     *sync.Mutex
    FakeStore map[string]map[string]storage.Account
    Keys      storage.AccountListKeysResult
    Accounts  storage.AccountListResult
    Err       error
}

func newFakeStorageAccountClient() *fakeStorageAccountClient {
    fSAC := &fakeStorageAccountClient{}
    fSAC.FakeStore = make(map[string]map[string]storage.Account)
    fSAC.mutex = &sync.Mutex{}
    return fSAC
}

func (fSAC *fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) {
    fSAC.mutex.Lock()
    defer fSAC.mutex.Unlock()

    resultChan := make(chan storage.Account, 1)
    errChan := make(chan error, 1)
    var result storage.Account
    var err error
    defer func() {
        resultChan <- result
        errChan <- err
        close(resultChan)
        close(errChan)
    }()

    if _, ok := fSAC.FakeStore[resourceGroupName]; !ok {
        fSAC.FakeStore[resourceGroupName] = make(map[string]storage.Account)
    }
    fSAC.FakeStore[resourceGroupName][accountName] = storage.Account{
        Name:              &accountName,
        Sku:               parameters.Sku,
        Kind:              parameters.Kind,
        Location:          parameters.Location,
        Identity:          parameters.Identity,
        Tags:              parameters.Tags,
        AccountProperties: &storage.AccountProperties{},
    }
    result = fSAC.FakeStore[resourceGroupName][accountName]
    result.Response.Response = &http.Response{
        StatusCode: http.StatusOK,
    }
    err = nil
    return resultChan, errChan
}

func (fSAC *fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) {
    fSAC.mutex.Lock()
    defer fSAC.mutex.Unlock()

    if rgAccounts, ok := fSAC.FakeStore[resourceGroupName]; ok {
        if _, ok := rgAccounts[accountName]; ok {
            delete(rgAccounts, accountName)
            result.Response = &http.Response{
                StatusCode: http.StatusAccepted,
            }
            return result, nil
        }
    }

    result.Response = &http.Response{
        StatusCode: http.StatusNotFound,
    }
    err = autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "Not such StorageAccount",
    }
    return result, err
}

func (fSAC *fakeStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
    return fSAC.Keys, fSAC.Err
}

func (fSAC *fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) {
    return fSAC.Accounts, fSAC.Err
}

func (fSAC *fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) {
    fSAC.mutex.Lock()
    defer fSAC.mutex.Unlock()

    if _, ok := fSAC.FakeStore[resourceGroupName]; ok {
        if entity, ok := fSAC.FakeStore[resourceGroupName][accountName]; ok {
            return entity, nil
        }
    }

    return result, autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "Not such StorageAccount",
    }
}

type fakeDisksClient struct {
    mutex     *sync.Mutex
    FakeStore map[string]map[string]disk.Model
}

func newFakeDisksClient() *fakeDisksClient {
    fDC := &fakeDisksClient{}
    fDC.FakeStore = make(map[string]map[string]disk.Model)
    fDC.mutex = &sync.Mutex{}
    return fDC
}

func (fDC *fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) {
    fDC.mutex.Lock()
    defer fDC.mutex.Unlock()

    resultChan := make(chan disk.Model, 1)
    errChan := make(chan error, 1)
    var result disk.Model
    var err error
    defer func() {
        resultChan <- result
        errChan <- err
        close(resultChan)
        close(errChan)
    }()

    if _, ok := fDC.FakeStore[resourceGroupName]; !ok {
        fDC.FakeStore[resourceGroupName] = make(map[string]disk.Model)
    }
    fDC.FakeStore[resourceGroupName][diskName] = diskParameter
    result = fDC.FakeStore[resourceGroupName][diskName]
    result.Response.Response = &http.Response{
        StatusCode: http.StatusOK,
    }
    err = nil
    return resultChan, errChan
}

func (fDC *fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) {
    fDC.mutex.Lock()
    defer fDC.mutex.Unlock()

    respChan := make(chan disk.OperationStatusResponse, 1)
    errChan := make(chan error, 1)
    var resp disk.OperationStatusResponse
    var err error
    defer func() {
        respChan <- resp
        errChan <- err
        close(respChan)
        close(errChan)
    }()
    if rgDisks, ok := fDC.FakeStore[resourceGroupName]; ok {
        if _, ok := rgDisks[diskName]; ok {
            delete(rgDisks, diskName)
            resp.Response = autorest.Response{
                Response: &http.Response{
                    StatusCode: http.StatusAccepted,
                },
            }

            err = nil
            return respChan, errChan
        }
    }
    resp.Response = autorest.Response{
        Response: &http.Response{
            StatusCode: http.StatusNotFound,
        },
    }
    err = autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "Not such Disk",
    }
    return respChan, errChan
}

func (fDC *fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) {
    fDC.mutex.Lock()
    defer fDC.mutex.Unlock()

    if _, ok := fDC.FakeStore[resourceGroupName]; ok {
        if entity, ok := fDC.FakeStore[resourceGroupName][diskName]; ok {
            return entity, nil
        }
    }

    return result, autorest.DetailedError{
        StatusCode: http.StatusNotFound,
        Message:    "Not such Disk",
    }
}

type fakeVMSet struct {
    NodeToIP map[string]map[string]string
    Err      error
}

func (f *fakeVMSet) GetInstanceIDByNodeName(name string) (string, error) {
    return "", fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
    return "", fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetIPByNodeName(name, vmSetName string) (string, error) {
    nodes, found := f.NodeToIP[vmSetName]
    if !found {
        return "", fmt.Errorf("not found")
    }
    ip, found := nodes[name]
    if !found {
        return "", fmt.Errorf("not found")
    }
    return ip, nil
}

func (f *fakeVMSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
    return network.Interface{}, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
    return types.NodeName(""), fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
    return cloudprovider.Zone{}, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetPrimaryVMSetName() string {
    return ""
}

func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
    return nil, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
    return fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
    return fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
    return fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
    return fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
    return -1, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
    return -1, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
    return nil, fmt.Errorf("unimplemented")
}
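Editor's note: every fake above follows the same shape — a mutex-guarded nested map keyed by resource group, then by resource name (scale-set VMs use the "<scaleSetName>_<instanceID>" key that Get composes). A hedged sketch of seeding one in a test; the test body is illustrative, while the types and setFakeStore come from the diff:

fVMC := newFakeVirtualMachineScaleSetVMsClient()
fVMC.setFakeStore(map[string]map[string]computepreview.VirtualMachineScaleSetVM{
    "rg": {
        "vmss_0": {}, // key must match Get's fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
    },
})
vm, err := fVMC.Get(context.Background(), "rg", "vmss", "0")    // err == nil for a seeded key
_, notFound := fVMC.Get(context.Background(), "rg", "vmss", "1") // a 404 DetailedError otherwise
_, _, _ = vm, err, notFound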
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
@ -20,6 +20,7 @@ import (
    "fmt"

    azs "github.com/Azure/azure-sdk-for-go/storage"
    "github.com/Azure/go-autorest/autorest/azure"
    "github.com/golang/glog"
)

@ -27,9 +28,33 @@ const (
    useHTTPS = true
)

// FileClient is the interface for creating file shares, interface for test
// injection.
type FileClient interface {
    createFileShare(accountName, accountKey, name string, sizeGiB int) error
    deleteFileShare(accountName, accountKey, name string) error
    resizeFileShare(accountName, accountKey, name string, sizeGiB int) error
}

// create file share
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB int) error {
    fileClient, err := az.getFileSvcClient(accountName, accountKey)
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
    return az.FileClient.createFileShare(accountName, accountKey, name, sizeGiB)
}

func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
    return az.FileClient.deleteFileShare(accountName, accountKey, name)
}

func (az *Cloud) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
    return az.FileClient.resizeFileShare(accountName, accountKey, name, sizeGiB)
}

type azureFileClient struct {
    env azure.Environment
}

func (f *azureFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
    fileClient, err := f.getFileSvcClient(accountName, accountKey)
    if err != nil {
        return err
    }
@ -42,7 +67,7 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in
    if err = share.Create(nil); err != nil {
        return fmt.Errorf("failed to create file share, err: %v", err)
    }
    share.Properties.Quota = sizeGB
    share.Properties.Quota = sizeGiB
    if err = share.SetProperties(nil); err != nil {
        if err := share.Delete(nil); err != nil {
            glog.Errorf("Error deleting share: %v", err)
@ -53,20 +78,38 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in
}

// delete a file share
func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
    fileClient, err := az.getFileSvcClient(accountName, accountKey)
    if err == nil {
        share := fileClient.GetShareReference(name)
        return share.Delete(nil)
func (f *azureFileClient) deleteFileShare(accountName, accountKey, name string) error {
    fileClient, err := f.getFileSvcClient(accountName, accountKey)
    if err != nil {
        return err
    }
    return fileClient.GetShareReference(name).Delete(nil)
}

func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
    fileClient, err := f.getFileSvcClient(accountName, accountKey)
    if err != nil {
        return err
    }
    share := fileClient.GetShareReference(name)
    if share.Properties.Quota >= sizeGiB {
        glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
            share.Properties.Quota, sizeGiB, accountName, name)
        return nil
    }
    share.Properties.Quota = sizeGiB
    if err = share.SetProperties(nil); err != nil {
        return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
    }
    glog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
    return nil
}

func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
    client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
func (f *azureFileClient) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
    fileClient, err := azs.NewClient(accountName, accountKey, f.env.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
    if err != nil {
        return nil, fmt.Errorf("error creating azure client: %v", err)
    }
    f := client.GetFileService()
    return &f, nil
    fc := fileClient.GetFileService()
    return &fc, nil
}
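Editor's note: the new FileClient interface exists so tests can inject a stub instead of talking to Azure Files (the fakeFileClient in azure_fakes.go is exactly that). A hedged sketch of a recording stub; the type and its field are invented for illustration, while the three method signatures are the interface's:

type recordingFileClient struct {
    created []string
}

func (r *recordingFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
    r.created = append(r.created, name) // record the share name instead of creating it
    return nil
}

func (r *recordingFileClient) deleteFileShare(accountName, accountKey, name string) error {
    return nil
}

func (r *recordingFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
    return nil
}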
@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
@ -17,18 +17,20 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
if az.UseInstanceMetadata {
|
||||
ipAddress := IPAddress{}
|
||||
err := az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
|
||||
@ -48,6 +50,7 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
ip, err := az.GetIPForMachineWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("NodeAddresses(%s) abort backoff", name)
|
||||
@ -63,29 +66,29 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return az.NodeAddresses(name)
|
||||
return az.NodeAddresses(ctx, name)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (az *Cloud) ExternalID(name types.NodeName) (string, error) {
|
||||
return az.InstanceID(name)
|
||||
func (az *Cloud) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return az.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
_, err = az.InstanceID(name)
|
||||
_, err = az.InstanceID(ctx, name)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return false, nil
|
||||
@ -99,101 +102,76 @@ func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if az.VMType == vmTypeVMSS {
|
||||
// VMSS vmName is not same with hostname, use hostname instead.
|
||||
metadataName, err = os.Hostname()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
metadataName = strings.ToLower(metadataName)
|
||||
return (metadataName == nodeName), err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
func (az *Cloud) InstanceID(name types.NodeName) (string, error) {
|
||||
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if isLocalInstance {
|
||||
externalInstanceID, err := az.metadata.Text("instance/compute/vmId")
|
||||
if err == nil {
|
||||
return externalInstanceID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if az.Config.VMType == vmTypeVMSS {
|
||||
id, err := az.getVmssInstanceID(name)
|
||||
if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
|
||||
// Retry with standard type because master nodes may not belong to any vmss.
|
||||
return az.getStandardInstanceID(name)
|
||||
// Not local instance, get instanceID from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
return id, err
|
||||
}
|
||||
// Compose instanceID based on nodeName for standard instance.
|
||||
if az.VMType == vmTypeStandard {
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
}
|
||||
|
||||
return az.getStandardInstanceID(name)
|
||||
}
|
||||
|
||||
func (az *Cloud) getVmssInstanceID(name types.NodeName) (string, error) {
|
||||
var machine compute.VirtualMachineScaleSetVM
|
||||
var exists bool
|
||||
var err error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, exists, err = az.getVmssVirtualMachine(name)
|
||||
if err != nil {
|
||||
if az.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
machine, exists, err = az.GetScaleSetsVMWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
// Get scale set name and instanceID from vmName for vmss.
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return *machine.ID, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getStandardInstanceID(name types.NodeName) (string, error) {
|
||||
var machine compute.VirtualMachine
|
||||
var exists bool
|
||||
var err error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, exists, err = az.getVirtualMachine(name)
|
||||
if err != nil {
|
||||
if az.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
machine, exists, err = az.GetVirtualMachineWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
ssName, instanceID, err := extractVmssVMName(metadataName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
// Compose instanceID based on ssName and instanceID for vmss instance.
|
||||
return az.getVmssMachineID(ssName, instanceID), nil
|
||||
}
|
||||
return *machine.ID, nil
|
||||
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}

// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) InstanceTypeByProviderID(providerID string) (string, error) {
	name, err := splitProviderID(providerID)
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return "", err
	}

	return az.InstanceType(name)
	return az.InstanceType(ctx, name)
}

// InstanceType returns the type of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
	if az.UseInstanceMetadata {
		isLocalInstance, err := az.isCurrentInstance(name)
		if err != nil {
@ -207,57 +185,18 @@ func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
		}
	}

	if az.Config.VMType == vmTypeVMSS {
		machineType, err := az.getVmssInstanceType(name)
		if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
			// Retry with standard type because master nodes may not belong to any vmss.
			return az.getStandardInstanceType(name)
		}

		return machineType, err
	}

	return az.getStandardInstanceType(name)
}

// getVmssInstanceType gets instance with type vmss.
func (az *Cloud) getVmssInstanceType(name types.NodeName) (string, error) {
	machine, exists, err := az.getVmssVirtualMachine(name)
	if err != nil {
		glog.Errorf("error: az.InstanceType(%s), az.getVmssVirtualMachine(%s) err=%v", name, name, err)
		return "", err
	} else if !exists {
		return "", cloudprovider.InstanceNotFound
	}

	if machine.Sku.Name != nil {
		return *machine.Sku.Name, nil
	}

	return "", fmt.Errorf("instance type is not set")
}

// getStandardInstanceType gets instance with standard type.
func (az *Cloud) getStandardInstanceType(name types.NodeName) (string, error) {
	machine, exists, err := az.getVirtualMachine(name)
	if err != nil {
		glog.Errorf("error: az.InstanceType(%s), az.getVirtualMachine(%s) err=%v", name, name, err)
		return "", err
	} else if !exists {
		return "", cloudprovider.InstanceNotFound
	}
	return string(machine.HardwareProfile.VMSize), nil
	return az.vmSet.GetInstanceTypeByNodeName(string(name))
}
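
Most of the removed per-VM-type plumbing above now hides behind az.vmSet. Pieced together purely from the call sites in this diff (the real interface lives in azure_vmsets.go and may declare more methods), the abstraction looks roughly like:

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// VMSet abstracts standard availability-set VMs and virtual machine scale
// sets behind one interface; azure_standard.go and azure_vmss.go would each
// supply an implementation. Signatures are inferred from usage in this diff.
type VMSet interface {
	// GetInstanceIDByNodeName returns the cloud provider ID of the node.
	GetInstanceIDByNodeName(name string) (string, error)
	// GetInstanceTypeByNodeName returns the VM size of the node.
	GetInstanceTypeByNodeName(name string) (string, error)
	// GetNodeNameByProviderID maps a provider ID back to a node name.
	GetNodeNameByProviderID(providerID string) (types.NodeName, error)
	// GetPrimaryVMSetName returns the default VM set hosting the cluster LB.
	GetPrimaryVMSetName() string
	// GetVMSetNames selects the VM sets that may host a service's LB.
	GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]string, error)
	// EnsureHostsInPool joins the nodes' primary NICs to the backend pool.
	EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
	// EnsureBackendPoolDeleted detaches the pool before the LB is deleted.
	EnsureBackendPoolDeleted(backendPoolID, vmSetName string) error
}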

// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return fmt.Errorf("not supported")
}

// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
// CurrentNodeName returns the name of the node we are currently running on.
// On Azure this is the hostname, so we just return the hostname.
func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}

@ -266,9 +205,3 @@ func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
func mapNodeNameToVMName(nodeName types.NodeName) string {
	return string(nodeName)
}

// mapVMNameToNodeName maps an Azure VM Name to a k8s NodeName
// This is a simple string cast.
func mapVMNameToNodeName(vmName string) types.NodeName {
	return types.NodeName(vmName)
}

349
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
349
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
@ -17,72 +17,77 @@ limitations under the License.
package azure

import (
	"context"
	"fmt"
	"math"
	"strconv"
	"strings"

	"k8s.io/api/core/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/sets"
	serviceapi "k8s.io/kubernetes/pkg/api/v1/service"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
)

// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal"
const (
	// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
	ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal"

// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service
// to specify what subnet it is exposed on
const ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet"
	// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service
	// to specify what subnet it is exposed on
	ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet"

// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the
// Azure load balancer selection based on availability sets
// There are currently three possible load balancer selection modes :
// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode")
//    In this case the Loadbalancer of the primary Availability set is selected
// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set
//    is selected which has the miinimum rules associated with it.
// 3. "as1,as2" mode - this is when the laod balancer from the specified availability sets is selected that has the
//    miinimum rules associated with it.
const ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode"
	// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the
	// Azure load balancer selection based on availability sets
	// There are currently three possible load balancer selection modes :
	// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode")
	//    In this case the Loadbalancer of the primary Availability set is selected
	// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set
	//    is selected which has the minimum rules associated with it.
	// 3. "as1,as2" mode - this is when the load balancer from the specified availability sets is selected that has the
	//    minimum rules associated with it.
	ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode"

// ServiceAnnotationLoadBalancerAutoModeValue the annotation used on the service to specify the
// Azure load balancer auto selection from the availability sets
const ServiceAnnotationLoadBalancerAutoModeValue = "__auto__"
	// ServiceAnnotationLoadBalancerAutoModeValue is the annotation used on the service to specify the
	// Azure load balancer auto selection from the availability sets
	ServiceAnnotationLoadBalancerAutoModeValue = "__auto__"

// ServiceAnnotationDNSLabelName annotation speficying the DNS label name for the service.
const ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name"
	// ServiceAnnotationDNSLabelName is the annotation used on the service
	// to specify the DNS label name for the service.
	ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name"

// ServiceAnnotationSharedSecurityRule is the annotation used on the service
// to specify that the service should be exposed using an Azure security rule
// that may be shared with other service, trading specificity of rules for an
// increase in the number of services that can be exposed. This relies on the
// Azure "augmented security rules" feature which at the time of writing is in
// preview and available only in certain regions.
const ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"
	// ServiceAnnotationSharedSecurityRule is the annotation used on the service
	// to specify that the service should be exposed using an Azure security rule
	// that may be shared with other service, trading specificity of rules for an
	// increase in the number of services that can be exposed. This relies on the
	// Azure "augmented security rules" feature which at the time of writing is in
	// preview and available only in certain regions.
	ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"
)
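
As a usage reference, here is a hedged sketch of the Kubernetes side of these constants: a Service asking for an internal Azure load balancer on a specific subnet, with the LB selection mode set (the subnet name is invented for illustration):

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var exampleService = &v1.Service{
	ObjectMeta: metav1.ObjectMeta{
		Name: "my-svc",
		Annotations: map[string]string{
			ServiceAnnotationLoadBalancerInternal:       "true",
			ServiceAnnotationLoadBalancerInternalSubnet: "apps-subnet", // illustrative
			// Either an explicit list ("as1,as2") or the auto mode value.
			ServiceAnnotationLoadBalancerMode: ServiceAnnotationLoadBalancerAutoModeValue,
		},
	},
	Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
}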

// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
const ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"

// GetLoadBalancer returns whether the specified load balancer exists, and
// if so, what its status is.
func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
	_, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false)
	if err != nil {
		return nil, false, err
	}
	if exists == false {
	if !exists {
		serviceName := getServiceName(service)
		glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s)- IP doesn't exist in any of the lbs", clusterName, serviceName)
		return nil, false, fmt.Errorf("Service(%s) - Loadbalancer not found", serviceName)
		glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
		return nil, false, nil
	}
	return status, true, nil
}

func getPublicIPLabel(service *v1.Service) string {
func getPublicIPDomainNameLabel(service *v1.Service) string {
	if labelName, found := service.Annotations[ServiceAnnotationDNSLabelName]; found {
		return labelName
	}
@ -90,7 +95,7 @@ func getPublicIPLabel(service *v1.Service) string {
}

// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	// When a client updates the internal load balancer annotation,
	// the service may be switched from an internal LB to a public one, or vise versa.
	// Here we'll firstly ensure service do not lie in the opposite LB.
@ -128,8 +133,8 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
}

// UpdateLoadBalancer updates hosts under the specified load balancer.
func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
	_, err := az.EnsureLoadBalancer(clusterName, service, nodes)
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
	_, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes)
	return err
}

@ -139,12 +144,12 @@ func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nod
// This construction is useful because many cloud providers' load balancers
// have multiple underlying components, meaning a Get could say that the LB
// doesn't exist even if some part of it is still laying around.
func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName)

	serviceIPToCleanup, err := az.findServiceIPAddress(clusterName, service, isInternal)
	serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
	if err != nil {
		return err
	}
@ -166,15 +171,16 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi
	return nil
}

// getServiceLoadBalancer gets the loadbalancer for the service if it already exists
// If wantLb is TRUE then -it selects a new load balancer
// In case the selected load balancer does not exists it returns network.LoadBalancer struct
// with added metadata (such as name, location) and existsLB set to FALSE
// By default - cluster default LB is returned
// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
// If wantLb is TRUE then -it selects a new load balancer.
// In case the selected load balancer does not exist it returns network.LoadBalancer struct
// with added metadata (such as name, location) and existsLB set to FALSE.
// By default - cluster default LB is returned.
func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
	isInternal := requiresInternalLoadBalancer(service)
	var defaultLB *network.LoadBalancer
	defaultLBName := az.getLoadBalancerName(clusterName, az.Config.PrimaryAvailabilitySetName, isInternal)
	primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
	defaultLBName := az.getLoadBalancerName(clusterName, primaryVMSetName, isInternal)

	existingLBs, err := az.ListLBWithRetry()
	if err != nil {
@ -226,26 +232,27 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
	return defaultLB, nil, false, nil
}

// select load balancer for the service in the cluster
// the selection algorithm selectes the load balancer which currently has
// the minimum lb rules, if there are multiple LB's with same number of rules
// it selects the first one (sorted based on name)
// selectLoadBalancer selects load balancer for the service in the cluster.
// The selection algorithm selects the load balancer which currently has
// the minimum lb rules. If there are multiple LBs with same number of rules,
// then selects the first one (sorted based on name).
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
	isInternal := requiresInternalLoadBalancer(service)
	serviceName := getServiceName(service)
	glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal)
	availabilitySetNames, err := az.getLoadBalancerAvailabilitySetNames(service, nodes)
	vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
	if err != nil {
		glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.getLoadBalancerAvailabilitySetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
		glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
		return nil, false, err
	}
	glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - availabilitysetsnames %v", clusterName, serviceName, isInternal, *availabilitySetNames)
	glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)

	mapExistingLBs := map[string]network.LoadBalancer{}
	for _, lb := range *existingLBs {
		mapExistingLBs[*lb.Name] = lb
	}
	selectedLBRuleCount := math.MaxInt32
	for _, currASName := range *availabilitySetNames {
	for _, currASName := range *vmSetNames {
		currLBName := az.getLoadBalancerName(clusterName, currASName, isInternal)
		lb, exists := mapExistingLBs[currLBName]
		if !exists {
@ -272,13 +279,13 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
	}

	if selectedLB == nil {
		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected availability sets %v", clusterName, serviceName, isInternal, *availabilitySetNames)
		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames)
		glog.Error(err)
		return nil, false, err
	}
	// validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount
	if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount {
		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, availabilitysetnames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *availabilitySetNames)
		err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames)
		glog.Error(err)
		return selectedLB, existsLb, err
	}
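
Stripped of the Azure plumbing, the policy in selectLoadBalancer reduces to a minimum-rule-count scan with ties broken by name order. A condensed restatement, under the assumption that rule counts have already been gathered per candidate LB name and that the names arrive sorted (as GetVMSetNames's output is here):

import "math"

// pickLoadBalancer returns the candidate with the fewest rules; because the
// names are iterated in sorted order, the first minimum wins ties, matching
// the "selects the first one (sorted based on name)" comment above.
func pickLoadBalancer(sortedNames []string, ruleCount map[string]int) (best string, found bool) {
	bestCount := math.MaxInt32
	for _, name := range sortedNames {
		if c, ok := ruleCount[name]; ok && c < bestCount {
			best, bestCount, found = name, c, true
		}
	}
	return best, found
}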
@ -288,7 +295,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi

func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
	if lb == nil {
		glog.V(10).Infof("getServiceLoadBalancerStatus lb is nil")
		glog.V(10).Info("getServiceLoadBalancerStatus lb is nil")
		return nil, nil
	}
	if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
@ -314,7 +321,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
			if err != nil {
				return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID)
			}
			pip, existsPip, err := az.getPublicIPAddress(pipName)
			pip, existsPip, err := az.getPublicIPAddress(az.getPublicIPAddressResourceGroup(service), pipName)
			if err != nil {
				return nil, err
			}
@ -336,7 +343,9 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service)
		return getPublicIPName(clusterName, service), nil
	}

	pips, err := az.ListPIPWithRetry()
	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)

	pips, err := az.ListPIPWithRetry(pipResourceGroup)
	if err != nil {
		return "", err
	}
@ -347,7 +356,7 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service)
			return *pip.Name, nil
		}
	}
	return "", fmt.Errorf("user supplied IP Address %s was not found", loadBalancerIP)
	return "", fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup)
}

func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
@ -365,27 +374,30 @@ func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
	return copyService
}

func (az *Cloud) findServiceIPAddress(clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
	if len(service.Spec.LoadBalancerIP) > 0 {
		return service.Spec.LoadBalancerIP, nil
	}

	lbStatus, existsLb, err := az.GetLoadBalancer(clusterName, service)
	lbStatus, existsLb, err := az.GetLoadBalancer(ctx, clusterName, service)
	if err != nil {
		return "", err
	}
	if !existsLb {
		return "", fmt.Errorf("Expected to find an IP address for service %s but did not", service.Name)
		glog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name)
		return "", nil
	}
	if len(lbStatus.Ingress) < 1 {
		return "", fmt.Errorf("Expected to find an IP address for service %s but it had no ingresses", service.Name)
		glog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name)
		return "", nil
	}

	return lbStatus.Ingress[0].IP, nil
}

func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel string) (*network.PublicIPAddress, error) {
	pip, existsPip, err := az.getPublicIPAddress(pipName)
func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel string) (*network.PublicIPAddress, error) {
	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
	pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
	if err != nil {
		return nil, err
	}
@ -393,6 +405,7 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri
		return &pip, nil
	}

	serviceName := getServiceName(service)
	pip.Name = to.StringPtr(pipName)
	pip.Location = to.StringPtr(az.Location)
	pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
@ -405,19 +418,15 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri
	}
	pip.Tags = &map[string]*string{"service": &serviceName}
	glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): start", *pip.Name)
	err = az.CreateOrUpdatePIPWithRetry(pip)
	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
	err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
	if err != nil {
		glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name)
		return nil, err
	}
	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): end", *pip.Name)
	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name)

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("PublicIPAddressesClient.Get(%q): start", *pip.Name)
	pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, *pip.Name, "")
	glog.V(10).Infof("PublicIPAddressesClient.Get(%q): end", *pip.Name)
	pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "")
	if err != nil {
		return nil, err
	}
@ -544,8 +553,8 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
		if err != nil {
			return nil, err
		}
		domainNameLabel := getPublicIPLabel(service)
		pip, err := az.ensurePublicIPExists(serviceName, pipName, domainNameLabel)
		domainNameLabel := getPublicIPDomainNameLabel(service)
		pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel)
		if err != nil {
			return nil, err
		}
@ -741,14 +750,24 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
			// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
			glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)

			az.operationPollRateLimiter.Accept()
			glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName)
			err := az.DeleteLBWithRetry(lbName)
			// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
			vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
			glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
			err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName)
			if err != nil {
				glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
				return nil, err
			}
			glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)

			// Remove the LB.
			glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName)
			err = az.DeleteLBWithRetry(lbName)
			if err != nil {
				glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
				return nil, err
			}
			glog.V(10).Infof("LoadBalancerClient.Delete(%q): end", lbName)
			glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
		} else {
			glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
			err := az.CreateOrUpdateLBWithRetry(*lb)
@ -756,28 +775,28 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
				glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
				return nil, err
			}

			if isInternal {
				// Refresh updated lb which will be used later in other places.
				newLB, exist, err := az.getAzureLoadBalancer(lbName)
				if err != nil {
					glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err)
					return nil, err
				}
				if !exist {
					return nil, fmt.Errorf("load balancer %q not found", lbName)
				}
				lb = &newLB
			}
		}
	}

	if wantLb && nodes != nil {
		// Add the machines to the backend pool if they're not already
		availabilitySetName := az.mapLoadBalancerNameToAvailabilitySet(lbName, clusterName)
		hostUpdates := make([]func() error, len(nodes))
		for i, node := range nodes {
			localNodeName := node.Name
			f := func() error {
				err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID, availabilitySetName)
				if err != nil {
					return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err)
				}
				return nil
			}
			hostUpdates[i] = f
		}

		errs := utilerrors.AggregateGoroutines(hostUpdates...)
		if errs != nil {
			return nil, utilerrors.Flatten(errs)
		vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
		err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName)
		if err != nil {
			return nil, err
		}
	}

@ -800,10 +819,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
		ports = []v1.ServicePort{}
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("SecurityGroupsClient.Get(%q): start", az.SecurityGroupName)
	sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
	glog.V(10).Infof("SecurityGroupsClient.Get(%q): end", az.SecurityGroupName)
	sg, err := az.getSecurityGroup()
	if err != nil {
		return nil, err
	}
@ -972,7 +988,6 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
	if dirtySg {
		sg.SecurityRules = &updatedRules
		glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
		err := az.CreateOrUpdateSGWithRetry(sg)
		if err != nil {
@ -1143,7 +1158,9 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
		}
	}

	pips, err := az.ListPIPWithRetry()
	pipResourceGroup := az.getPublicIPAddressResourceGroup(service)

	pips, err := az.ListPIPWithRetry(pipResourceGroup)
	if err != nil {
		return nil, err
	}
@ -1159,15 +1176,14 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
			// Public ip resource with match service tag
		} else {
			glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName)
			az.operationPollRateLimiter.Accept()
			glog.V(10).Infof("DeletePublicIPWithRetry(%q): start", pipName)
			err = az.DeletePublicIPWithRetry(pipName)
			glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
			err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
			if err != nil {
				glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName)
				// We let err to pass through
				// It may be ignorable
			}
			glog.V(10).Infof("DeletePublicIPWithRetry(%q): end", pipName) // response not read yet...
			glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) // response not read yet...

			err = ignoreStatusNotFoundFromError(err)
			if err != nil {
@ -1182,8 +1198,8 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
	if !isInternal && wantLb {
		// Confirm desired public ip resource exists
		var pip *network.PublicIPAddress
		domainNameLabel := getPublicIPLabel(service)
		if pip, err = az.ensurePublicIPExists(serviceName, desiredPipName, domainNameLabel); err != nil {
		domainNameLabel := getPublicIPDomainNameLabel(service)
		if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel); err != nil {
			return nil, err
		}
		return pip, nil
@ -1246,98 +1262,17 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b
	return false
}

// This ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, availabilitySetName string) error {
	var machine compute.VirtualMachine
	vmName := mapNodeNameToVMName(nodeName)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("VirtualMachinesClient.Get(%q): start", vmName)
	machine, err := az.VirtualMachineClientGetWithRetry(az.ResourceGroup, vmName, "")
	if err != nil {
		glog.V(2).Infof("ensureHostInPool(%s, %s, %s) abort backoff", serviceName, nodeName, backendPoolID)
		return err
	}
	glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", vmName)

	primaryNicID, err := getPrimaryInterfaceID(machine)
	if err != nil {
		return err
	}
	nicName, err := getLastSegment(primaryNicID)
	if err != nil {
		return err
	}

	// Check availability set
	if availabilitySetName != "" {
		expectedAvailabilitySetName := az.getAvailabilitySetID(availabilitySetName)
		if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
			glog.V(3).Infof(
				"nicupdate(%s): skipping nic (%s) since it is not in the availabilitySet(%s)",
				serviceName, nicName, availabilitySetName)
			return nil
		}
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
	nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
	glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
	if err != nil {
		return err
	}

	var primaryIPConfig *network.InterfaceIPConfiguration
	primaryIPConfig, err = getPrimaryIPConfig(nic)
	if err != nil {
		return err
	}

	foundPool := false
	newBackendPools := []network.BackendAddressPool{}
	if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
		newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
	}
	for _, existingPool := range newBackendPools {
		if strings.EqualFold(backendPoolID, *existingPool.ID) {
			foundPool = true
			break
		}
	}
	if !foundPool {
		newBackendPools = append(newBackendPools,
			network.BackendAddressPool{
				ID: to.StringPtr(backendPoolID),
			})

		primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools

		glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): start", *nic.Name)
		respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
		if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
			glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
			retryErr := az.CreateOrUpdateInterfaceWithRetry(nic)
			if retryErr != nil {
				err = retryErr
				glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string {
	if resourceGroup, found := service.Annotations[ServiceAnnotationLoadBalancerResourceGroup]; found {
		return resourceGroup
	}

	return az.ResourceGroup
}

// Check if service requires an internal load balancer.
func requiresInternalLoadBalancer(service *v1.Service) bool {
	if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternal]; ok {
	if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternal]; found {
		return l == "true"
	}

@ -1346,7 +1281,7 @@ func requiresInternalLoadBalancer(service *v1.Service) bool {

func subnet(service *v1.Service) *string {
	if requiresInternalLoadBalancer(service) {
		if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; ok {
		if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; found {
			return &l
		}
	}
@ -1354,28 +1289,28 @@ func subnet(service *v1.Service) *string {
	return nil
}

// getServiceLoadBalancerMode parses the mode value
// if the value is __auto__ it returns isAuto = TRUE
// if anything else it returns the unique availability set names after trimming spaces
func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, availabilitySetNames []string) {
// getServiceLoadBalancerMode parses the mode value.
// if the value is __auto__ it returns isAuto = TRUE.
// if anything else it returns the unique VM set names after trimming spaces.
func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, vmSetNames []string) {
	mode, hasMode := service.Annotations[ServiceAnnotationLoadBalancerMode]
	mode = strings.TrimSpace(mode)
	isAuto = strings.EqualFold(mode, ServiceAnnotationLoadBalancerAutoModeValue)
	if !isAuto {
		// Break up list of "AS1,AS2"
		availabilitySetParsedList := strings.Split(mode, ",")
		vmSetParsedList := strings.Split(mode, ",")

		// Trim the availability set names and remove duplicates
		// Trim the VM set names and remove duplicates
		//  e.g. {"AS1"," AS2", "AS3", "AS3"} => {"AS1", "AS2", "AS3"}
		availabilitySetNameSet := sets.NewString()
		for _, v := range availabilitySetParsedList {
			availabilitySetNameSet.Insert(strings.TrimSpace(v))
		vmSetNameSet := sets.NewString()
		for _, v := range vmSetParsedList {
			vmSetNameSet.Insert(strings.TrimSpace(v))
		}

		availabilitySetNames = availabilitySetNameSet.List()
		vmSetNames = vmSetNameSet.List()
	}

	return hasMode, isAuto, availabilitySetNames
	return hasMode, isAuto, vmSetNames
}
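
Concretely, the parsing above behaves like this hedged example (sets.String de-duplicates, and List() returns the names sorted, so the trailing duplicate and stray spaces disappear):

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func ExampleGetServiceLoadBalancerMode() {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{
		ServiceAnnotationLoadBalancerMode: "AS1, AS2 ,AS3,AS3",
	}
	hasMode, isAuto, vmSetNames := getServiceLoadBalancerMode(svc)
	fmt.Println(hasMode, isAuto, vmSetNames)
	// Expected: true false [AS1 AS2 AS3]
}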

func useSharedSecurityRule(service *v1.Service) bool {

6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
@ -9,7 +9,7 @@ Despite the ideal philosophy above, we have to face the reality. NSG depends on
And also, For Azure, we cannot afford to have more than 1 worker of service_controller. Because, different services could operate on the same LB, concurrent execution could result in conflict or unexpected result. For AWS and GCE, they apparently doesn't have the problem, they use one LB per service, no such conflict.

There are two load balancers per availability set internal and external. There is a limit on number of services that can be associated with a single load balancer.
By default primary load balancer is selected. Services can be annotated to allow auto selection of available load balancers. Service annotations can also be used to provide specific availability sets that host the load balancers. Note that in case of auto selection or specific availability set selection, when the availability set is lost incase of downtime or cluster scale down the services are currently not auto assigned to an available load balancer.
By default primary load balancer is selected. Services can be annotated to allow auto selection of available load balancers. Service annotations can also be used to provide specific availability sets that host the load balancers. Note that in case of auto selection or specific availability set selection, when the availability set is lost in case of downtime or cluster scale down the services are currently not auto assigned to an available load balancer.
Service Annotation for Auto and specific load balancer mode

- service.beta.kubernetes.io/azure-load-balancer-mode" (__auto__|as1,as2...)
@ -40,7 +40,7 @@ Service Annotation for Auto and specific load balancer mode

- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
  - gets the loadbalancer for the service if it already exists
  - If wantLb is TRUE then -it selects a new load balancer, the selction helps distribute the services across load balancers
  - If wantLb is TRUE then -it selects a new load balancer, the selection helps distribute the services across load balancers
  - In case the selected load balancer does not exists it returns network.LoadBalancer struct with added metadata (such as name, location) and existsLB set to FALSE
  - By default - cluster default LB is returned

@ -74,4 +74,4 @@ Service Annotation for Auto and specific load balancer mode
  - Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
  - Call reconcileLoadBalancer(clusterName, service, nodes, false)
- Reconcile Public IP, public IP needs related LB reconciled first
  - Call reconcilePublicIP(cluster, service, false)
  - Call reconcilePublicIP(cluster, service, false)
@ -73,7 +73,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
	diskID := ""

	err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
		provisonState, id, err := c.getDisk(diskName)
		provisionState, id, err := c.getDisk(diskName)
		diskID = id
		// We are waiting for provisioningState==Succeeded
		// We don't want to hand-off managed disks to k8s while they are
@ -81,7 +81,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
		if err != nil {
			return false, err
		}
		if strings.ToLower(provisonState) == "succeeded" {
		if strings.ToLower(provisionState) == "succeeded" {
			return true, nil
		}
		return false, nil
@ -106,8 +106,8 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
	if err != nil {
		return err
	}
	// We don't need poll here, k8s will immediatly stop referencing the disk
	// the disk will be evantually deleted - cleanly - by ARM
	// We don't need poll here, k8s will immediately stop referencing the disk
	// the disk will be eventually deleted - cleanly - by ARM

	glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
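
The CreateManagedDisk hunk above polls provisioningState under kwait.ExponentialBackoff until ARM reports Succeeded. A minimal standalone sketch of that wait pattern — the backoff values are illustrative; the real defaultBackOff is defined elsewhere in this package:

import (
	"strings"
	"time"

	kwait "k8s.io/apimachinery/pkg/util/wait"
)

// waitForSucceeded mirrors the CreateManagedDisk loop: keep polling until the
// provisioning state reads "succeeded", aborting early on a hard error.
func waitForSucceeded(getState func() (string, error)) error {
	backoff := kwait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 5}
	return kwait.ExponentialBackoff(backoff, func() (bool, error) {
		state, err := getState()
		if err != nil {
			return false, err // a returned error stops the retries
		}
		return strings.ToLower(state) == "succeeded", nil
	})
}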
82
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics.go
generated
vendored
Normal file
82
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

type apiCallMetrics struct {
	latency *prometheus.HistogramVec
	errors  *prometheus.CounterVec
}

var (
	metricLabels = []string{
		"request",         // API function that is being invoked
		"resource_group",  // Resource group of the resource being monitored
		"subscription_id", // Subscription ID of the resource being monitored
	}

	apiMetrics = registerAPIMetrics(metricLabels...)
)

type metricContext struct {
	start      time.Time
	attributes []string
}

func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
	return &metricContext{
		start:      time.Now(),
		attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID},
	}
}

func (mc *metricContext) Observe(err error) {
	apiMetrics.latency.WithLabelValues(mc.attributes...).Observe(
		time.Since(mc.start).Seconds())
	if err != nil {
		apiMetrics.errors.WithLabelValues(mc.attributes...).Inc()
	}
}

func registerAPIMetrics(attributes ...string) *apiCallMetrics {
	metrics := &apiCallMetrics{
		latency: prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Name: "cloudprovider_azure_api_request_duration_seconds",
				Help: "Latency of an Azure API call",
			},
			attributes,
		),
		errors: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "cloudprovider_azure_api_request_errors",
				Help: "Number of errors for an Azure API call",
			},
			attributes,
		),
	}

	prometheus.MustRegister(metrics.latency)
	prometheus.MustRegister(metrics.errors)

	return metrics
}
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics_test.go
generated
vendored
Normal file
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics_test.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAzureMetricLabelCardinality(t *testing.T) {
	mc := newMetricContext("test", "create", "resource_group", "subscription_id")
	assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match")
}

func TestAzureMetricLabelPrefix(t *testing.T) {
	mc := newMetricContext("prefix", "request", "resource_group", "subscription_id")
	found := false
	for _, attribute := range mc.attributes {
		if attribute == "prefix_request" {
			found = true
		}
	}
	assert.True(t, found, "request label must be prefixed")
}
98
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
98
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package azure

import (
	"context"
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider"
@ -28,18 +29,23 @@ import (
)

// ListRoutes lists all managed routes that belong to the specified clusterName
func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route, err error) {
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
	glog.V(10).Infof("list: START clusterName=%q", clusterName)
	routeTable, existsRouteTable, err := az.getRouteTable()
	return processRoutes(routeTable, existsRouteTable, err)
}

// Injectable for testing
func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) {
	if err != nil {
		return nil, err
	}
	if !existsRouteTable {
	if !exists {
		return []*cloudprovider.Route{}, nil
	}

	var kubeRoutes []*cloudprovider.Route
	if routeTable.Routes != nil {
	if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil {
		kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
		for i, route := range *routeTable.Routes {
			instance := mapRouteNameToNodeName(*route.Name)
@ -58,51 +64,53 @@ func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route,
	return kubeRoutes, nil
}
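
Since processRoutes is explicitly factored out as "injectable for testing", a unit test can feed it a hand-built table. A hedged sketch, assuming the elided loop body maps route Name to TargetNode and AddressPrefix to DestinationCIDR as the surrounding lines suggest:

func TestProcessRoutesTranslation(t *testing.T) {
	routes := []network.Route{{
		Name: to.StringPtr("node-1"),
		RoutePropertiesFormat: &network.RoutePropertiesFormat{
			AddressPrefix: to.StringPtr("10.244.1.0/24"),
		},
	}}
	table := network.RouteTable{
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{Routes: &routes},
	}

	got, err := processRoutes(table, true, nil)
	if err != nil || len(got) != 1 {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
	if string(got[0].TargetNode) != "node-1" || got[0].DestinationCIDR != "10.244.1.0/24" {
		t.Errorf("unexpected route: %+v", got[0])
	}
}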

func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
	if _, existsRouteTable, err := az.getRouteTable(); err != nil {
		glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		return err
	} else if existsRouteTable {
		return nil
	}
	return az.createRouteTable()
}

func (az *Cloud) createRouteTable() error {
	routeTable := network.RouteTable{
		Name:     to.StringPtr(az.RouteTableName),
		Location: to.StringPtr(az.Location),
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
	}

	glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
	respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
	resp := <-respChan
	err := <-errChan
	glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
	if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
		retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
		}
	}
	if err != nil {
		return err
	}

	// Invalidate the cache right after updating
	az.rtCache.Delete(az.RouteTableName)
	return nil
}

// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
	glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

	routeTable, existsRouteTable, err := az.getRouteTable()
	if err != nil {
		glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
		return err
	}
	if !existsRouteTable {
		routeTable = network.RouteTable{
			Name:     to.StringPtr(az.RouteTableName),
			Location: to.StringPtr(az.Location),
			RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
		}

		glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): start", az.RouteTableName)
		respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
		if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
			glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
			retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
			if retryErr != nil {
				err = retryErr
				glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
			}
		}
		if err != nil {
			return err
		}

		glog.V(10).Infof("RouteTablesClient.Get(%q): start", az.RouteTableName)
		routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
		glog.V(10).Infof("RouteTablesClient.Get(%q): end", az.RouteTableName)
		if err != nil {
			return err
		}
	}

	targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
	if err != nil {
		return err
@ -119,8 +127,6 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
	}

	glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): start", az.RouteTableName)
	respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
	resp := <-respChan
	err = <-errChan
@ -143,12 +149,10 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo

// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error {
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
	glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("RoutesClient.Delete(%q): start", az.RouteTableName)
	respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
	resp := <-respChan
	err := <-errChan
344
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
Normal file
344
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
Normal file
@ -0,0 +1,344 @@
|
||||
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"context"
	"fmt"
	"reflect"
	"testing"

	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func TestDeleteRoute(t *testing.T) {
	fakeRoutes := newFakeRoutesClient()

	cloud := &Cloud{
		RoutesClient: fakeRoutes,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
	routeName := mapNodeNameToRouteName(route.TargetNode)

	fakeRoutes.FakeStore = map[string]map[string]network.Route{
		cloud.RouteTableName: {
			routeName: {},
		},
	}

	err := cloud.DeleteRoute(context.TODO(), "cluster", &route)
	if err != nil {
		t.Errorf("unexpected error deleting route: %v", err)
		t.FailNow()
	}

	mp, found := fakeRoutes.FakeStore[cloud.RouteTableName]
	if !found {
		t.Errorf("unexpected missing item for %s", cloud.RouteTableName)
		t.FailNow()
	}
	ob, found := mp[routeName]
	if found {
		t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
	}
}

func TestCreateRoute(t *testing.T) {
	fakeTable := newFakeRouteTablesClient()
	fakeVM := &fakeVMSet{}
	fakeRoutes := newFakeRoutesClient()

	cloud := &Cloud{
		RouteTablesClient: fakeTable,
		RoutesClient:      fakeRoutes,
		vmSet:             fakeVM,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}
	fakeTable.FakeStore = map[string]map[string]network.RouteTable{}
	fakeTable.FakeStore[cloud.ResourceGroup] = map[string]network.RouteTable{
		cloud.RouteTableName: expectedTable,
	}
	route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}

	nodeIP := "2.4.6.8"
	fakeVM.NodeToIP = map[string]map[string]string{
		"": {
			"node": nodeIP,
		},
	}

	err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
	if err != nil {
		t.Errorf("unexpected error create if not exists route table: %v", err)
		t.FailNow()
	}
	if len(fakeTable.Calls) != 1 || fakeTable.Calls[0] != "Get" {
		t.Errorf("unexpected calls create if not exists, exists: %v", fakeTable.Calls)
	}

	routeName := mapNodeNameToRouteName(route.TargetNode)
	routeInfo, found := fakeRoutes.FakeStore[cloud.RouteTableName][routeName]
	if !found {
		t.Errorf("could not find route: %v in %v", routeName, fakeRoutes.FakeStore)
		t.FailNow()
	}
	if *routeInfo.AddressPrefix != route.DestinationCIDR {
		t.Errorf("Expected cidr: %s, saw %s", route.DestinationCIDR, *routeInfo.AddressPrefix)
	}
	if routeInfo.NextHopType != network.RouteNextHopTypeVirtualAppliance {
		t.Errorf("Expected next hop: %v, saw %v", network.RouteNextHopTypeVirtualAppliance, routeInfo.NextHopType)
	}
	if *routeInfo.NextHopIPAddress != nodeIP {
		t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
	}
}

func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {
	fake := newFakeRouteTablesClient()
	cloud := &Cloud{
		RouteTablesClient: fake,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}
	fake.FakeStore = map[string]map[string]network.RouteTable{}
	fake.FakeStore[cloud.ResourceGroup] = map[string]network.RouteTable{
		cloud.RouteTableName: expectedTable,
	}
	err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
	if err != nil {
		t.Errorf("unexpected error create if not exists route table: %v", err)
		t.FailNow()
	}
	if len(fake.Calls) != 1 || fake.Calls[0] != "Get" {
		t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
	}
}

func TestCreateRouteTableIfNotExists_NotExists(t *testing.T) {
	fake := newFakeRouteTablesClient()
	cloud := &Cloud{
		RouteTablesClient: fake,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}

	err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
	if err != nil {
		t.Errorf("unexpected error create if not exists route table: %v", err)
		t.FailNow()
	}

	table := fake.FakeStore[cloud.ResourceGroup][cloud.RouteTableName]
	if *table.Location != *expectedTable.Location {
		t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
	}
	if *table.Name != *expectedTable.Name {
		t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
	}
	if len(fake.Calls) != 2 || fake.Calls[0] != "Get" || fake.Calls[1] != "CreateOrUpdate" {
		t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
	}
}

func TestCreateRouteTable(t *testing.T) {
	fake := newFakeRouteTablesClient()
	cloud := &Cloud{
		RouteTablesClient: fake,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}

	err := cloud.createRouteTable()
	if err != nil {
		t.Errorf("unexpected error in creating route table: %v", err)
		t.FailNow()
	}

	table := fake.FakeStore["foo"]["bar"]
	if *table.Location != *expectedTable.Location {
		t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
	}
	if *table.Name != *expectedTable.Name {
		t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
	}
}

func TestProcessRoutes(t *testing.T) {
	tests := []struct {
		rt            network.RouteTable
		exists        bool
		err           error
		expectErr     bool
		expectedError string
		expectedRoute []cloudprovider.Route
		name          string
	}{
		{
			err:           fmt.Errorf("test error"),
			expectErr:     true,
			expectedError: "test error",
		},
		{
			exists: false,
			name:   "doesn't exist",
		},
		{
			rt:     network.RouteTable{},
			exists: true,
			name:   "nil routes",
		},
		{
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
			},
			exists: true,
			name:   "no routes",
		},
		{
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: to.StringPtr("name"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: to.StringPtr("1.2.3.4/16"),
							},
						},
					},
				},
			},
			exists: true,
			expectedRoute: []cloudprovider.Route{
				{
					Name:            "name",
					TargetNode:      mapRouteNameToNodeName("name"),
					DestinationCIDR: "1.2.3.4/16",
				},
			},
			name: "one route",
		},
		{
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: to.StringPtr("name"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: to.StringPtr("1.2.3.4/16"),
							},
						},
						{
							Name: to.StringPtr("name2"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: to.StringPtr("5.6.7.8/16"),
							},
						},
					},
				},
			},
			exists: true,
			expectedRoute: []cloudprovider.Route{
				{
					Name:            "name",
					TargetNode:      mapRouteNameToNodeName("name"),
					DestinationCIDR: "1.2.3.4/16",
				},
				{
					Name:            "name2",
					TargetNode:      mapRouteNameToNodeName("name2"),
					DestinationCIDR: "5.6.7.8/16",
				},
			},
			name: "more routes",
		},
	}
	for _, test := range tests {
		routes, err := processRoutes(test.rt, test.exists, test.err)
		if test.expectErr {
			if err == nil {
				t.Errorf("%s: unexpected non-error", test.name)
				continue
			}
			if err.Error() != test.expectedError {
				t.Errorf("%s: Expected error: %v, saw error: %v", test.name, test.expectedError, err.Error())
				continue
			}
		}
		if !test.expectErr && err != nil {
			t.Errorf("%s; unexpected error: %v", test.name, err)
			continue
		}
		if len(routes) != len(test.expectedRoute) {
			t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, routes, test.expectedRoute)
			continue
		}
		for ix := range test.expectedRoute {
			if !reflect.DeepEqual(test.expectedRoute[ix], *routes[ix]) {
				t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, test.expectedRoute[ix], *routes[ix])
			}
		}
	}
}
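The tests above round-trip node names through mapNodeNameToRouteName and mapRouteNameToNodeName without ever asserting on the route-name format itself, which only requires that the two helpers invert each other. A minimal self-contained sketch of such a pair, assuming a plain identity mapping (the real helpers live elsewhere in this package and may differ):

package main

import "fmt"

// NodeName mirrors k8s.io/apimachinery's types.NodeName for this sketch.
type NodeName string

// mapNodeNameToRouteName and mapRouteNameToNodeName are assumed here to be a
// simple identity pair; the tests only depend on them being inverses.
func mapNodeNameToRouteName(nodeName NodeName) string {
	return string(nodeName)
}

func mapRouteNameToNodeName(routeName string) NodeName {
	return NodeName(routeName)
}

func main() {
	name := NodeName("node-0")
	// Round-tripping must give back the original node name.
	fmt.Println(mapRouteNameToNodeName(mapNodeNameToRouteName(name)) == name) // true
}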
@ -30,9 +30,12 @@ import (

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
)

const (
@ -43,21 +46,22 @@ const (
	availabilitySetIDTemplate   = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s"
	frontendIPConfigIDTemplate  = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/%s"
	backendPoolIDTemplate       = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
	loadBalancerRuleIDTemplate  = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/%s"
	loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"
	securityRuleIDTemplate      = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s/securityRules/%s"

	// InternalLoadBalancerNameSuffix is the load balancer postfix
	InternalLoadBalancerNameSuffix = "-internal"

	// nodeLabelRole specifies the role of a node
	nodeLabelRole = "kubernetes.io/role"

	storageAccountNameMaxLength = 24
)

var errNotInVMSet = errors.New("vm is not in the vmset")
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)

// returns the full identifier of a machine
func (az *Cloud) getMachineID(machineName string) string {
// getStandardMachineID returns the full identifier of a virtual machine.
func (az *Cloud) getStandardMachineID(machineName string) string {
	return fmt.Sprintf(
		machineIDTemplate,
		az.SubscriptionID,
@ -94,16 +98,6 @@ func (az *Cloud) getBackendPoolID(lbName, backendPoolName string) string {
		backendPoolName)
}

// returns the full identifier of a loadbalancer rule.
func (az *Cloud) getLoadBalancerRuleID(lbName, lbRuleName string) string {
	return fmt.Sprintf(
		loadBalancerRuleIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		lbName,
		lbRuleName)
}

// returns the full identifier of a loadbalancer probe.
func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
	return fmt.Sprintf(
@ -114,134 +108,22 @@ func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
		lbRuleName)
}

// returns the full identifier of a network security group security rule.
func (az *Cloud) getSecurityRuleID(securityRuleName string) string {
	return fmt.Sprintf(
		securityRuleIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		az.SecurityGroupName,
		securityRuleName)
}

// returns the full identifier of a publicIPAddress.
func (az *Cloud) getpublicIPAddressID(pipName string) string {
	return fmt.Sprintf(
		publicIPAddressIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		pipName)
}

// getLoadBalancerAvailabilitySetNames selects all possible availability sets for
// service load balancer; if the service has no loadbalancer mode annotation it returns the
// primary availability set, and if a service annotation for loadbalancer availability set
// exists it returns the eligible availability set
func (az *Cloud) getLoadBalancerAvailabilitySetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
	hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
	if !hasMode {
		// no mode specified in service annotation default to PrimaryAvailabilitySetName
		availabilitySetNames = &[]string{az.Config.PrimaryAvailabilitySetName}
		return availabilitySetNames, nil
	}
	availabilitySetNames, err = az.getAgentPoolAvailabiliySets(nodes)
	if err != nil {
		glog.Errorf("az.getLoadBalancerAvailabilitySetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
		return nil, err
	}
	if len(*availabilitySetNames) == 0 {
		glog.Errorf("az.getLoadBalancerAvailabilitySetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
		return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
	}
	// sort the list to have deterministic selection
	sort.Strings(*availabilitySetNames)
	if !isAuto {
		if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
			return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
		}
		// validate availability set exists
		var found bool
		for sasx := range serviceAvailabilitySetNames {
			for asx := range *availabilitySetNames {
				if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
					found = true
					serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
					break
				}
			}
			if !found {
				glog.Errorf("az.getLoadBalancerAvailabilitySetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
				return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
			}
		}
		availabilitySetNames = &serviceAvailabilitySetNames
func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
	vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
	if strings.EqualFold(clusterName, vmSetName) {
		vmSetName = az.vmSet.GetPrimaryVMSetName()
	}

	return availabilitySetNames, nil
}
// lists the virtual machines for for the resource group and then builds
// a list of availability sets that match the nodes available to k8s
func (az *Cloud) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
	vms, err := az.VirtualMachineClientListWithRetry()
	if err != nil {
		glog.Errorf("az.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
		return nil, err
	}
	vmNameToAvailabilitySetID := make(map[string]string, len(vms))
	for vmx := range vms {
		vm := vms[vmx]
		if vm.AvailabilitySet != nil {
			vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
		}
	}
	availabilitySetIDs := sets.NewString()
	agentPoolAvailabilitySets = &[]string{}
	for nx := range nodes {
		nodeName := (*nodes[nx]).Name
		if isMasterNode(nodes[nx]) {
			continue
		}
		asID, ok := vmNameToAvailabilitySetID[nodeName]
		if !ok {
			glog.Errorf("az.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
			return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
		}
		if availabilitySetIDs.Has(asID) {
			// already added in the list
			continue
		}
		asName, err := getLastSegment(asID)
		if err != nil {
			glog.Errorf("az.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
			return nil, err
		}
		// AvailabilitySet IDs are currently upper cased in an indeterministic way
		// We want to keep it lower case, before the ID gets fixed
		asName = strings.ToLower(asName)

		*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
	}

	return agentPoolAvailabilitySets, nil
}

func (az *Cloud) mapLoadBalancerNameToAvailabilitySet(lbName string, clusterName string) (availabilitySetName string) {
	availabilitySetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
	if strings.EqualFold(clusterName, lbName) {
		availabilitySetName = az.Config.PrimaryAvailabilitySetName
	}

	return availabilitySetName
	return vmSetName
}

// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress.
// Thus Azure does not allow mixed type (public and internal) load balancer.
// So we'd have a separate name for internal load balancer.
// This would be the name for Azure LoadBalancer resource.
func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName string, isInternal bool) string {
	lbNamePrefix := availabilitySetName
	if strings.EqualFold(availabilitySetName, az.Config.PrimaryAvailabilitySetName) {
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
	lbNamePrefix := vmSetName
	if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) {
		lbNamePrefix = clusterName
	}
	if isInternal {
@ -250,7 +132,7 @@ func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName str
	return lbNamePrefix
}

// isMasterNode returns returns true is the node has a master role label.
// isMasterNode returns true if the node has a master role label.
// The master role is determined by looking for:
// * a kubernetes.io/role="master" label
func isMasterNode(node *v1.Node) bool {
@ -311,6 +193,10 @@ func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
}

func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
	if nic.IPConfigurations == nil {
		return nil, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
	}

	if len(*nic.IPConfigurations) == 1 {
		return &((*nic.IPConfigurations)[0]), nil
	}
@ -321,7 +207,7 @@ func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguratio
	}
}

	return nil, fmt.Errorf("failed to determine the determine primary ipconfig. nicname=%q", *nic.Name)
	return nil, fmt.Errorf("failed to determine the primary ipconfig. nicname=%q", *nic.Name)
}

func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
@ -402,67 +288,7 @@ outer:
}

func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
	if az.Config.VMType == vmTypeVMSS {
		ip, err := az.getIPForVmssMachine(nodeName)
		if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
			return az.getIPForStandardMachine(nodeName)
		}

		return ip, err
	}

	return az.getIPForStandardMachine(nodeName)
}

func (az *Cloud) getIPForStandardMachine(nodeName types.NodeName) (string, error) {
	az.operationPollRateLimiter.Accept()
	machine, exists, err := az.getVirtualMachine(nodeName)
	if !exists {
		return "", cloudprovider.InstanceNotFound
	}
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), az.getVirtualMachine(%s), err=%v", nodeName, nodeName, err)
		return "", err
	}

	nicID, err := getPrimaryInterfaceID(machine)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err)
		return "", err
	}

	nicName, err := getLastSegment(nicID)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err)
		return "", err
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
	nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
	glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), az.InterfacesClient.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err)
		return "", err
	}

	ipConfig, err := getPrimaryIPConfig(nic)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
		return "", err
	}

	targetIP := *ipConfig.PrivateIPAddress
	return targetIP, nil
}

// splitProviderID converts a providerID to a NodeName.
func splitProviderID(providerID string) (types.NodeName, error) {
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		return "", errors.New("error splitting providerID")
	}
	return types.NodeName(matches[1]), nil
	return az.vmSet.GetIPByNodeName(string(nodeName), "")
}

var polyTable = crc32.MakeTable(crc32.Koopman)
@ -519,3 +345,329 @@ func ExtractDiskData(diskData interface{}) (provisioningState string, diskState
	}
	return provisioningState, diskState, nil
}
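The hunk above strips the VMSS/standard branching out of getIPForMachine and leaves only a delegation to the vmSet abstraction. Assuming nothing else remains in the body, the post-diff function presumably reads as follows; this is a reconstruction from the visible diff lines, not a verbatim copy of the file:

// Reconstructed post-diff shape of getIPForMachine (hypothetical):
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
	// All VM-type-specific lookup now lives behind the VMSet interface.
	return az.vmSet.GetIPByNodeName(string(nodeName), "")
}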
// availabilitySet implements VMSet interface for Azure availability sets.
type availabilitySet struct {
	*Cloud
}

// newAvailabilitySet creates a new availabilitySet.
func newAvailabilitySet(az *Cloud) VMSet {
	return &availabilitySet{
		Cloud: az,
	}
}

// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) {
	var machine compute.VirtualMachine
	var err error

	machine, err = as.getVirtualMachine(types.NodeName(name))
	if err != nil {
		if as.CloudProviderBackoff {
			glog.V(2).Infof("InstanceID(%s) backing off", name)
			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
			if err != nil {
				glog.V(2).Infof("InstanceID(%s) abort backoff", name)
				return "", err
			}
		} else {
			return "", err
		}
	}
	return *machine.ID, nil
}

// GetNodeNameByProviderID gets the node name by provider ID.
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
	// NodeName is part of providerID for standard instances.
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		return "", errors.New("error splitting providerID")
	}

	return types.NodeName(matches[1]), nil
}

// GetInstanceTypeByNodeName gets the instance type by node name.
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
	machine, err := as.getVirtualMachine(types.NodeName(name))
	if err != nil {
		glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err)
		return "", err
	}

	return string(machine.HardwareProfile.VMSize), nil
}

// GetZoneByNodeName gets zone from instance view.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
	vm, err := as.getVirtualMachine(types.NodeName(name))
	if err != nil {
		return cloudprovider.Zone{}, err
	}

	failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
	zone := cloudprovider.Zone{
		FailureDomain: failureDomain,
		Region:        *(vm.Location),
	}
	return zone, nil
}

// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
func (as *availabilitySet) GetPrimaryVMSetName() string {
	return as.Config.PrimaryAvailabilitySetName
}

// GetIPByNodeName gets machine IP by node name.
func (as *availabilitySet) GetIPByNodeName(name, vmSetName string) (string, error) {
	nic, err := as.GetPrimaryInterface(name, vmSetName)
	if err != nil {
		return "", err
	}

	ipConfig, err := getPrimaryIPConfig(nic)
	if err != nil {
		glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err)
		return "", err
	}

	targetIP := *ipConfig.PrivateIPAddress
	return targetIP, nil
}

// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
	vms, err := as.VirtualMachineClientListWithRetry()
	if err != nil {
		glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
		return nil, err
	}
	vmNameToAvailabilitySetID := make(map[string]string, len(vms))
	for vmx := range vms {
		vm := vms[vmx]
		if vm.AvailabilitySet != nil {
			vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
		}
	}
	availabilitySetIDs := sets.NewString()
	agentPoolAvailabilitySets = &[]string{}
	for nx := range nodes {
		nodeName := (*nodes[nx]).Name
		if isMasterNode(nodes[nx]) {
			continue
		}
		asID, ok := vmNameToAvailabilitySetID[nodeName]
		if !ok {
			glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
			return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
		}
		if availabilitySetIDs.Has(asID) {
			// already added in the list
			continue
		}
		asName, err := getLastSegment(asID)
		if err != nil {
			glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
			return nil, err
		}
		// AvailabilitySet IDs are currently upper cased in an indeterministic way
		// We want to keep it lower case, before the ID gets fixed
		asName = strings.ToLower(asName)

		*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
	}

	return agentPoolAvailabilitySets, nil
}

// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the vmType configured) for the service load balancer. If the service has
// no loadbalancer mode annotation it returns the primary VMSet; if a service annotation
// for the loadbalancer exists it returns the eligible VMSet.
func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
	hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
	if !hasMode {
		// no mode specified in service annotation default to PrimaryAvailabilitySetName
		availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
		return availabilitySetNames, nil
	}
	availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
	if err != nil {
		glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
		return nil, err
	}
	if len(*availabilitySetNames) == 0 {
		glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
		return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
	}
	// sort the list to have deterministic selection
	sort.Strings(*availabilitySetNames)
	if !isAuto {
		if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
			return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
		}
		// validate availability set exists
		var found bool
		for sasx := range serviceAvailabilitySetNames {
			for asx := range *availabilitySetNames {
				if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
					found = true
					serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
					break
				}
			}
			if !found {
				glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
				return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
			}
		}
		availabilitySetNames = &serviceAvailabilitySetNames
	}

	return availabilitySetNames, nil
}
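The methods above are one concrete implementation behind the new vmSet indirection. A plausible minimal shape for the VMSet contract, inferred only from the methods this diff implements on availabilitySet (the authoritative definition lives in azure_vmsets.go, which is not shown here, and may carry additional methods):

// Hypothetical reconstruction of the VMSet interface from this diff; names
// and signatures mirror the availabilitySet receiver methods seen above and
// below, as it would appear inside this package.
type VMSet interface {
	GetInstanceIDByNodeName(name string) (string, error)
	GetNodeNameByProviderID(providerID string) (types.NodeName, error)
	GetInstanceTypeByNodeName(name string) (string, error)
	GetZoneByNodeName(name string) (cloudprovider.Zone, error)
	GetPrimaryVMSetName() string
	GetIPByNodeName(name, vmSetName string) (string, error)
	GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error)
	EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
	EnsureBackendPoolDeleted(poolID, vmSetName string) error
}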
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
	var machine compute.VirtualMachine

	machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
	if err != nil {
		glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
		return network.Interface{}, err
	}

	primaryNicID, err := getPrimaryInterfaceID(machine)
	if err != nil {
		return network.Interface{}, err
	}
	nicName, err := getLastSegment(primaryNicID)
	if err != nil {
		return network.Interface{}, err
	}

	// Check availability set
	if vmSetName != "" {
		expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
		if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
			glog.V(3).Infof(
				"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
			return network.Interface{}, errNotInVMSet
		}
	}

	nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "")
	if err != nil {
		return network.Interface{}, err
	}

	return nic, nil
}

// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string) error {
	vmName := mapNodeNameToVMName(nodeName)
	nic, err := as.GetPrimaryInterface(vmName, vmSetName)
	if err != nil {
		if err == errNotInVMSet {
			glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
			return nil
		}

		glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
		return err
	}

	var primaryIPConfig *network.InterfaceIPConfiguration
	primaryIPConfig, err = getPrimaryIPConfig(nic)
	if err != nil {
		return err
	}

	foundPool := false
	newBackendPools := []network.BackendAddressPool{}
	if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
		newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
	}
	for _, existingPool := range newBackendPools {
		if strings.EqualFold(backendPoolID, *existingPool.ID) {
			foundPool = true
			break
		}
	}
	if !foundPool {
		newBackendPools = append(newBackendPools,
			network.BackendAddressPool{
				ID: to.StringPtr(backendPoolID),
			})

		primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools

		nicName := *nic.Name
		glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
		respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil)
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
		if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
			glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
			retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
			if retryErr != nil {
				err = retryErr
				glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}

// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
	hostUpdates := make([]func() error, len(nodes))
	for i, node := range nodes {
		localNodeName := node.Name
		f := func() error {
			err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName)
			if err != nil {
				return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
			}
			return nil
		}
		hostUpdates[i] = f
	}

	errs := utilerrors.AggregateGoroutines(hostUpdates...)
	if errs != nil {
		return utilerrors.Flatten(errs)
	}

	return nil
}

// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
	// Do nothing for availability set.
	return nil
}

// generateStorageAccountName generates a storage account name from the given prefix plus a UUID suffix.
func generateStorageAccountName(accountNamePrefix string) string {
	uniqueID := strings.Replace(string(uuid.NewUUID()), "-", "", -1)
	accountName := strings.ToLower(accountNamePrefix + uniqueID)
	if len(accountName) > storageAccountNameMaxLength {
		return accountName[:storageAccountNameMaxLength-1]
	}
	return accountName
}
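The helper above caps account names at the Azure limit. A minimal self-contained sketch of the same truncation logic, with a fixed suffix standing in for the random UUID so it runs without Kubernetes dependencies:

package main

import (
	"fmt"
	"strings"
)

const storageAccountNameMaxLength = 24 // Azure storage account name limit

// generate mimics generateStorageAccountName above, with a hard-coded
// suffix standing in for the random UUID.
func generate(prefix string) string {
	uniqueID := strings.Repeat("0123456789abcdef", 2) // stand-in for a UUID
	accountName := strings.ToLower(prefix + uniqueID)
	if len(accountName) > storageAccountNameMaxLength {
		// Note the upstream code truncates to maxLength-1 (23 characters),
		// not the full 24; this sketch keeps that behavior.
		return accountName[:storageAccountNameMaxLength-1]
	}
	return accountName
}

func main() {
	fmt.Println(generate("pvc"), len(generate("pvc"))) // 23-character lowercase name
}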
166
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestIsMasterNode(t *testing.T) {
	if isMasterNode(&v1.Node{}) {
		t.Errorf("Empty node should not be master!")
	}
	if isMasterNode(&v1.Node{
		ObjectMeta: meta.ObjectMeta{
			Labels: map[string]string{
				nodeLabelRole: "worker",
			},
		},
	}) {
		t.Errorf("Node labelled 'worker' should not be master!")
	}
	if !isMasterNode(&v1.Node{
		ObjectMeta: meta.ObjectMeta{
			Labels: map[string]string{
				nodeLabelRole: "master",
			},
		},
	}) {
		t.Errorf("Node should be master!")
	}
}

func TestGetLastSegment(t *testing.T) {
	tests := []struct {
		ID        string
		expected  string
		expectErr bool
	}{
		{
			ID:        "",
			expected:  "",
			expectErr: true,
		},
		{
			ID:        "foo/",
			expected:  "",
			expectErr: true,
		},
		{
			ID:        "foo/bar",
			expected:  "bar",
			expectErr: false,
		},
		{
			ID:        "foo/bar/baz",
			expected:  "baz",
			expectErr: false,
		},
	}

	for _, test := range tests {
		s, e := getLastSegment(test.ID)
		if test.expectErr && e == nil {
			t.Errorf("Expected err, but it was nil")
			continue
		}
		if !test.expectErr && e != nil {
			t.Errorf("Unexpected error: %v", e)
			continue
		}
		if s != test.expected {
			t.Errorf("expected: %s, got %s", test.expected, s)
		}
	}
}

func TestGenerateStorageAccountName(t *testing.T) {
	tests := []struct {
		prefix string
	}{
		{
			prefix: "",
		},
		{
			prefix: "pvc",
		},
		{
			prefix: "1234512345123451234512345",
		},
	}

	for _, test := range tests {
		accountName := generateStorageAccountName(test.prefix)
		if len(accountName) > storageAccountNameMaxLength || len(accountName) < 3 {
			t.Errorf("input prefix: %s, output account name: %s, length not in [3,%d]", test.prefix, accountName, storageAccountNameMaxLength)
		}

		for _, char := range accountName {
			if (char < 'a' || char > 'z') && (char < '0' || char > '9') {
				t.Errorf("input prefix: %s, output account name: %s, there is non-digit or non-letter(%q)", test.prefix, accountName, char)
				break
			}
		}
	}
}

func TestMapLoadBalancerNameToVMSet(t *testing.T) {
	az := getTestCloud()
	az.PrimaryAvailabilitySetName = "primary"

	cases := []struct {
		description   string
		lbName        string
		clusterName   string
		expectedVMSet string
	}{
		{
			description:   "default external LB should map to primary vmset",
			lbName:        "azure",
			clusterName:   "azure",
			expectedVMSet: "primary",
		},
		{
			description:   "default internal LB should map to primary vmset",
			lbName:        "azure-internal",
			clusterName:   "azure",
			expectedVMSet: "primary",
		},
		{
			description:   "non-default external LB should map to its own vmset",
			lbName:        "azuretest-internal",
			clusterName:   "azure",
			expectedVMSet: "azuretest",
		},
		{
			description:   "non-default internal LB should map to its own vmset",
			lbName:        "azuretest-internal",
			clusterName:   "azure",
			expectedVMSet: "azuretest",
		},
	}

	for _, c := range cases {
		vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
		assert.Equal(t, c.expectedVMSet, vmset, c.description)
	}
}
63
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
@ -19,56 +19,41 @@ package azure
import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/golang/glog"
)

const (
	defaultStorageAccountType      = string(storage.StandardLRS)
	fileShareAccountNamePrefix     = "f"
	sharedDiskAccountNamePrefix    = "ds"
	dedicatedDiskAccountNamePrefix = "dd"
)

// CreateFileShare creates a file share, using a matching storage account
func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) {
	var err error
	accounts := []accountWithLocation{}
	if len(storageAccount) > 0 {
		accounts = append(accounts, accountWithLocation{Name: storageAccount})
	} else {
		// find a storage account
		accounts, err = az.getStorageAccounts()
		if err != nil {
			// TODO: create a storage account and container
			return "", "", err
		}
	}
	for _, account := range accounts {
		glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
		if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 {
			// find the access key with this account
			key, err := az.getStorageAccesskey(account.Name)
			if err != nil {
				err = fmt.Errorf("could not get storage key for storage account %s: %v", account.Name, err)
				continue
			}

			err = az.createFileShare(account.Name, key, name, requestGB)
			if err != nil {
				err = fmt.Errorf("failed to create share %s in account %s: %v", name, account.Name, err)
				continue
			}
			glog.V(4).Infof("created share %s in account %s", name, account.Name)
			return account.Name, key, err
		}
	}
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
	account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
	if err != nil {
		return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
	}

	if err == nil {
		err = fmt.Errorf("failed to find a matching storage account")
	if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
		return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
	}
	return "", "", err
	glog.V(4).Infof("created share %s in account %s", shareName, account)
	return account, key, nil
}

// DeleteFileShare deletes a file share using storage account name and key
func (az *Cloud) DeleteFileShare(accountName, key, name string) error {
	err := az.deleteFileShare(accountName, key, name)
	if err != nil {
func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) error {
	if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil {
		return err
	}
	glog.V(4).Infof("share %s deleted", name)
	glog.V(4).Infof("share %s deleted", shareName)
	return nil

}

// ResizeFileShare resizes a file share
func (az *Cloud) ResizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
	return az.resizeFileShare(accountName, accountKey, name, sizeGiB)
}
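The reworked CreateFileShare now funnels account selection and creation through ensureStorageAccount before creating the share. A hedged sketch of how a caller such as a volume provisioner might use the new signature; the `cloud` value, arguments, and error handling are illustrative only:

// Illustrative call site for the new signature; cloud is assumed to be an
// initialized *Cloud. Passing an empty account name lets ensureStorageAccount
// pick a matching account or create a fresh one.
account, key, err := cloud.CreateFileShare("share1", "", "Standard_LRS", "eastus", 5)
if err != nil {
	// handle provisioning failure
}
_ = account // account and key identify where the share landed
_ = key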
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"testing"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
)

func TestCreateFileShare(t *testing.T) {
	cloud := &Cloud{}
	fake := newFakeStorageAccountClient()
	cloud.StorageAccountClient = fake
	cloud.FileClient = &fakeFileClient{}

	name := "baz"
	sku := "sku"
	location := "centralus"
	value := "foo key"
	bogus := "bogus"

	tests := []struct {
		name     string
		acct     string
		acctType string
		loc      string
		gb       int
		accounts storage.AccountListResult
		keys     storage.AccountListKeysResult
		err      error

		expectErr  bool
		expectAcct string
		expectKey  string
	}{
		{
			name:      "foo",
			acct:      "bar",
			acctType:  "type",
			loc:       "eastus",
			gb:        10,
			expectErr: true,
		},
		{
			name:      "foo",
			acct:      "",
			acctType:  "type",
			loc:       "eastus",
			gb:        10,
			expectErr: true,
		},
		{
			name:     "foo",
			acct:     "",
			acctType: sku,
			loc:      location,
			gb:       10,
			accounts: storage.AccountListResult{
				Value: &[]storage.Account{
					{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
				},
			},
			keys: storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			expectAcct: "baz",
			expectKey:  "key",
		},
		{
			name:     "foo",
			acct:     "",
			acctType: sku,
			loc:      location,
			gb:       10,
			accounts: storage.AccountListResult{
				Value: &[]storage.Account{
					{Name: &bogus, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
				},
			},
			expectErr: true,
		},
		{
			name:     "foo",
			acct:     "",
			acctType: sku,
			loc:      location,
			gb:       10,
			accounts: storage.AccountListResult{
				Value: &[]storage.Account{
					{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &bogus},
				},
			},
			expectErr: true,
		},
	}

	for _, test := range tests {
		fake.Accounts = test.accounts
		fake.Keys = test.keys
		fake.Err = test.err

		account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.loc, test.gb)
		if test.expectErr && err == nil {
			t.Errorf("unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if test.expectAcct != account {
			t.Errorf("Expected: %s, got %s", test.expectAcct, account)
		}
		if test.expectKey != key {
			t.Errorf("Expected: %s, got %s", test.expectKey, key)
		}
	}
}
81
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
@ -20,6 +20,8 @@ import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/golang/glog"
)

@ -27,32 +29,29 @@ type accountWithLocation struct {
	Name, StorageType, Location string
}

// getStorageAccounts gets the storage accounts' name, type, location in a resource group
func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) {
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): start", az.ResourceGroup)
// getStorageAccounts gets name, type and location of all storage accounts in a resource group that match matchingAccountType and matchingLocation
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
	result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
	glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): end", az.ResourceGroup)
	if err != nil {
		return nil, err
	}
	if result.Value == nil {
		return nil, fmt.Errorf("no storage accounts from resource group %s", az.ResourceGroup)
		return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", az.ResourceGroup)
	}

	accounts := []accountWithLocation{}
	for _, acct := range *result.Value {
		if acct.Name != nil {
			name := *acct.Name
			loc := ""
			if acct.Location != nil {
				loc = *acct.Location
		if acct.Name != nil && acct.Location != nil && acct.Sku != nil {
			storageType := string((*acct.Sku).Name)
			if matchingAccountType != "" && !strings.EqualFold(matchingAccountType, storageType) {
				continue
			}
			storageType := ""
			if acct.Sku != nil {
				storageType = string((*acct.Sku).Name)

			location := *acct.Location
			if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) {
				continue
			}
			accounts = append(accounts, accountWithLocation{Name: name, StorageType: storageType, Location: loc})
			accounts = append(accounts, accountWithLocation{Name: *acct.Name, StorageType: storageType, Location: location})
		}
	}

@ -61,10 +60,7 @@ func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) {

// getStorageAccesskey gets the storage account access key
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("StorageAccountClient.ListKeys(%q): start", account)
	result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account)
	glog.V(10).Infof("StorageAccountClient.ListKeys(%q): end", account)
	if err != nil {
		return "", err
	}
@ -83,3 +79,52 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
	}
	return "", fmt.Errorf("no valid keys")
}

// ensureStorageAccount searches for a storage account, creates one (named with genAccountNamePrefix) if none is found, and returns accountName, accountKey
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
	if len(accountName) == 0 {
		// find a storage account that matches accountType
		accounts, err := az.getStorageAccounts(accountType, location)
		if err != nil {
			return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
		}

		if len(accounts) > 0 {
			accountName = accounts[0].Name
			glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
		}

		if len(accountName) == 0 {
			// no matching account found, so create a new account in the current resource group
			accountName = generateStorageAccountName(genAccountNamePrefix)
			if location == "" {
				location = az.Location
			}
			if accountType == "" {
				accountType = defaultStorageAccountType
			}

			glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
				accountName, az.ResourceGroup, location, accountType)
			cp := storage.AccountCreateParameters{
				Sku:      &storage.Sku{Name: storage.SkuName(accountType)},
				Tags:     &map[string]*string{"created-by": to.StringPtr("azure")},
				Location: &location}
			cancel := make(chan struct{})

			_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
			err := <-errchan
			if err != nil {
				return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
			}
		}
	}

	// find the access key with this account
	accountKey, err := az.getStorageAccesskey(accountName)
	if err != nil {
		return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
	}

	return accountName, accountKey, nil
}
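The account-selection logic above boils down to: use the caller's account name if given, otherwise take the first existing account matching type and location, otherwise create a fresh one. A minimal self-contained sketch of just that decision order, with plain values standing in for the Azure client calls:

package main

import "fmt"

// pickAccount condenses ensureStorageAccount's decision order: an explicit
// name wins, then the first matching existing account, then a generated one.
func pickAccount(requested string, matching []string, generate func() string) string {
	if requested != "" {
		return requested
	}
	if len(matching) > 0 {
		return matching[0]
	}
	return generate()
}

func main() {
	// gen stands in for generateStorageAccountName with a fixed result.
	gen := func() string { return "f1a2b3c4d5e6f7a8b9c0d1e" }
	fmt.Println(pickAccount("mystore", nil, gen))        // mystore
	fmt.Println(pickAccount("", []string{"fabc"}, gen))  // fabc
	fmt.Println(pickAccount("", nil, gen))               // generated name
}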
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"testing"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
)

func TestGetStorageAccessKeys(t *testing.T) {
	cloud := &Cloud{}
	fake := newFakeStorageAccountClient()
	cloud.StorageAccountClient = fake
	value := "foo bar"

	tests := []struct {
		results     storage.AccountListKeysResult
		expectedKey string
		expectErr   bool
		err         error
	}{
		{storage.AccountListKeysResult{}, "", true, nil},
		{
			storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			"bar",
			false,
			nil,
		},
		{
			storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{},
					{Value: &value},
				},
			},
			"bar",
			false,
			nil,
		},
		{storage.AccountListKeysResult{}, "", true, fmt.Errorf("test error")},
	}

	for _, test := range tests {
		expectedKey := test.expectedKey
		fake.Keys = test.results
		fake.Err = test.err
		key, err := cloud.getStorageAccesskey("acct")
		if test.expectErr && err == nil {
			t.Errorf("Unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		if key != expectedKey {
			t.Errorf("expected: %s, saw %s", expectedKey, key)
		}
	}
}
139
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package azure

import (
    "context"
    "encoding/json"
    "fmt"
    "math"
@ -29,8 +30,8 @@ import (
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/util/flowcontrol"
    serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
@ -125,7 +126,7 @@ func testLoadBalancerServiceDefaultModeSelection(t *testing.T, isInternal bool)
        svc = getTestService(svcName, v1.ProtocolTCP, 8081)
    }

    lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
    lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
    if err != nil {
        t.Errorf("Unexpected error: %q", err)
    }
@ -178,7 +179,7 @@ func testLoadBalancerServiceAutoModeSelection(t *testing.T, isInternal bool) {
        svc = getTestService(svcName, v1.ProtocolTCP, 8081)
    }
    setLoadBalancerAutoModeAnnotation(&svc)
    lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
    lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
    if err != nil {
        t.Errorf("Unexpected error: %q", err)
    }
@ -241,7 +242,7 @@ func testLoadBalancerServicesSpecifiedSelection(t *testing.T, isInternal bool) {
    lbMode := fmt.Sprintf("%s,%s", selectedAvailabilitySetName1, selectedAvailabilitySetName2)
    setLoadBalancerModeAnnotation(&svc, lbMode)

    lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
    lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
    if err != nil {
        t.Errorf("Unexpected error: %q", err)
    }
@ -279,7 +280,7 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
        svc = getTestService(svcName, v1.ProtocolTCP, 8081)
    }

    lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
    lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
    if err != nil {
        t.Errorf("Unexpected error: %q", err)
    }
@ -305,7 +306,7 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
    } else {
        svc = getTestService(svcName, v1.ProtocolTCP, 8081)
    }
    _, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
    _, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
    if err == nil {
        t.Errorf("Expect any new service to fail as max limit in lb has been reached")
    } else {
@ -336,7 +337,7 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo
        svc = getTestService(svcName, v1.ProtocolTCP, 8081)
    }
    setLoadBalancerAutoModeAnnotation(&svc)
    lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
    lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
    if err != nil {
        t.Errorf("Unexpected error: %q", err)
    }
@ -365,7 +366,7 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo
        t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
    }

    err := az.EnsureLoadBalancerDeleted(testClusterName, &svc)
    err := az.EnsureLoadBalancerDeleted(context.TODO(), testClusterName, &svc)
    if err != nil {
        t.Errorf("Unexpected error: %q", err)
    }
@ -850,8 +851,10 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
func getTestCloud() (az *Cloud) {
    az = &Cloud{
        Config: Config{
            TenantID:       "tenant",
            SubscriptionID: "subscription",
            AzureAuthConfig: auth.AzureAuthConfig{
                TenantID:       "tenant",
                SubscriptionID: "subscription",
            },
            ResourceGroup:     "rg",
            VnetResourceGroup: "rg",
            Location:          "westus",
@ -863,13 +866,22 @@ func getTestCloud() (az *Cloud) {
            MaximumLoadBalancerRuleCount: 250,
        },
    }
    az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 100)
    az.DisksClient = newFakeDisksClient()
    az.InterfacesClient = newFakeAzureInterfacesClient()
    az.LoadBalancerClient = newFakeAzureLBClient()
    az.PublicIPAddressesClient = newFakeAzurePIPClient(az.Config.SubscriptionID)
    az.SubnetsClient = newFakeAzureSubnetsClient()
    az.RoutesClient = newFakeRoutesClient()
    az.RouteTablesClient = newFakeRouteTablesClient()
    az.SecurityGroupsClient = newFakeAzureNSGClient()
    az.SubnetsClient = newFakeAzureSubnetsClient()
    az.VirtualMachineScaleSetsClient = newFakeVirtualMachineScaleSetsClient()
    az.VirtualMachineScaleSetVMsClient = newFakeVirtualMachineScaleSetVMsClient()
    az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient()
    az.InterfacesClient = newFakeAzureInterfacesClient()
    az.vmSet = newAvailabilitySet(az)
    az.vmCache, _ = az.newVMCache()
    az.lbCache, _ = az.newLBCache()
    az.nsgCache, _ = az.newNSGCache()
    az.rtCache, _ = az.newRouteTableCache()

    return az
}
@ -1000,23 +1012,6 @@ func getBackendPort(port int32) int32 {
    return port + 10000
}

func getTestPublicFipConfigurationProperties() network.FrontendIPConfigurationPropertiesFormat {
    return network.FrontendIPConfigurationPropertiesFormat{
        PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")},
    }
}

func getTestInternalFipConfigurationProperties(expectedSubnetName *string) network.FrontendIPConfigurationPropertiesFormat {
    var expectedSubnet *network.Subnet
    if expectedSubnetName != nil {
        expectedSubnet = &network.Subnet{Name: expectedSubnetName}
    }
    return network.FrontendIPConfigurationPropertiesFormat{
        PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")},
        Subnet:          expectedSubnet,
    }
}

func getTestService(identifier string, proto v1.Protocol, requestedPorts ...int32) v1.Service {
    ports := []v1.ServicePort{}
    for _, port := range requestedPorts {
@ -1056,39 +1051,6 @@ func setLoadBalancerAutoModeAnnotation(service *v1.Service) {
    setLoadBalancerModeAnnotation(service, ServiceAnnotationLoadBalancerAutoModeValue)
}

func getTestLoadBalancer(services ...v1.Service) network.LoadBalancer {
    rules := []network.LoadBalancingRule{}
    probes := []network.Probe{}

    for _, service := range services {
        for _, port := range service.Spec.Ports {
            ruleName := getLoadBalancerRuleName(&service, port, nil)
            rules = append(rules, network.LoadBalancingRule{
                Name: to.StringPtr(ruleName),
                LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
                    FrontendPort: to.Int32Ptr(port.Port),
                    BackendPort:  to.Int32Ptr(port.Port),
                },
            })
            probes = append(probes, network.Probe{
                Name: to.StringPtr(ruleName),
                ProbePropertiesFormat: &network.ProbePropertiesFormat{
                    Port: to.Int32Ptr(port.NodePort),
                },
            })
        }
    }

    lb := network.LoadBalancer{
        LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
            LoadBalancingRules: &rules,
            Probes:             &probes,
        },
    }

    return lb
}

func getServiceSourceRanges(service *v1.Service) []string {
    if len(service.Spec.LoadBalancerSourceRanges) == 0 {
        if !requiresInternalLoadBalancer(service) {
@ -1285,7 +1247,7 @@ func validatePublicIP(t *testing.T, publicIP *network.PublicIPAddress, service *
    }
    // We cannot use service.Spec.LoadBalancerIP to compare with
    // Public IP's IPAddress
    // Becuase service properties are updated outside of cloudprovider code
    // Because service properties are updated outside of cloudprovider code
}

func contains(ruleValues []string, targetValue string) bool {
@ -1607,19 +1569,57 @@ func validateEmptyConfig(t *testing.T, config string) {
    if azureCloud.CloudProviderBackoff != false {
        t.Errorf("got incorrect value for CloudProviderBackoff")
    }

    // rate limits should be disabled by default if not explicitly enabled in config
    if azureCloud.CloudProviderRateLimit != false {
        t.Errorf("got incorrect value for CloudProviderRateLimit")
    }
}
func TestGetZone(t *testing.T) {
    data := `{"ID":"_azdev","UD":"0","FD":"99"}`
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, data)
    }))
    defer ts.Close()

    cloud := &Cloud{}
    cloud.Location = "eastus"

    zone, err := cloud.getZoneFromURL(ts.URL)
    if err != nil {
        t.Errorf("Unexpected error: %v", err)
    }
    if zone.FailureDomain != "99" {
        t.Errorf("Unexpected value: %s, expected '99'", zone.FailureDomain)
    }
    if zone.Region != cloud.Location {
        t.Errorf("Expected: %s, saw: %s", cloud.Location, zone.Region)
    }
}

func TestFetchFaultDomain(t *testing.T) {
    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, `{"ID":"_azdev","UD":"0","FD":"99"}`)
    }))
    defer ts.Close()

    faultDomain, err := fetchFaultDomain(ts.URL)
    if err != nil {
        t.Errorf("Unexpected error: %v", err)
    }
    if faultDomain == nil {
        t.Errorf("Unexpected nil fault domain")
    }
    if *faultDomain != "99" {
        t.Errorf("Expected '99', saw '%s'", *faultDomain)
    }
}

func TestDecodeInstanceInfo(t *testing.T) {
    response := `{"ID":"_azdev","UD":"0","FD":"99"}`

    faultDomain, err := readFaultDomain(strings.NewReader(response))
    if err != nil {
        t.Error("Unexpected error in ReadFaultDomain")
        t.Errorf("Unexpected error in ReadFaultDomain: %v", err)
    }

    if faultDomain == nil {
@ -1631,7 +1631,8 @@ func TestDecodeInstanceInfo(t *testing.T) {
    }
}

func TestSplitProviderID(t *testing.T) {
func TestGetNodeNameByProviderID(t *testing.T) {
    az := getTestCloud()
    providers := []struct {
        providerID string
        name       types.NodeName
@ -1666,7 +1667,7 @@ func TestSplitProviderID(t *testing.T) {
    }

    for _, test := range providers {
        name, err := splitProviderID(test.providerID)
        name, err := az.vmSet.GetNodeNameByProviderID(test.providerID)
        if (err != nil) != test.fail {
            t.Errorf("Expected to fail=%t, with pattern %v", test.fail, test)
        }
@ -1717,7 +1718,7 @@ func TestMetadataParsing(t *testing.T) {
                "macAddress": "002248020E1E"
            }
        ]
    }
}
`

    network := NetworkMetadata{}
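The mechanical change threaded through this file, adding a context argument at every EnsureLoadBalancer and EnsureLoadBalancerDeleted call site, reflects the cloudprovider LoadBalancer interface gaining a context.Context parameter in this update; tests that have no meaningful context pass context.TODO(). A minimal illustration of the call-site change (signatures abbreviated, not the full interface):

// Before: lbStatus, err := az.EnsureLoadBalancer(clusterName, &svc, nodes)
// After:
ctx := context.TODO() // placeholder until callers plumb a real context through
lbStatus, err := az.EnsureLoadBalancer(ctx, clusterName, &svc, nodes)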
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_test.go
generated
vendored
@ -1,53 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestGetVmssInstanceID(t *testing.T) {
    tests := []struct {
        msg                string
        machineName        string
        expectError        bool
        expectedInstanceID string
    }{{
        msg:         "invalid vmss instance name",
        machineName: "vmvm",
        expectError: true,
    },
        {
            msg:                "valid vmss instance name",
            machineName:        "vm00000Z",
            expectError:        false,
            expectedInstanceID: "35",
        },
    }

    for i, test := range tests {
        instanceID, err := getVmssInstanceID(test.machineName)
        if test.expectError {
            assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
        } else {
            assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
        }
    }
}
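The "vm00000Z" expecting "35" case above follows from the base-36 parsing described in the vmss helpers below: the last six characters of the machine name are a base-36 instance ID, and 'Z' is digit 35. A standalone sketch of just that conversion (illustration only, not the vendored helper):

package main

import (
    "fmt"
    "strconv"
)

func main() {
    machineName := "vm00000Z"
    // The instance ID is the base-36 value of the final six characters.
    suffix := machineName[len(machineName)-6:]
    id, err := strconv.ParseUint(suffix, 36, 64)
    if err != nil {
        fmt.Println("not a vmss instance name")
        return
    }
    fmt.Println(id) // prints 35, matching the test's expected "35"
}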
102
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go
generated
vendored
@ -1,102 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "fmt"
    "strconv"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

func (az *Cloud) getIPForVmssMachine(nodeName types.NodeName) (string, error) {
    az.operationPollRateLimiter.Accept()
    machine, exists, err := az.getVmssVirtualMachine(nodeName)
    if !exists {
        return "", cloudprovider.InstanceNotFound
    }
    if err != nil {
        glog.Errorf("error: az.getIPForVmssMachine(%s), az.getVmssVirtualMachine(%s), err=%v", nodeName, nodeName, err)
        return "", err
    }

    nicID, err := getPrimaryInterfaceIDForVmssMachine(machine)
    if err != nil {
        glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err)
        return "", err
    }

    nicName, err := getLastSegment(nicID)
    if err != nil {
        glog.Errorf("error: az.getIPForVmssMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err)
        return "", err
    }

    az.operationPollRateLimiter.Accept()
    glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
    nic, err := az.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(az.ResourceGroup, az.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "")
    glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
    if err != nil {
        glog.Errorf("error: az.getIPForVmssMachine(%s), az.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err)
        return "", err
    }

    ipConfig, err := getPrimaryIPConfig(nic)
    if err != nil {
        glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
        return "", err
    }

    targetIP := *ipConfig.PrivateIPAddress
    return targetIP, nil
}

// This returns the full identifier of the primary NIC for the given VM.
func getPrimaryInterfaceIDForVmssMachine(machine compute.VirtualMachineScaleSetVM) (string, error) {
    if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
        return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
    }

    for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
        if *ref.Primary {
            return *ref.ID, nil
        }
    }

    return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}

// machineName is composed of computerNamePrefix and a base-36 instanceID.
// The instanceID part is a fixed length of 6 characters.
// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
func getVmssInstanceID(machineName string) (string, error) {
    nameLength := len(machineName)
    if nameLength < 6 {
        return "", ErrorNotVmssInstance
    }

    instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
    if err != nil {
        return "", ErrorNotVmssInstance
    }

    return fmt.Sprintf("%d", instanceID), nil
}
71
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "github.com/Azure/azure-sdk-for-go/arm/compute"
    "github.com/Azure/azure-sdk-for-go/arm/network"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

// VMSet defines the functions that all vmsets (including scale sets and
// availability sets) must implement.
type VMSet interface {
    // GetInstanceIDByNodeName gets the cloud provider ID by node name.
    // It must return ("", cloudprovider.InstanceNotFound) if the instance does
    // not exist or is no longer running.
    GetInstanceIDByNodeName(name string) (string, error)
    // GetInstanceTypeByNodeName gets the instance type by node name.
    GetInstanceTypeByNodeName(name string) (string, error)
    // GetIPByNodeName gets machine IP by node name.
    GetIPByNodeName(name, vmSetName string) (string, error)
    // GetPrimaryInterface gets machine primary network interface by node name and vmSet.
    GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error)
    // GetNodeNameByProviderID gets the node name by provider ID.
    GetNodeNameByProviderID(providerID string) (types.NodeName, error)

    // GetZoneByNodeName gets cloudprovider.Zone by node name.
    GetZoneByNodeName(name string) (cloudprovider.Zone, error)

    // GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
    // It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
    GetPrimaryVMSetName() string
    // GetVMSetNames selects all possible availability sets or scale sets
    // (depending on the configured vmType) for the service load balancer. If the
    // service has no loadbalancer mode annotation, it returns the primary VMSet;
    // otherwise it returns the eligible VMSets named by the annotation.
    GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
    // EnsureHostsInPool ensures the given Node's primary IP configurations are
    // participating in the specified LoadBalancer Backend Pool.
    EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
    // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools are deleted from the specified vmSet.
    EnsureBackendPoolDeleted(poolID, vmSetName string) error

    // AttachDisk attaches a vhd to a vm. The vhd must exist and can be identified by diskName, diskURI, and lun.
    AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
    // DetachDiskByName detaches a vhd from the host. The vhd can be identified by diskName or diskURI.
    DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
    // GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
    GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error)
    // GetNextDiskLun searches all vhd attachments on the host and finds an unused lun. It returns -1 if all luns are used.
    GetNextDiskLun(nodeName types.NodeName) (int32, error)
    // DisksAreAttached checks if a list of volumes is attached to the node with the specified NodeName.
    DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
}
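The VMSet interface above is what lets the rest of the provider treat availability-set and scale-set clusters uniformly; a concrete implementation is picked once from configuration. A sketch of how that selection might be wired, assuming a vmType config value distinguishing "vmss" from "standard" (the real wiring lives in azure.go, which is not part of this excerpt):

// selectVMSet is a hypothetical illustration of binding az.vmSet to a
// concrete VMSet implementation at startup; it is not the vendored code.
func selectVMSet(az *Cloud) (VMSet, error) {
    if strings.EqualFold(az.Config.VMType, "vmss") {
        // Scale-set clusters still fall back to availability sets for
        // nodes (e.g. masters) that do not belong to any scale set,
        // which is why newScaleSet embeds an availabilitySet (see below).
        return newScaleSet(az)
    }
    return newAvailabilitySet(az), nil
}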
764
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
Normal file
@ -0,0 +1,764 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
    "errors"
    "fmt"
    "regexp"
    "sort"
    "strconv"
    "strings"

    "github.com/Azure/azure-sdk-for-go/arm/network"
    computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
    "github.com/Azure/go-autorest/autorest/to"
    "github.com/golang/glog"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

var (
    // ErrorNotVmssInstance indicates an instance does not belong to any vmss.
    ErrorNotVmssInstance = errors.New("not a vmss instance")

    scaleSetNameRE        = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
    vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
)

// scaleSet implements the VMSet interface for Azure scale sets.
type scaleSet struct {
    *Cloud

    // availabilitySet is also required for scaleSet because some instances
    // (e.g. master nodes) may not belong to any scale sets.
    availabilitySet VMSet

    vmssCache                      *timedCache
    vmssVMCache                    *timedCache
    nodeNameToScaleSetMappingCache *timedCache
    availabilitySetNodesCache      *timedCache
}

// newScaleSet creates a new scaleSet.
func newScaleSet(az *Cloud) (VMSet, error) {
    var err error
    ss := &scaleSet{
        Cloud:           az,
        availabilitySet: newAvailabilitySet(az),
    }

    ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
    if err != nil {
        return nil, err
    }

    ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
    if err != nil {
        return nil, err
    }

    ss.vmssCache, err = ss.newVmssCache()
    if err != nil {
        return nil, err
    }

    ss.vmssVMCache, err = ss.newVmssVMCache()
    if err != nil {
        return nil, err
    }

    return ss, nil
}

// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if the node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm computepreview.VirtualMachineScaleSetVM, err error) {
    instanceID, err = getScaleSetVMInstanceID(nodeName)
    if err != nil {
        return ssName, instanceID, vm, err
    }

    ssName, err = ss.getScaleSetNameByNodeName(nodeName)
    if err != nil {
        return ssName, instanceID, vm, err
    }

    if ssName == "" {
        return "", "", vm, cloudprovider.InstanceNotFound
    }

    glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
    cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
    if err != nil {
        return ssName, instanceID, vm, err
    }

    if cachedVM == nil {
        glog.Errorf("Can't find node (%q) in any scale sets", nodeName)
        return ssName, instanceID, vm, cloudprovider.InstanceNotFound
    }

    return ssName, instanceID, *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
}

// getVmssVMByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of the scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm computepreview.VirtualMachineScaleSetVM, err error) {
    vmName := ss.makeVmssVMName(scaleSetName, instanceID)
    cachedVM, err := ss.vmssVMCache.Get(vmName)
    if err != nil {
        return vm, err
    }

    if cachedVM == nil {
        glog.Errorf("couldn't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID)
        return vm, cloudprovider.InstanceNotFound
    }

    return *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
}

// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
    if err != nil {
        glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
        return "", err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.GetInstanceIDByNodeName(name)
    }

    _, _, vm, err := ss.getVmssVM(name)
    if err != nil {
        return "", err
    }

    return *vm.ID, nil
}

// GetNodeNameByProviderID gets the node name by provider ID.
func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
    // NodeName is not part of providerID for vmss instances.
    scaleSetName, err := extractScaleSetNameByExternalID(providerID)
    if err != nil {
        glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
        return ss.availabilitySet.GetNodeNameByProviderID(providerID)
    }

    instanceID, err := getLastSegment(providerID)
    if err != nil {
        glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
        return ss.availabilitySet.GetNodeNameByProviderID(providerID)
    }

    vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
    if err != nil {
        return "", err
    }

    if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
        nodeName := strings.ToLower(*vm.OsProfile.ComputerName)
        return types.NodeName(nodeName), nil
    }

    return "", nil
}

// GetInstanceTypeByNodeName gets the instance type by node name.
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
    if err != nil {
        glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
        return "", err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.GetInstanceTypeByNodeName(name)
    }

    _, _, vm, err := ss.getVmssVM(name)
    if err != nil {
        return "", err
    }

    if vm.Sku != nil && vm.Sku.Name != nil {
        return *vm.Sku.Name, nil
    }

    return "", nil
}

// GetZoneByNodeName gets cloudprovider.Zone by node name.
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
    if err != nil {
        glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
        return cloudprovider.Zone{}, err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.GetZoneByNodeName(name)
    }

    _, _, vm, err := ss.getVmssVM(name)
    if err != nil {
        return cloudprovider.Zone{}, err
    }

    if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
        return cloudprovider.Zone{
            FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
            Region:        *vm.Location,
        }, nil
    }

    return cloudprovider.Zone{}, nil
}

// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
func (ss *scaleSet) GetPrimaryVMSetName() string {
    return ss.Config.PrimaryScaleSetName
}

// GetIPByNodeName gets machine IP by node name.
func (ss *scaleSet) GetIPByNodeName(nodeName, vmSetName string) (string, error) {
    nic, err := ss.GetPrimaryInterface(nodeName, vmSetName)
    if err != nil {
        glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q, %q), err=%v", nodeName, nodeName, vmSetName, err)
        return "", err
    }

    ipConfig, err := getPrimaryIPConfig(nic)
    if err != nil {
        glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
        return "", err
    }

    targetIP := *ipConfig.PrivateIPAddress
    return targetIP, nil
}

// This returns the full identifier of the primary NIC for the given VM.
func (ss *scaleSet) getPrimaryInterfaceID(machine computepreview.VirtualMachineScaleSetVM) (string, error) {
    if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
        return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
    }

    for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
        if *ref.Primary {
            return *ref.ID, nil
        }
    }

    return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}

// machineName is composed of computerNamePrefix and a base-36 instanceID.
// The instanceID part is a fixed length of 6 characters.
// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
func getScaleSetVMInstanceID(machineName string) (string, error) {
    nameLength := len(machineName)
    if nameLength < 6 {
        return "", ErrorNotVmssInstance
    }

    instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
    if err != nil {
        return "", ErrorNotVmssInstance
    }

    return fmt.Sprintf("%d", instanceID), nil
}

// extractScaleSetNameByExternalID extracts the scaleset name by node's externalID.
func extractScaleSetNameByExternalID(externalID string) (string, error) {
    matches := scaleSetNameRE.FindStringSubmatch(externalID)
    if len(matches) != 2 {
        return "", ErrorNotVmssInstance
    }

    return matches[1], nil
}

// listScaleSets lists all scale sets.
func (ss *scaleSet) listScaleSets() ([]string, error) {
    var err error
    ctx, cancel := getContextWithCancel()
    defer cancel()

    allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, ss.ResourceGroup)
    if err != nil {
        glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
        return nil, err
    }

    ssNames := make([]string, len(allScaleSets))
    for i := range allScaleSets {
        ssNames[i] = *(allScaleSets[i].Name)
    }

    return ssNames, nil
}

// listScaleSetVMs lists VMs belonging to the specified scale set.
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]computepreview.VirtualMachineScaleSetVM, error) {
    var err error
    ctx, cancel := getContextWithCancel()
    defer cancel()

    allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(computepreview.InstanceView))
    if err != nil {
        glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
        return nil, err
    }

    return allVMs, nil
}

// getAgentPoolScaleSets lists the virtual machines for the resource group and then builds
// a list of scale sets that match the nodes available to k8s.
func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
    agentPoolScaleSets := &[]string{}
    for nx := range nodes {
        if isMasterNode(nodes[nx]) {
            continue
        }

        nodeName := nodes[nx].Name
        ssName, err := ss.getScaleSetNameByNodeName(nodeName)
        if err != nil {
            return nil, err
        }

        if ssName == "" {
            glog.V(3).Infof("Node %q does not belong to any known scale sets", nodeName)
            continue
        }

        *agentPoolScaleSets = append(*agentPoolScaleSets, ssName)
    }

    return agentPoolScaleSets, nil
}

// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the
// service has no loadbalancer mode annotation, it returns the primary VMSet;
// otherwise it returns the eligible VMSets named by the annotation.
func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
    hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
    if !hasMode {
        // no mode specified in service annotation, default to PrimaryScaleSetName.
        scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName}
        return scaleSetNames, nil
    }

    scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
    if err != nil {
        glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
        return nil, err
    }
    if len(*scaleSetNames) == 0 {
        glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
        return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
    }

    // sort the list to have deterministic selection
    sort.Strings(*scaleSetNames)

    if !isAuto {
        if serviceVMSetNames == nil || len(serviceVMSetNames) == 0 {
            return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
        }
        // validate scale set exists
        var found bool
        for sasx := range serviceVMSetNames {
            for asx := range *scaleSetNames {
                if strings.EqualFold((*scaleSetNames)[asx], serviceVMSetNames[sasx]) {
                    found = true
                    serviceVMSetNames[sasx] = (*scaleSetNames)[asx]
                    break
                }
            }
            if !found {
                glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
                return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx])
            }
        }
        vmSetNames = &serviceVMSetNames
    }

    return vmSetNames, nil
}

// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
    managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
    if err != nil {
        glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
        return network.Interface{}, err
    }
    if managedByAS {
        // vm is managed by availability set.
        return ss.availabilitySet.GetPrimaryInterface(nodeName, "")
    }

    ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
    if err != nil {
        glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
        return network.Interface{}, err
    }

    // Check scale set name.
    if vmSetName != "" && !strings.EqualFold(ssName, vmSetName) {
        return network.Interface{}, errNotInVMSet
    }

    primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
    if err != nil {
        glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
        return network.Interface{}, err
    }

    nicName, err := getLastSegment(primaryInterfaceID)
    if err != nil {
        glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
        return network.Interface{}, err
    }

    nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ssName, instanceID, nicName, "")
    if err != nil {
        glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
        return network.Interface{}, err
    }

    // Fix interface's location, which is required when updating the interface.
    // TODO: is this a bug of azure SDK?
    if nic.Location == nil || *nic.Location == "" {
        nic.Location = vm.Location
    }

    return nic, nil
}

// getScaleSetWithRetry gets a scale set with exponential backoff retry.
func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMachineScaleSet, bool, error) {
    var result computepreview.VirtualMachineScaleSet
    var exists bool

    err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
        cached, retryErr := ss.vmssCache.Get(name)
        if retryErr != nil {
            glog.Errorf("backoff: failure for scale set %q, will retry, err=%v", name, retryErr)
            return false, nil
        }
        glog.V(4).Infof("backoff: success for scale set %q", name)

        if cached != nil {
            exists = true
            result = *(cached.(*computepreview.VirtualMachineScaleSet))
        }

        return true, nil
    })

    return result, exists, err
}

// getPrimaryNetworkConfiguration gets the primary network interface configuration for scale sets.
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetNetworkConfiguration, error) {
    networkConfigurations := *networkConfigurationList
    if len(networkConfigurations) == 1 {
        return &networkConfigurations[0], nil
    }

    for idx := range networkConfigurations {
        networkConfig := &networkConfigurations[idx]
        if networkConfig.Primary != nil && *networkConfig.Primary == true {
            return networkConfig, nil
        }
    }

    return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName)
}

func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetIPConfiguration, error) {
    ipConfigurations := *config.IPConfigurations
    if len(ipConfigurations) == 1 {
        return &ipConfigurations[0], nil
    }

    for idx := range ipConfigurations {
        ipConfig := &ipConfigurations[idx]
        if ipConfig.Primary != nil && *ipConfig.Primary == true {
            return ipConfig, nil
        }
    }

    return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set %q", scaleSetName)
}

// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet computepreview.VirtualMachineScaleSet) error {
    return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
        ctx, cancel := getContextWithCancel()
        defer cancel()
        resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet)
        glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
        return processHTTPRetryResponse(resp, err)
    })
}

// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
    return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
        ctx, cancel := getContextWithCancel()
        defer cancel()
        resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs)
        glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
        return processHTTPRetryResponse(resp, err)
    })
}

// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
    virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
    if err != nil {
        glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
        return err
    }
    if !exists {
        errorMessage := fmt.Errorf("Scale set %q not found", vmSetName)
        glog.Errorf("%v", errorMessage)
        return errorMessage
    }

    // Find primary network interface configuration.
    networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
    primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
    if err != nil {
        return err
    }

    // Find primary IP configuration.
    primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
    if err != nil {
        return err
    }

    // Update primary IP configuration's LoadBalancerBackendAddressPools.
    foundPool := false
    newBackendPools := []computepreview.SubResource{}
    if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
        newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
    }
    for _, existingPool := range newBackendPools {
        if strings.EqualFold(backendPoolID, *existingPool.ID) {
            foundPool = true
            break
        }
    }
    if !foundPool {
        newBackendPools = append(newBackendPools,
            computepreview.SubResource{
                ID: to.StringPtr(backendPoolID),
            })
        primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools

        ctx, cancel := getContextWithCancel()
        defer cancel()
        glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
        resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
        glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
        if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
            glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
            retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
            if retryErr != nil {
                err = retryErr
                glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
            }
        }
        if err != nil {
            return err
        }
    }

    // Construct instanceIDs from nodes.
    instanceIDs := []string{}
    for _, curNode := range nodes {
        curScaleSetName, err := extractScaleSetNameByExternalID(curNode.Spec.ExternalID)
        if err != nil {
            glog.V(4).Infof("Node %q does not belong to any scale sets, omitting it", curNode.Name)
            continue
        }
        if curScaleSetName != vmSetName {
            glog.V(4).Infof("Node %q does not belong to scale set %q, omitting it", curNode.Name, vmSetName)
            continue
        }

        instanceID, err := getLastSegment(curNode.Spec.ExternalID)
        if err != nil {
            glog.Errorf("Failed to get last segment from %q: %v", curNode.Spec.ExternalID, err)
            return err
        }

        instanceIDs = append(instanceIDs, instanceID)
    }

    // Update instances to latest VMSS model.
    vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
        InstanceIds: &instanceIDs,
    }
    ctx, cancel := getContextWithCancel()
    defer cancel()
    instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
    glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName)
    if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
        glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
        retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
        }
    }
    if err != nil {
        return err
    }

    return nil
}

// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools are deleted from the specified vmSet.
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
    virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
    if err != nil {
        glog.Errorf("ss.EnsureBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, vmSetName, vmSetName, err)
        return err
    }
    if !exists {
        glog.V(2).Infof("ss.EnsureBackendPoolDeleted(%s, %s), scale set %s no longer exists", poolID, vmSetName, vmSetName)
        return nil
    }

    // Find primary network interface configuration.
    networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
    primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
    if err != nil {
        return err
    }

    // Find primary IP configuration.
    primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
    if err != nil {
        return err
    }

    // Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration.
    if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 {
        return nil
    }
    existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
    newBackendPools := []computepreview.SubResource{}
    foundPool := false
    for i := len(existingBackendPools) - 1; i >= 0; i-- {
        curPool := existingBackendPools[i]
        if strings.EqualFold(poolID, *curPool.ID) {
            glog.V(10).Infof("EnsureBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, vmSetName)
            foundPool = true
            newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
        }
    }
    if !foundPool {
        // Pool not found, assume it has been already removed.
        return nil
    }

    // Update scale set with backoff.
    primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
    glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName)
    ctx, cancel := getContextWithCancel()
    defer cancel()
    resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
    glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
    if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
        glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
        retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
        }
    }
    if err != nil {
        return err
    }

    // Update instances to latest VMSS model.
    instanceIDs := []string{"*"}
    vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
        InstanceIds: &instanceIDs,
    }
    instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
    glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName)
    if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
        glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", vmSetName, err)
        retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
        if retryErr != nil {
            err = retryErr
            glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", vmSetName)
        }
    }
    if err != nil {
        return err
    }

    // Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB.
    // TODO: remove this workaround when figuring out the root cause.
    if len(newBackendPools) == 0 {
        glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName)
        resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
        glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
        if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
            glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
            retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
            if retryErr != nil {
                glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
            }
        }
    }

    return nil
}

// getVmssMachineID returns the full identifier of a vmss virtual machine.
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
    return fmt.Sprintf(
        vmssMachineIDTemplate,
        az.SubscriptionID,
        az.ResourceGroup,
        scaleSetName,
        instanceID)
}
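The providerID parsing above (extractScaleSetNameByExternalID plus getLastSegment) is easy to sanity-check in isolation. A standalone sketch using the same regular expression on a made-up providerID (the subscription, resource group, and scale set names are illustrative only):

package main

import (
    "fmt"
    "regexp"
)

var scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)

func main() {
    // Hypothetical providerID for instance 3 of scale set "agentpool1".
    providerID := "azure:///subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/agentpool1/virtualMachines/3"
    matches := scaleSetNameRE.FindStringSubmatch(providerID)
    if len(matches) != 2 {
        fmt.Println("not a vmss instance")
        return
    }
    fmt.Println(matches[1]) // prints "agentpool1"; getLastSegment would yield "3"
}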
197
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
var (
|
||||
vmssNameSeparator = "_"
|
||||
|
||||
nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
|
||||
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
|
||||
|
||||
vmssCacheTTL = time.Minute
|
||||
vmssVMCacheTTL = time.Minute
|
||||
availabilitySetNodesCacheTTL = 15 * time.Minute
|
||||
nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
|
||||
)
|
||||
|
||||
// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
|
||||
// The map is required because vmss nodeName is not equal to its vmName.
|
||||
type nodeNameToScaleSetMapping map[string]string
|
||||
|
||||
func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
|
||||
return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
|
||||
}
|
||||
|
||||
func extractVmssVMName(name string) (string, string, error) {
|
||||
ret := strings.Split(name, vmssNameSeparator)
|
||||
if len(ret) != 2 {
|
||||
glog.Errorf("Failed to extract vmssVMName %q", name)
|
||||
return "", "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return ret[0], ret[1], nil
|
||||
}
|
||||
|
||||
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		ctx, cancel := getContextWithCancel()
		defer cancel()
		result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssCacheTTL, getter)
}
func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		scaleSetNames, err := ss.listScaleSets()
		if err != nil {
			return nil, err
		}

		localCache := make(nodeNameToScaleSetMapping)
		for _, ssName := range scaleSetNames {
			vms, err := ss.listScaleSetVMs(ssName)
			if err != nil {
				return nil, err
			}

			for _, vm := range vms {
				if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
					glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
					continue
				}

				computerName := strings.ToLower(*vm.OsProfile.ComputerName)
				localCache[computerName] = ssName
			}
		}

		return localCache, nil
	}

	return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
}
func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
		if err != nil {
			return nil, err
		}

		localCache := sets.NewString()
		for _, vm := range vmList {
			localCache.Insert(*vm.Name)
		}

		return localCache, nil
	}

	return newTimedcache(availabilitySetNodesCacheTTL, getter)
}
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// vmssVM name's format is 'scaleSetName_instanceID'
		ssName, instanceID, err := extractVmssVMName(key)
		if err != nil {
			return nil, err
		}

		// Not found, the VM doesn't belong to any known scale sets.
		if ssName == "" {
			return nil, nil
		}

		ctx, cancel := getContextWithCancel()
		defer cancel()
		result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssVMCacheTTL, getter)
}
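All four cache constructors in this file share one contract with timedCache (defined in azure_cache.go, outside this hunk): the getter runs on a miss, a nil result is cached as not-found, and entries expire after the TTL. A hedged usage sketch, relying only on the Get and Delete calls visible in this file:

	cache, err := ss.newVmssVMCache()
	if err != nil {
		return err // sketch assumes this runs inside a method returning error
	}

	// The first Get for a key invokes the getter above; subsequent Gets
	// within vmssVMCacheTTL are served from memory.
	entry, err := cache.Get("ss_1234")
	if err != nil {
		return err
	}
	if entry == nil {
		// VM not found, or the key is not a vmss instance name.
	}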
func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
	getScaleSetName := func(nodeName string) (string, error) {
		nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
		if err != nil {
			return "", err
		}

		realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
		if ssName, ok := realMapping[nodeName]; ok {
			return ssName, nil
		}

		return "", nil
	}

	ssName, err := getScaleSetName(nodeName)
	if err != nil {
		return "", err
	}

	if ssName != "" {
		return ssName, nil
	}

	// ssName is still not found, it is likely that new Nodes are created.
	// Force refresh the cache and try again.
	ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
	return getScaleSetName(nodeName)
}
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
	cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
	if err != nil {
		return false, err
	}

	availabilitySetNodes := cached.(sets.String)
	return availabilitySetNodes.Has(nodeName), nil
}
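isNodeManagedByAvailabilitySet exists because a cluster can mix vmss nodes with plain availability-set VMs; callers are expected to detect the latter and delegate to the standard implementation. A hedged sketch of that dispatch (the availabilitySet field is an assumption, it is not part of this hunk):

	managed, err := ss.isNodeManagedByAvailabilitySet(nodeName)
	if err != nil {
		return "", err
	}
	if managed {
		// Assumed fallback: delegate to the availability-set implementation.
		return ss.availabilitySet.GetInstanceIDByNodeName(nodeName)
	}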
61
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"testing"

	"github.com/stretchr/testify/assert"
)
func TestExtractVmssVMName(t *testing.T) {
	cases := []struct {
		description        string
		vmName             string
		expectError        bool
		expectedScaleSet   string
		expectedInstanceID string
	}{
		{
			description: "wrong vmss VM name should report error",
			vmName:      "vm1234",
			expectError: true,
		},
		{
			description: "wrong VM name separator should report error",
			vmName:      "vm-1234",
			expectError: true,
		},
		{
			description:        "correct vmss VM name should return correct scaleSet and instanceID",
			vmName:             "vm_1234",
			expectedScaleSet:   "vm",
			expectedInstanceID: "1234",
		},
	}

	for _, c := range cases {
		ssName, instanceID, err := extractVmssVMName(c.vmName)
		if c.expectError {
			assert.Error(t, err, c.description)
			continue
		}

		assert.Equal(t, c.expectedScaleSet, ssName, c.description)
		assert.Equal(t, c.expectedInstanceID, instanceID, c.description)
	}
}
156
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"testing"

	computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/stretchr/testify/assert"
)
func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
	cloud := getTestCloud()
	setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
	ss, err := newScaleSet(cloud)
	if err != nil {
		return nil, err
	}

	return ss.(*scaleSet), nil
}
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
	virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
	scaleSets := make(map[string]map[string]computepreview.VirtualMachineScaleSet)
	scaleSets["rg"] = map[string]computepreview.VirtualMachineScaleSet{
		scaleSetName: {
			Name: &scaleSetName,
		},
	}
	virtualMachineScaleSetsClient.setFakeStore(scaleSets)

	virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
	ssVMs := make(map[string]map[string]computepreview.VirtualMachineScaleSetVM)
	ssVMs["rg"] = make(map[string]computepreview.VirtualMachineScaleSetVM)
	for i := range vmList {
		ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
		nodeName := vmList[i]
		instanceID := fmt.Sprintf("%d", i)
		vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
		networkInterfaces := []computepreview.NetworkInterfaceReference{
			{
				ID: &nodeName,
			},
		}
		ssVMs["rg"][vmName] = computepreview.VirtualMachineScaleSetVM{
			VirtualMachineScaleSetVMProperties: &computepreview.VirtualMachineScaleSetVMProperties{
				OsProfile: &computepreview.OSProfile{
					ComputerName: &nodeName,
				},
				NetworkProfile: &computepreview.NetworkProfile{
					NetworkInterfaces: &networkInterfaces,
				},
			},
			ID:         &ID,
			InstanceID: &instanceID,
			Name:       &vmName,
			Location:   &ss.Location,
		}
	}
	virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)

	ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
	ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
}
func TestGetScaleSetVMInstanceID(t *testing.T) {
	tests := []struct {
		msg                string
		machineName        string
		expectError        bool
		expectedInstanceID string
	}{
		{
			msg:         "invalid vmss instance name",
			machineName: "vmvm",
			expectError: true,
		},
		{
			msg:                "valid vmss instance name",
			machineName:        "vm00000Z",
			expectError:        false,
			expectedInstanceID: "35",
		},
	}

	for i, test := range tests {
		instanceID, err := getScaleSetVMInstanceID(test.machineName)
		if test.expectError {
			assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		} else {
			assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		}
	}
}
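The expected "35" above is not arbitrary: vmss computer names end in a base-36 suffix that encodes the instance ID, so "00000Z" decodes to 35 (Z is digit 35 in base 36), and "vmvm" fails for being shorter than the suffix. A sketch of the decoding, assuming the six-character suffix these test cases imply:

	name := "vm00000Z"
	suffix := name[len(name)-6:]                 // "00000Z"
	id, err := strconv.ParseUint(suffix, 36, 64) // id == 35, err == nil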
func TestGetInstanceIDByNodeName(t *testing.T) {
	testCases := []struct {
		description string
		scaleSet    string
		vmList      []string
		nodeName    string
		expected    string
		expectError bool
	}{
		{
			description: "scaleSet should get instance by node name",
			scaleSet:    "ss",
			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
			nodeName:    "vmssee6c2000001",
			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/1",
		},
		{
			description: "scaleSet should get instance by node name with upper cases hostname",
			scaleSet:    "ss",
			vmList:      []string{"VMSSEE6C2000000", "VMSSEE6C2000001"},
			nodeName:    "vmssee6c2000000",
			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
		},
		{
			description: "scaleSet should not get instance for non-exist nodes",
			scaleSet:    "ss",
			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
			nodeName:    "agente6c2000005",
			expectError: true,
		},
	}

	for _, test := range testCases {
		ss, err := newTestScaleSet(test.scaleSet, test.vmList)
		assert.NoError(t, err, test.description)

		real, err := ss.GetInstanceIDByNodeName(test.nodeName)
		if test.expectError {
			assert.Error(t, err, test.description)
			continue
		}

		assert.NoError(t, err, test.description)
		assert.Equal(t, test.expected, real, test.description)
	}
}
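The upper-case hostname case passes because newNodeNameToScaleSetMappingCache in azure_vmss_cache.go above lowercases each ComputerName before using it as the map key:

	computerName := strings.ToLower("VMSSEE6C2000000") // "vmssee6c2000000"
	localCache[computerName] = "ss"                    // now matches the lower-case node name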
257
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
@ -17,19 +17,23 @@ limitations under the License.
package azure

import (
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
)

var (
	// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
	ErrorNotVmssInstance = errors.New("not a vmss instance")

	vmCacheTTL  = time.Minute
	lbCacheTTL  = 2 * time.Minute
	nsgCacheTTL = 2 * time.Minute
	rtCacheTTL  = 2 * time.Minute
)

// checkExistsFromError inspects an error and returns true if err is nil,
@ -62,139 +66,44 @@ func ignoreStatusNotFoundFromError(err error) error {
	return err
}

func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) {
	var realErr error

// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache.
// The service side has throttling control that delays responses when multiple
// requests arrive for a certain vm resource in a short period.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) {
	vmName := string(nodeName)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName)
	vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
	glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return vm, false, realErr
	}

	if !exists {
		return vm, false, nil
	}

	return vm, exists, err
}

func (az *Cloud) getVmssVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) {
	var realErr error

	vmName := string(nodeName)
	instanceID, err := getVmssInstanceID(vmName)
	cachedVM, err := az.vmCache.Get(vmName)
	if err != nil {
		return vm, false, err
		return vm, err
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", vmName)
	vm, err = az.VirtualMachineScaleSetVMsClient.Get(az.ResourceGroup, az.PrimaryScaleSetName, instanceID)
	glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", vmName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return vm, false, realErr
	if cachedVM == nil {
		return vm, cloudprovider.InstanceNotFound
	}

	if !exists {
		return vm, false, nil
	}

	return vm, exists, err
	return *(cachedVM.(*compute.VirtualMachine)), nil
}
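With this change getVirtualMachine no longer returns an exists flag; a cache miss surfaces as cloudprovider.InstanceNotFound. A hedged sketch of the resulting call-site pattern:

	vm, err := az.getVirtualMachine(types.NodeName("node-1"))
	switch {
	case err == cloudprovider.InstanceNotFound:
		// The VM does not exist; the old signature reported this via an exists bool.
	case err != nil:
		// Transport or ARM error.
	default:
		_ = vm // served from vmCache, refreshed at most every vmCacheTTL
	}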
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("RouteTablesClient.Get(%s): start", az.RouteTableName)
	routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
	glog.V(10).Infof("RouteTablesClient.Get(%s): end", az.RouteTableName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return routeTable, false, realErr
	cachedRt, err := az.rtCache.Get(az.RouteTableName)
	if err != nil {
		return routeTable, false, err
	}

	if !exists {
	if cachedRt == nil {
		return routeTable, false, nil
	}

	return routeTable, exists, err
	return *(cachedRt.(*network.RouteTable)), true, nil
}
func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err error) {
func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pip network.PublicIPAddress, exists bool, err error) {
	resourceGroup := az.ResourceGroup
	if pipResourceGroup != "" {
		resourceGroup = pipResourceGroup
	}

	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("SecurityGroupsClient.Get(%s): start", az.SecurityGroupName)
	sg, err = az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
	glog.V(10).Infof("SecurityGroupsClient.Get(%s): end", az.SecurityGroupName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return sg, false, realErr
	}

	if !exists {
		return sg, false, nil
	}

	return sg, exists, err
}

func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
	var realErr error
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("LoadBalancerClient.Get(%s): start", name)
	lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "")
	glog.V(10).Infof("LoadBalancerClient.Get(%s): end", name)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return lb, false, realErr
	}

	if !exists {
		return lb, false, nil
	}

	return lb, exists, err
}

func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResult, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("LoadBalancerClient.List(%s): start", az.ResourceGroup)
	lbListResult, err = az.LoadBalancerClient.List(az.ResourceGroup)
	glog.V(10).Infof("LoadBalancerClient.List(%s): end", az.ResourceGroup)
	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return lbListResult, false, realErr
	}

	if !exists {
		return lbListResult, false, nil
	}

	return lbListResult, exists, err
}

func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("PublicIPAddressesClient.Get(%s): start", name)
	pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, name, "")
	glog.V(10).Infof("PublicIPAddressesClient.Get(%s): end", name)

	pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "")
	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return pip, false, realErr
@ -217,11 +126,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
		rg = az.ResourceGroup
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("SubnetsClient.Get(%s): start", subnetName)
	subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "")
	glog.V(10).Infof("SubnetsClient.Get(%s): end", subnetName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return subnet, false, realErr
@ -233,3 +138,107 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet

	return subnet, exists, err
}

func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
	cachedLB, err := az.lbCache.Get(name)
	if err != nil {
		return lb, false, err
	}

	if cachedLB == nil {
		return lb, false, nil
	}

	return *(cachedLB.(*network.LoadBalancer)), true, nil
}
func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
	securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
	if err != nil {
		return nsg, err
	}

	if securityGroup == nil {
		return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName)
	}

	return *(securityGroup.(*network.SecurityGroup)), nil
}
func (az *Cloud) newVMCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// Currently InstanceView requests are used by azure_zones, while those calls come after
		// non-InstanceView requests. If we first send an InstanceView request and then a
		// non-InstanceView request, the second request will still hit throttling. This is what
		// happens now for cloud controller manager: in this case we get the instance view every
		// time to fulfill the azure_zones requirement without hitting throttling.
		// Consider adding a separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed.
		vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, key, compute.InstanceView)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &vm, nil
	}

	return newTimedcache(vmCacheTTL, getter)
}
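Requesting compute.InstanceView here is what lets zone lookups ride on this same cache; the fault-domain read that the removed GetZoneByNodeName performed (see the azure_zones.go hunk later in this diff) looks like this:

	failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
	zone := cloudprovider.Zone{
		FailureDomain: failureDomain,
		Region:        *(vm.Location),
	}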
func (az *Cloud) newLBCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		lb, err := az.LoadBalancerClient.Get(az.ResourceGroup, key, "")
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &lb, nil
	}

	return newTimedcache(lbCacheTTL, getter)
}
func (az *Cloud) newNSGCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		nsg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, key, "")
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &nsg, nil
	}

	return newTimedcache(nsgCacheTTL, getter)
}
func (az *Cloud) newRouteTableCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		rt, err := az.RouteTablesClient.Get(az.ResourceGroup, key, "")
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &rt, nil
	}

	return newTimedcache(rtCacheTTL, getter)
}
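The four cache constructors above differ only in the client call and the TTL. A hedged sketch of how the shared skeleton could be factored out (newARMCache is invented for illustration; the vendored code keeps the getters inlined):

	// newARMCache wraps an ARM GET in the common exists/error handling.
	func newARMCache(ttl time.Duration, fetch func(key string) (interface{}, error)) (*timedCache, error) {
		getter := func(key string) (interface{}, error) {
			result, err := fetch(key)
			exists, realErr := checkResourceExistsFromError(err)
			if realErr != nil {
				return nil, realErr
			}
			if !exists {
				return nil, nil
			}
			return result, nil
		}
		return newTimedcache(ttl, getter)
	}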
41
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
@ -17,17 +17,15 @@ limitations under the License.
package azure

import (
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
	"strconv"
	"sync"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
)

const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"
@ -42,12 +40,17 @@ type instanceInfo struct {
}

// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
	return az.getZoneFromURL(instanceInfoURL)
}

// This is injectable for testing.
func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
	faultMutex.Lock()
	defer faultMutex.Unlock()
	if faultDomain == nil {
		var err error
		faultDomain, err = fetchFaultDomain()
		faultDomain, err = fetchFaultDomain(url)
		if err != nil {
			return cloudprovider.Zone{}, err
		}
@ -62,36 +65,24 @@ func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
	nodeName, err := splitProviderID(providerID)
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
	nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	return az.GetZoneByNodeName(nodeName)

	return az.GetZoneByNodeName(ctx, nodeName)
}

// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {

	vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, string(nodeName), compute.InstanceView)

	if err != nil {
		return cloudprovider.Zone{}, err
	}

	failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))

	zone := cloudprovider.Zone{
		FailureDomain: failureDomain,
		Region:        *(vm.Location),
	}
	return zone, nil
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
	return az.vmSet.GetZoneByNodeName(string(nodeName))
}

func fetchFaultDomain() (*string, error) {
	resp, err := http.Get(instanceInfoURL)
func fetchFaultDomain(url string) (*string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
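The "injectable for testing" comment above is the point of getZoneFromURL: a test can stand up an httptest server and point the cloud at it. A hedged sketch; the JSON field names are an assumption, since the instanceInfo struct fields are elided from this hunk:

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"ID":"node-1", "UD":"0", "FD":"99"}`) // assumed payload shape
	}))
	defer ts.Close()

	zone, err := az.getZoneFromURL(ts.URL)
	// zone.FailureDomain is expected to reflect the served fault domain ("99").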