Mirror of https://github.com/ceph/ceph-csi.git
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD (generated, vendored): 13 changes
@@ -42,11 +42,10 @@ go_library(
"//pkg/controller:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
@@ -57,6 +56,7 @@ go_library(
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -89,10 +89,9 @@ go_test(
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go (generated, vendored): 2 changes
@@ -43,7 +43,7 @@ type AzureAuthConfig struct {
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
// Use managed service identity for the virtual machine to access Azure ARM APIs
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
// The ID of the Azure Subscription that the cluster is deployed in
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go (generated, vendored): 27 changes
@@ -49,6 +49,14 @@ const (

vmTypeVMSS = "vmss"
vmTypeStandard = "standard"

loadBalancerSkuBasic = "basic"
loadBalancerSkuStandard = "standard"
)

var (
// Master nodes are not added to standard load balancer by default.
defaultExcludeMasterFromStandardLB = true
)

// Config holds the configuration parsed from the --cloud-config flag
@@ -109,11 +117,15 @@ type Config struct {
// Use instance metadata service where possible
UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`

// Use managed service identity for the virtual machine to access Azure ARM APIs
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
// Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
// If not set, it will be default to basic.
LoadBalancerSku string `json:"loadBalancerSku" yaml:"loadBalancerSku"`
// ExcludeMasterFromStandardLB excludes master nodes from standard load balancer.
// If not set, it will be default to true.
ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB" yaml:"excludeMasterFromStandardLB"`

// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"`
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount" yaml:"maximumLoadBalancerRuleCount"`
}

// Cloud holds the config and clients
@@ -160,6 +172,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
return nil, err
}

if config.VMType == "" {
// default to standard vmType if not set.
config.VMType = vmTypeStandard
}

env, err := auth.ParseAzureEnvironment(config.Cloud)
if err != nil {
return nil, err
@@ -206,7 +223,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)
}

// Do not add master nodes to standard LB by default.
if config.ExcludeMasterFromStandardLB == nil {
config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB
}

azClientConfig := &azClientConfig{
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go (generated, vendored): 221 changes
@@ -20,14 +20,14 @@ import (
"context"
"net/http"

"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network"
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
)

// requestBackoff if backoff is disabled in cloud provider it
@@ -51,11 +51,14 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
var retryErr error
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
machine, retryErr = az.getVirtualMachine(name)
if retryErr == cloudprovider.InstanceNotFound {
return true, cloudprovider.InstanceNotFound
}
if retryErr != nil {
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
return false, nil
}
glog.V(2).Info("backoff: success")
glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
return true, nil
})
if err == wait.ErrWaitTimeout {
@@ -68,10 +71,11 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
allNodes := []compute.VirtualMachine{}
var result compute.VirtualMachineListResult
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup)
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, az.ResourceGroup)
if retryErr != nil {
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
@@ -85,57 +89,34 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine,
return nil, err
}

appendResults := (result.Value != nil && len(*result.Value) > 0)
for appendResults {
allNodes = append(allNodes, *result.Value...)
appendResults = false
// follow the next link to get all the vms for resource group
if result.NextLink != nil {
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
result, retryErr = az.VirtualMachinesClient.ListNextResults(az.ResourceGroup, result)
if retryErr != nil {
glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup, retryErr)
return false, retryErr
}
glog.V(2).Infof("VirtualMachinesClient.ListNextResults(%v): success", az.ResourceGroup)
return true, nil
})
if err != nil {
return allNodes, err
}
appendResults = (result.Value != nil && len(*result.Value) > 0)
}
}

return allNodes, err
}

// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
var ip string
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
var ip, publicIP string
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ip, retryErr = az.getIPForMachine(name)
ip, publicIP, retryErr = az.getIPForMachine(name)
if retryErr != nil {
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
return false, nil
}
glog.V(2).Info("backoff: success")
glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
return true, nil
})
return ip, err
return ip, publicIP, err
}

// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := processRetryResponse(resp.Response, err)
done, err := processHTTPRetryResponse(resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.nsgCache.Delete(*sg.Name)
@@ -147,11 +128,12 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := processRetryResponse(resp.Response, err)
done, err := processHTTPRetryResponse(resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.lbCache.Delete(*lb.Name)
@@ -162,12 +144,14 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {

// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
allLBs := []network.LoadBalancer{}
var result network.LoadBalancerListResult
var allLBs []network.LoadBalancer

err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup)
ctx, cancel := getContextWithCancel()
defer cancel()

allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
if retryErr != nil {
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
@@ -181,42 +165,19 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
return nil, err
}

appendResults := (result.Value != nil && len(*result.Value) > 0)
for appendResults {
allLBs = append(allLBs, *result.Value...)
appendResults = false

// follow the next link to get all the vms for resource group
if result.NextLink != nil {
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
result, retryErr = az.LoadBalancerClient.ListNextResults(az.ResourceGroup, result)
if retryErr != nil {
glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("LoadBalancerClient.ListNextResults(%v) - backoff: success", az.ResourceGroup)
return true, nil
})
if err != nil {
return allLBs, err
}
appendResults = (result.Value != nil && len(*result.Value) > 0)
}
}

return allLBs, nil
}

// ListPIPWithRetry list the PIP resources in the given resource group
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
allPIPs := []network.PublicIPAddress{}
var result network.PublicIPAddressListResult
var allPIPs []network.PublicIPAddress

err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup)
ctx, cancel := getContextWithCancel()
defer cancel()

allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if retryErr != nil {
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
pipResourceGroup,
@@ -230,74 +191,52 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd
return nil, err
}

appendResults := (result.Value != nil && len(*result.Value) > 0)
for appendResults {
allPIPs = append(allPIPs, *result.Value...)
appendResults = false

// follow the next link to get all the pip resources for resource group
if result.NextLink != nil {
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
result, retryErr = az.PublicIPAddressesClient.ListNextResults(az.ResourceGroup, result)
if retryErr != nil {
glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
pipResourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", pipResourceGroup)
return true, nil
})
if err != nil {
return allPIPs, err
}
appendResults = (result.Value != nil && len(*result.Value) > 0)
}
}

return allPIPs, nil
}

// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return processRetryResponse(resp.Response, err)
return processHTTPRetryResponse(resp, err)
})
}

// CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return processRetryResponse(resp.Response, err)
return processHTTPRetryResponse(resp, err)
})
}

// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil)
resp := <-respChan
err := <-errChan
return processRetryResponse(resp, err)
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
return processHTTPRetryResponse(resp, err)
})
}

// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil)
resp := <-respChan
err := <-errChan
done, err := processRetryResponse(resp, err)
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
done, err := processHTTPRetryResponse(resp, err)
if done && err == nil {
// Invalidate the cache right after deleting
az.lbCache.Delete(lbName)
@@ -309,48 +248,52 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error {
// CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
resp := <-respChan
err := <-errChan
return processRetryResponse(resp.Response, err)
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
return processHTTPRetryResponse(resp, err)
})
}

// CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return processRetryResponse(resp.Response, err)
return processHTTPRetryResponse(resp, err)
})
}

// DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry
func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
return processRetryResponse(resp, err)
return processHTTPRetryResponse(resp, err)
})
}

// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
resp := <-respChan
err := <-errChan
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return processRetryResponse(resp.Response, err)
return processHTTPRetryResponse(resp, err)
})
}

// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters computepreview.VirtualMachineScaleSetVM) error {
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
@@ -361,11 +304,11 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st
// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
if isSuccessHTTPResponse(resp) {
glog.V(2).Infof("backoff: success, HTTP response=%d", resp.StatusCode)
glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
return true, nil
}
if shouldRetryAPIRequest(resp, err) {
glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
// suppress the error object so that backoff process continues
return false, nil
}
@@ -418,7 +361,7 @@ func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
}

if shouldRetryHTTPRequest(resp, err) {
glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
// suppress the error object so that backoff process continues
return false, nil
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go (generated, vendored): 23 changes
@@ -27,7 +27,7 @@ import (
"sync/atomic"
"time"

storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
@@ -277,7 +277,10 @@ func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error)
if account, exists := c.accounts[SAName]; exists && account.key != "" {
return c.accounts[SAName].key, nil
}
listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName)

ctx, cancel := getContextWithCancel()
defer cancel()
listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(ctx, c.common.resourceGroup, SAName)
if err != nil {
return "", err
}
@@ -432,7 +435,9 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
}

func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(c.common.resourceGroup)
ctx, cancel := getContextWithCancel()
defer cancel()
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(ctx, c.common.resourceGroup)
if err != nil {
return nil, err
}
@@ -484,12 +489,12 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto

cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")},
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
cancel := make(chan struct{})
ctx, cancel := getContextWithCancel()
defer cancel()

_, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel)
err := <-errChan
_, err := c.common.cloud.StorageAccountClient.Create(ctx, c.common.resourceGroup, storageAccountName, cp)
if err != nil {
return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err))
}
@@ -584,7 +589,9 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam

//Gets storage account exist, provisionStatus, Error if any
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName)
ctx, cancel := getContextWithCancel()
defer cancel()
account, err := c.common.cloud.StorageAccountClient.GetProperties(ctx, c.common.resourceGroup, storageAccountName)
if err != nil {
return false, "", err
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go (generated, vendored): 671 changes (file diff suppressed because it is too large)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go (generated, vendored): 230 changes
@@ -20,10 +20,12 @@ import (
"fmt"
"time"

"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/types"
kwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
)

const (
@@ -56,119 +58,11 @@ type controllerCommon struct {
cloud *Cloud
}

// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
// 1. vmType is standard, attach with availabilitySet.AttachDisk.
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) {
// 1. vmType is standard, return cloud.vmSet directly.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}

// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}

// 3. If the node is managed by availability set, then attach with availabilitySet.AttachDisk.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}

// 4. Node is managed by vmss, attach with scaleSet.AttachDisk.
return ss.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}

// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
// 1. vmType is standard, detach with availabilitySet.DetachDiskByName.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.DetachDiskByName(diskName, diskURI, nodeName)
}

// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}

// 3. If the node is managed by availability set, then detach with availabilitySet.DetachDiskByName.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.DetachDiskByName(diskName, diskURI, nodeName)
}

// 4. Node is managed by vmss, detach with scaleSet.DetachDiskByName.
return ss.DetachDiskByName(diskName, diskURI, nodeName)
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
// 1. vmType is standard, get with availabilitySet.GetDiskLun.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
}

// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}

// 3. If the node is managed by availability set, then get with availabilitySet.GetDiskLun.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return -1, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
}

// 4. Node is managed by vmss, get with scaleSet.GetDiskLun.
return ss.GetDiskLun(diskName, diskURI, nodeName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
// 1. vmType is standard, get with availabilitySet.GetNextDiskLun.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.GetNextDiskLun(nodeName)
}

// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}

// 3. If the node is managed by availability set, then get with availabilitySet.GetNextDiskLun.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return -1, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetNextDiskLun(nodeName)
}

// 4. Node is managed by vmss, get with scaleSet.GetNextDiskLun.
return ss.GetNextDiskLun(nodeName)
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
// 1. vmType is standard, check with availabilitySet.DisksAreAttached.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
return c.cloud.vmSet, nil
}

// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
@@ -177,16 +71,118 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}

// 3. If the node is managed by availability set, then check with availabilitySet.DisksAreAttached.
// 3. If the node is managed by availability set, then return ss.availabilitySet.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return nil, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
return ss.availabilitySet, nil
}

// 4. Node is managed by vmss, check with scaleSet.DisksAreAttached.
return ss.DisksAreAttached(diskNames, nodeName)
// 4. Node is managed by vmss
return ss, nil
}

// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return err
}

return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}

// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return err
}

return vmset.DetachDiskByName(diskName, diskURI, nodeName)
}

// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return nil, err
}

return vmset.GetDataDisks(nodeName)
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err
}

for _, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}
}
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err
}

used := make([]bool, maxLUN)
for _, disk := range disks {
if disk.Lun != nil {
used[*disk.Lun] = true
}
}
for k, v := range used {
if !v {
return int32(k), nil
}
}
return -1, fmt.Errorf("all luns are used")
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}

disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
}

return attached, err
}

for _, disk := range disks {
for _, diskName := range diskNames {
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
attached[diskName] = true
}
}
}

return attached, nil
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go (generated, vendored): 90 changes
@@ -20,11 +20,10 @@ import (
"fmt"
"strings"

"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
)

// AttachDisk attaches a vhd to vm
@@ -70,10 +69,10 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
resp := <-respChan
err = <-errChan
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
@@ -135,10 +134,10 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
resp := <-respChan
err = <-errChan
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
@@ -156,71 +155,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (as *availabilitySet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
// GetDataDisks gets a list of data disks attached to the node.
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
vm, err := as.getVirtualMachine(nodeName)
if err != nil {
return -1, err
return nil, err
}
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}

if vm.StorageProfile.DataDisks == nil {
return nil, nil
}
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (as *availabilitySet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
vm, err := as.getVirtualMachine(nodeName)
if err != nil {
return -1, err
}
used := make([]bool, maxLUN)
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil {
used[*disk.Lun] = true
}
}
for k, v := range used {
if !v {
return int32(k), nil
}
}
return -1, fmt.Errorf("All Luns are used")
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (as *availabilitySet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}
vm, err := as.getVirtualMachine(nodeName)
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
} else if err != nil {
return attached, err
}

disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
for _, diskName := range diskNames {
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
attached[diskName] = true
}
}
}

return attached, nil

return *vm.StorageProfile.DataDisks, nil
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go (generated, vendored): 100 changes
@@ -20,12 +20,10 @@ import (
"fmt"
"strings"

"github.com/Azure/azure-sdk-for-go/arm/compute"
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
)

// AttachDisk attaches a vhd to vm
@@ -36,27 +34,30 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
return err
}

disks := *vm.StorageProfile.DataDisks
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
disks = *vm.StorageProfile.DataDisks
}
if isManagedDisk {
disks = append(disks,
computepreview.DataDisk{
compute.DataDisk{
Name: &diskName,
Lun: &lun,
Caching: computepreview.CachingTypes(cachingMode),
Caching: compute.CachingTypes(cachingMode),
CreateOption: "attach",
ManagedDisk: &computepreview.ManagedDiskParameters{
ManagedDisk: &compute.ManagedDiskParameters{
ID: &diskURI,
},
})
} else {
disks = append(disks,
computepreview.DataDisk{
compute.DataDisk{
Name: &diskName,
Vhd: &computepreview.VirtualHardDisk{
Vhd: &compute.VirtualHardDisk{
URI: &diskURI,
},
Lun: &lun,
Caching: computepreview.CachingTypes(cachingMode),
Caching: compute.CachingTypes(cachingMode),
CreateOption: "attach",
})
}
@@ -97,7 +98,10 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
return err
}

disks := *vm.StorageProfile.DataDisks
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
disks = *vm.StorageProfile.DataDisks
}
bFoundDisk := false
for i, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
@@ -139,76 +143,16 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
// GetDataDisks gets a list of data disks attached to the node.
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
return -1, err
return nil, err
}

disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}
if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
return nil, nil
}
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (ss *scaleSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
return -1, err
}

used := make([]bool, maxLUN)
disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
if disk.Lun != nil {
used[*disk.Lun] = true
}
}
for k, v := range used {
if !v {
return int32(k), nil
}
}
return -1, fmt.Errorf("All Luns are used")
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (ss *scaleSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}

_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
}

return attached, err
}

disks := *vm.StorageProfile.DataDisks
for _, disk := range disks {
for _, diskName := range diskNames {
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
attached[diskName] = true
}
}
}

return attached, nil

return *vm.StorageProfile.DataDisks, nil
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go (generated, vendored): 571 changes (file diff suppressed because it is too large)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go (generated, vendored): 60 changes
@@ -18,7 +18,6 @@ package azure

import (
"context"
"fmt"
"os"
"strings"

@@ -31,9 +30,39 @@ import (

// NodeAddresses returns the addresses of the specified instance.
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
if err != nil {
glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
return nil, err
}

addresses := []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: ip},
{Type: v1.NodeHostName, Address: string(name)},
}
if len(publicIP) > 0 {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: publicIP,
})
}
return addresses, nil
}

if az.UseInstanceMetadata {
isLocalInstance, err := az.isCurrentInstance(name)
if err != nil {
return nil, err
}

// Not local instance, get addresses from Azure ARM API.
if !isLocalInstance {
return addressGetter(name)
}

ipAddress := IPAddress{}
err := az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
err = az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
if err != nil {
return nil, err
}
@@ -51,16 +80,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
return addresses, nil
}

ip, err := az.GetIPForMachineWithRetry(name)
if err != nil {
glog.V(2).Infof("NodeAddresses(%s) abort backoff", name)
return nil, err
}

return []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: ip},
{Type: v1.NodeHostName, Address: string(name)},
}, nil
return addressGetter(name)
}

// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
@@ -75,11 +95,6 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
return az.NodeAddresses(ctx, name)
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (az *Cloud) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
return az.InstanceID(ctx, name)
}

// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
@@ -99,6 +114,11 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
return true, nil
}

// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
return false, cloudprovider.NotImplemented
}

func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
nodeName := mapNodeNameToVMName(name)
metadataName, err := az.metadata.Text("instance/compute/name")
@@ -146,6 +166,10 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
}
ssName, instanceID, err := extractVmssVMName(metadataName)
if err != nil {
if err == ErrorNotVmssInstance {
// Compose machineID for standard Node.
return az.getStandardMachineID(nodeName), nil
}
return "", err
}
// Compose instanceID based on ssName and instanceID for vmss instance.
@@ -191,7 +215,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
return fmt.Errorf("not supported")
return cloudprovider.NotImplemented
}

// CurrentNodeName returns the name of the node we are currently running on.
182
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -27,7 +28,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
@ -63,14 +64,24 @@ const (
|
||||
// to specify that the service should be exposed using an Azure security rule
|
||||
// that may be shared with other service, trading specificity of rules for an
|
||||
// increase in the number of services that can be exposed. This relies on the
|
||||
// Azure "augmented security rules" feature which at the time of writing is in
|
||||
// preview and available only in certain regions.
|
||||
// Azure "augmented security rules" feature.
|
||||
ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"
|
||||
|
||||
// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
|
||||
// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
|
||||
ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"
|
||||
|
||||
// ServiceAnnotationAllowedServiceTag is the annotation used on the service
|
||||
// to specify a list of allowed service tags separated by comma
|
||||
ServiceAnnotationAllowedServiceTag = "service.beta.kubernetes.io/azure-allowed-service-tags"
|
||||
)
|
||||
|
||||
// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
|
||||
// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
|
||||
const ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"
|
||||
var (
|
||||
// supportedServiceTags holds a list of supported service tags on Azure.
|
||||
// Refer https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags for more information.
|
||||
supportedServiceTags = sets.NewString("VirtualNetwork", "VIRTUAL_NETWORK", "AzureLoadBalancer", "AZURE_LOADBALANCER",
|
||||
"Internet", "INTERNET", "AzureTrafficManager", "Storage", "Sql")
|
||||
)
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and
|
||||
// if so, what its status is.
|
||||
@ -124,7 +135,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
|
||||
if lbStatus != nil && len(lbStatus.Ingress) > 0 {
|
||||
serviceIP = &lbStatus.Ingress[0].IP
|
||||
}
|
||||
glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancer for %s with IP %s, wantLb = true", service.Name, logSafe(serviceIP))
|
||||
glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
|
||||
if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -154,7 +165,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancerDeleted for %s with IP %s, wantLb = false", service.Name, serviceIPToCleanup)
|
||||
glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
|
||||
if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -189,7 +200,8 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
|
||||
// check if the service already has a load balancer
|
||||
if existingLBs != nil {
|
||||
for _, existingLB := range existingLBs {
|
||||
for i := range existingLBs {
|
||||
existingLB := existingLBs[i]
|
||||
if strings.EqualFold(*existingLB.Name, defaultLBName) {
|
||||
defaultLB = &existingLB
|
||||
}
|
||||
@ -209,8 +221,14 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
}
|
||||
}
|
||||
|
||||
// service does not have a load balancer, select one
|
||||
if wantLb {
|
||||
hasMode, _, _ := getServiceLoadBalancerMode(service)
|
||||
if az.useStandardLoadBalancer() && hasMode {
|
||||
return nil, nil, false, fmt.Errorf("standard load balancer doesn't work with annotation %q", ServiceAnnotationLoadBalancerMode)
|
||||
}
|
||||
|
||||
// service does not have a basic load balancer, select one.
|
||||
// Standard load balancer doesn't need this because all backends nodes should be added to same LB.
|
||||
if wantLb && !az.useStandardLoadBalancer() {
|
||||
// select new load balancer for service
|
||||
selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes)
|
||||
if err != nil {
|
||||
@ -227,6 +245,11 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
Location: &az.Location,
|
||||
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
|
||||
}
|
||||
if az.useStandardLoadBalancer() {
|
||||
defaultLB.Sku = &network.LoadBalancerSku{
|
||||
Name: network.LoadBalancerSkuNameStandard,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return defaultLB, nil, false, nil
|
||||
@ -239,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal)
|
||||
glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
|
||||
vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
|
||||
@ -295,10 +318,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
|
||||
|
||||
func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
|
||||
if lb == nil {
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus lb is nil")
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
|
||||
return nil, nil
|
||||
}
|
||||
if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
|
||||
return nil, nil
|
||||
}
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
@ -330,6 +354,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", *lbIP, lbFrontendIPConfigName, serviceName)
|
||||
return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil
|
||||
}
|
||||
}
|
||||
@ -416,8 +441,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
|
||||
DomainNameLabel: &domainNameLabel,
|
||||
}
|
||||
}
|
||||
pip.Tags = &map[string]*string{"service": &serviceName}
|
||||
glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
|
||||
pip.Tags = map[string]*string{"service": &serviceName}
|
||||
if az.useStandardLoadBalancer() {
|
||||
pip.Sku = &network.PublicIPAddressSku{
|
||||
Name: network.PublicIPAddressSkuNameStandard,
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
|
||||
err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
|
||||
if err != nil {
|
||||
@ -426,7 +457,9 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
|
||||
}
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name)
|
||||
|
||||
pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "")
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
pip, err = az.PublicIPAddressesClient.Get(ctx, pipResourceGroup, *pip.Name, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
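Throughout this diff the channel-based SDK calls (respChan/errChan) are replaced with context-aware calls bounded by getContextWithCancel. That helper is defined elsewhere in the package and is not shown here; assuming it only wraps context.WithCancel, a minimal equivalent would be:

package azure

import "context"

// getContextWithCancel (sketch): a background context plus its cancel function, so
// each SDK call can be bounded by the caller with `defer cancel()`.
func getContextWithCancel() (context.Context, context.CancelFunc) {
	return context.WithCancel(context.Background())
}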
|
||||
@ -441,13 +474,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
|
||||
func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(2).Infof("reconcileLoadBalancer(%s) - wantLb(%t): started", serviceName, wantLb)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
|
||||
lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb)
|
||||
if err != nil {
|
||||
glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
|
||||
return nil, err
|
||||
}
|
||||
lbName := *lb.Name
|
||||
glog.V(2).Infof("reconcileLoadBalancer(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
|
||||
lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
|
||||
lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
|
||||
lbBackendPoolName := getBackendPoolName(clusterName)
|
||||
@ -465,18 +499,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
foundBackendPool := false
|
||||
for _, bp := range newBackendPools {
|
||||
if strings.EqualFold(*bp.Name, lbBackendPoolName) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
|
||||
foundBackendPool = true
|
||||
break
|
||||
} else {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
|
||||
}
|
||||
}
|
||||
if !foundBackendPool {
|
||||
newBackendPools = append(newBackendPools, network.BackendAddressPool{
|
||||
Name: to.StringPtr(lbBackendPoolName),
|
||||
})
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
|
||||
|
||||
dirtyLb = true
|
||||
lb.BackendAddressPools = &newBackendPools
|
||||
@ -494,7 +528,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for i := len(newConfigs) - 1; i >= 0; i-- {
|
||||
config := newConfigs[i]
|
||||
if serviceOwnsFrontendIP(config, service) {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
@ -504,7 +538,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for i := len(newConfigs) - 1; i >= 0; i-- {
|
||||
config := newConfigs[i]
|
||||
if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
|
||||
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
@ -568,7 +602,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
Name: to.StringPtr(lbFrontendIPConfigName),
|
||||
FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
|
||||
})
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
}
|
||||
@ -669,15 +703,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for i := len(updatedProbes) - 1; i >= 0; i-- {
|
||||
existingProbe := updatedProbes[i]
|
||||
if serviceOwnsRule(service, *existingProbe.Name) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
|
||||
keepProbe := false
|
||||
if findProbe(expectedProbes, existingProbe) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
|
||||
keepProbe = true
|
||||
}
|
||||
if !keepProbe {
|
||||
updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
|
||||
dirtyProbes = true
|
||||
}
|
||||
}
|
||||
@ -686,11 +720,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for _, expectedProbe := range expectedProbes {
|
||||
foundProbe := false
|
||||
if findProbe(updatedProbes, expectedProbe) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
|
||||
foundProbe = true
|
||||
}
|
||||
if !foundProbe {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
|
||||
updatedProbes = append(updatedProbes, expectedProbe)
|
||||
dirtyProbes = true
|
||||
}
|
||||
@ -711,13 +745,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
existingRule := updatedRules[i]
|
||||
if serviceOwnsRule(service, *existingRule.Name) {
|
||||
keepRule := false
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
|
||||
if findRule(expectedRules, existingRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
|
||||
keepRule = true
|
||||
}
|
||||
if !keepRule {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
|
||||
updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
|
||||
dirtyRules = true
|
||||
}
|
||||
@ -727,11 +761,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for _, expectedRule := range expectedRules {
|
||||
foundRule := false
|
||||
if findRule(updatedRules, expectedRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
|
||||
foundRule = true
|
||||
}
|
||||
if !foundRule {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
|
||||
updatedRules = append(updatedRules, expectedRule)
|
||||
dirtyRules = true
|
||||
}
|
||||
@ -748,12 +782,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
|
||||
// When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
|
||||
// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
|
||||
glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
|
||||
|
||||
// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName, lb.BackendAddressPools)
|
||||
if err != nil {
|
||||
glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
|
||||
return nil, err
|
||||
@ -761,18 +795,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)
|
||||
|
||||
// Remove the LB.
|
||||
glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName)
|
||||
glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
|
||||
err = az.DeleteLBWithRetry(lbName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
|
||||
} else {
|
||||
glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
|
||||
err := az.CreateOrUpdateLBWithRetry(*lb)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -780,7 +814,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
// Refresh updated lb which will be used later in other places.
|
||||
newLB, exist, err := az.getAzureLoadBalancer(lbName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
|
||||
return nil, err
|
||||
}
|
||||
if !exist {
|
||||
@ -794,13 +828,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
if wantLb && nodes != nil {
|
||||
// Add the machines to the backend pool if they're not already
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
|
||||
return lb, nil
|
||||
}
|
||||
|
||||
@ -839,8 +873,12 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceTags, err := getServiceTags(service)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var sourceAddressPrefixes []string
|
||||
if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
|
||||
if (sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 {
|
||||
if !requiresInternalLoadBalancer(service) {
|
||||
sourceAddressPrefixes = []string{"Internet"}
|
||||
}
|
||||
@ -848,6 +886,9 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
for _, ip := range sourceRanges {
|
||||
sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
|
||||
}
|
||||
for _, serviceTag := range serviceTags {
|
||||
sourceAddressPrefixes = append(sourceAddressPrefixes, serviceTag)
|
||||
}
|
||||
}
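For clarity, the reworked block above ends up with three cases: no explicit ranges (or an allow-all 0.0.0.0/0 range) and no allowed service tags opens the rule to "Internet" unless the load balancer is internal; otherwise every CIDR from spec.loadBalancerSourceRanges and every allowed service tag becomes its own source prefix. A small standalone sketch of that merge, using plain strings instead of the provider's types:

// buildSourcePrefixes is an illustrative sketch of the merge above; in the real code
// sourceRanges comes from spec.loadBalancerSourceRanges and an allow-all range is
// treated the same as "no ranges".
func buildSourcePrefixes(sourceRanges, serviceTags []string, isInternal bool) []string {
	if len(sourceRanges) == 0 && len(serviceTags) == 0 {
		if !isInternal {
			return []string{"Internet"}
		}
		return nil
	}
	prefixes := make([]string, 0, len(sourceRanges)+len(serviceTags))
	prefixes = append(prefixes, sourceRanges...)
	prefixes = append(prefixes, serviceTags...)
	return prefixes
}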
|
||||
expectedSecurityRules := []network.SecurityRule{}
|
||||
|
||||
@ -885,8 +926,8 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
// update security rules
|
||||
dirtySg := false
|
||||
var updatedRules []network.SecurityRule
|
||||
if sg.SecurityRules != nil {
|
||||
updatedRules = *sg.SecurityRules
|
||||
if sg.SecurityGroupPropertiesFormat != nil && sg.SecurityGroupPropertiesFormat.SecurityRules != nil {
|
||||
updatedRules = *sg.SecurityGroupPropertiesFormat.SecurityRules
|
||||
}
|
||||
|
||||
for _, r := range updatedRules {
|
||||
@ -987,7 +1028,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
|
||||
if dirtySg {
|
||||
sg.SecurityRules = &updatedRules
|
||||
glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
|
||||
glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
|
||||
glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
|
||||
err := az.CreateOrUpdateSGWithRetry(sg)
|
||||
if err != nil {
|
||||
@ -1167,15 +1208,15 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
|
||||
for _, pip := range pips {
|
||||
if pip.Tags != nil &&
|
||||
(*pip.Tags)["service"] != nil &&
|
||||
*(*pip.Tags)["service"] == serviceName {
|
||||
(pip.Tags)["service"] != nil &&
|
||||
*(pip.Tags)["service"] == serviceName {
|
||||
// We need to process for pips belong to this service
|
||||
pipName := *pip.Name
|
||||
if wantLb && !isInternal && pipName == desiredPipName {
|
||||
// This is the only case we should preserve the
|
||||
// Public ip resource with match service tag
|
||||
} else {
|
||||
glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName)
|
||||
glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
|
||||
glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
|
||||
err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
|
||||
if err != nil {
|
||||
@ -1189,7 +1230,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(2).Infof("ensure(%s): pip(%s) - finished", serviceName, pipName)
|
||||
glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1218,13 +1259,30 @@ func findProbe(probes []network.Probe, probe network.Probe) bool {
|
||||
|
||||
func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool {
|
||||
for _, existingRule := range rules {
|
||||
if strings.EqualFold(*existingRule.Name, *rule.Name) {
|
||||
if strings.EqualFold(*existingRule.Name, *rule.Name) &&
|
||||
equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// equalLoadBalancingRulePropertiesFormat checks whether the provided LoadBalancingRulePropertiesFormat are equal.
// Note: only fields used in reconcileLoadBalancer are considered.
func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePropertiesFormat) bool {
if s == nil || t == nil {
return false
}

return reflect.DeepEqual(s.Protocol, t.Protocol) &&
reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) &&
reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) &&
reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) &&
reflect.DeepEqual(s.FrontendPort, t.FrontendPort) &&
reflect.DeepEqual(s.BackendPort, t.BackendPort) &&
reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP)
}
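findRule now requires both a case-insensitive name match and matching properties. The property comparison relies on reflect.DeepEqual following pointers, so two *int32 fields compare equal when the values they point at are equal, even though the pointers differ. A quick standalone check of that behaviour:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	a, b := int32(443), int32(443)
	// Distinct pointers, equal pointees: DeepEqual reports true, which is what
	// equalLoadBalancingRulePropertiesFormat depends on for FrontendPort/BackendPort.
	fmt.Println(reflect.DeepEqual(&a, &b)) // true
	c := int32(80)
	fmt.Println(reflect.DeepEqual(&a, &c)) // false
	var nilPtr *int32
	fmt.Println(reflect.DeepEqual(&a, nilPtr)) // false
}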
|
||||
|
||||
// This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction.
|
||||
// Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined.
|
||||
// We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal,
|
||||
@ -1320,3 +1378,23 @@ func useSharedSecurityRule(service *v1.Service) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getServiceTags(service *v1.Service) ([]string, error) {
if serviceTags, found := service.Annotations[ServiceAnnotationAllowedServiceTag]; found {
tags := strings.Split(strings.TrimSpace(serviceTags), ",")
for _, tag := range tags {
// Storage and Sql service tags support setting regions with suffix ".Region"
if strings.HasPrefix(tag, "Storage.") || strings.HasPrefix(tag, "Sql.") {
continue
}

if !supportedServiceTags.Has(tag) {
return nil, fmt.Errorf("only %q are allowed in service tags", supportedServiceTags.List())
}
}

return tags, nil
}

return nil, nil
}
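A short usage sketch for the new allowed-service-tags annotation, written as it might appear in a unit test inside this package (illustrative only, not part of the vendored tests):

func TestGetServiceTagsSketch(t *testing.T) {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{
		// "Storage.EastUS" passes because the ".Region" suffix check skips validation;
		// "VirtualNetwork" must be one of the supportedServiceTags entries.
		ServiceAnnotationAllowedServiceTag: "VirtualNetwork,Storage.EastUS",
	}
	tags, err := getServiceTags(svc)
	if err != nil || len(tags) != 2 {
		t.Fatalf("unexpected result: tags=%v err=%v", tags, err)
	}
}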
|
||||
|
115
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@ -97,3 +97,116 @@ func TestFindProbe(t *testing.T) {
|
||||
assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindRule(t *testing.T) {
|
||||
tests := []struct {
|
||||
msg string
|
||||
existingRule []network.LoadBalancingRule
|
||||
curRule network.LoadBalancingRule
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
msg: "empty existing rules should return false",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
msg: "rule names unmatch should return false",
|
||||
existingRule: []network.LoadBalancingRule{
|
||||
{
|
||||
Name: to.StringPtr("httpProbe1"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
FrontendPort: to.Int32Ptr(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
curRule: network.LoadBalancingRule{
|
||||
Name: to.StringPtr("httpProbe2"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
FrontendPort: to.Int32Ptr(1),
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
msg: "rule names match while frontend ports unmatch should return false",
|
||||
existingRule: []network.LoadBalancingRule{
|
||||
{
|
||||
Name: to.StringPtr("httpProbe"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
FrontendPort: to.Int32Ptr(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
curRule: network.LoadBalancingRule{
|
||||
Name: to.StringPtr("httpProbe"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
FrontendPort: to.Int32Ptr(2),
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
msg: "rule names match while backend ports unmatch should return false",
|
||||
existingRule: []network.LoadBalancingRule{
|
||||
{
|
||||
Name: to.StringPtr("httpProbe"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
BackendPort: to.Int32Ptr(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
curRule: network.LoadBalancingRule{
|
||||
Name: to.StringPtr("httpProbe"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
BackendPort: to.Int32Ptr(2),
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
msg: "rule names match while LoadDistribution unmatch should return false",
|
||||
existingRule: []network.LoadBalancingRule{
|
||||
{
|
||||
Name: to.StringPtr("probe1"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
LoadDistribution: network.Default,
|
||||
},
|
||||
},
|
||||
},
|
||||
curRule: network.LoadBalancingRule{
|
||||
Name: to.StringPtr("probe2"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
LoadDistribution: network.SourceIP,
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
msg: "both rule names and LoadBalancingRulePropertiesFormats match should return true",
|
||||
existingRule: []network.LoadBalancingRule{
|
||||
{
|
||||
Name: to.StringPtr("matchName"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
BackendPort: to.Int32Ptr(2),
|
||||
FrontendPort: to.Int32Ptr(2),
|
||||
LoadDistribution: network.SourceIP,
|
||||
},
|
||||
},
|
||||
},
|
||||
curRule: network.LoadBalancingRule{
|
||||
Name: to.StringPtr("matchName"),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
BackendPort: to.Int32Ptr(2),
|
||||
FrontendPort: to.Int32Ptr(2),
|
||||
LoadDistribution: network.SourceIP,
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
findResult := findRule(test.existingRule, test.curRule)
|
||||
assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
|
||||
}
|
||||
}
|
||||
|
116
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
@ -17,13 +17,17 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/disk"
|
||||
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
//ManagedDiskController : managed disk controller struct
|
||||
@ -36,7 +40,8 @@ func newManagedDiskController(common *controllerCommon) (*ManagedDiskController,
|
||||
}
|
||||
|
||||
//CreateManagedDisk : create managed disk
|
||||
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
|
||||
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string,
|
||||
sizeGB int, tags map[string]string) (string, error) {
|
||||
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
|
||||
|
||||
newTags := make(map[string]*string)
|
||||
@ -54,18 +59,24 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
}
|
||||
|
||||
diskSizeGB := int32(sizeGB)
|
||||
model := disk.Model{
|
||||
model := compute.Disk{
|
||||
Location: &c.common.location,
|
||||
Tags: &newTags,
|
||||
Properties: &disk.Properties{
|
||||
AccountType: disk.StorageAccountTypes(storageAccountType),
|
||||
Tags: newTags,
|
||||
Sku: &compute.DiskSku{
|
||||
Name: compute.StorageAccountTypes(storageAccountType),
|
||||
},
|
||||
DiskProperties: &compute.DiskProperties{
|
||||
DiskSizeGB: &diskSizeGB,
|
||||
CreationData: &disk.CreationData{CreateOption: disk.Empty},
|
||||
CreationData: &compute.CreationData{CreateOption: compute.Empty},
|
||||
}}
|
||||
cancel := make(chan struct{})
|
||||
respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel)
|
||||
<-respChan
|
||||
err := <-errChan
|
||||
|
||||
if resourceGroup == "" {
|
||||
resourceGroup = c.common.resourceGroup
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, model)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
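Because the removed arm/disk lines and the added compute lines are interleaved above, the resulting request model under the 2017-12-01 compute API is easier to read in one piece. The following restates only what the added lines already show, reusing the function's own diskName, sizeGB, storageAccountType, newTags and resource-group handling:

diskSizeGB := int32(sizeGB)
model := compute.Disk{
	Location: &c.common.location,
	Tags:     newTags,
	Sku: &compute.DiskSku{
		Name: compute.StorageAccountTypes(storageAccountType),
	},
	DiskProperties: &compute.DiskProperties{
		DiskSizeGB:   &diskSizeGB,
		CreationData: &compute.CreationData{CreateOption: compute.Empty},
	},
}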
|
||||
@ -73,7 +84,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
diskID := ""
|
||||
|
||||
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
|
||||
provisionState, id, err := c.getDisk(diskName)
|
||||
provisionState, id, err := c.getDisk(resourceGroup, diskName)
|
||||
diskID = id
|
||||
// We are waiting for provisioningState==Succeeded
|
||||
// We don't want to hand-off managed disks to k8s while they are
|
||||
@ -99,10 +110,15 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
//DeleteManagedDisk : delete managed disk
|
||||
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
|
||||
diskName := path.Base(diskURI)
|
||||
cancel := make(chan struct{})
|
||||
respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel)
|
||||
<-respChan
|
||||
err := <-errChan
|
||||
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
_, err = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -115,15 +131,73 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
|
||||
}
|
||||
|
||||
// return: disk provisionState, diskID, error
|
||||
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
|
||||
result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName)
|
||||
func (c *ManagedDiskController) getDisk(resourceGroup, diskName string) (string, string, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if result.Properties != nil && (*result.Properties).ProvisioningState != nil {
|
||||
return *(*result.Properties).ProvisioningState, *result.ID, nil
|
||||
if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil {
|
||||
return *(*result.DiskProperties).ProvisioningState, *result.ID, nil
|
||||
}
|
||||
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// ResizeDisk Expand the disk to new size
|
||||
func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
diskName := path.Base(diskURI)
|
||||
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil {
|
||||
return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName)
|
||||
}
|
||||
|
||||
requestBytes := newSize.Value()
|
||||
// Azure resizes in chunks of GiB (not GB)
|
||||
requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024))
|
||||
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
|
||||
|
||||
glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
|
||||
// If disk already of greater or equal size than requested we return
|
||||
if *result.DiskProperties.DiskSizeGB >= requestGiB {
|
||||
return newSizeQuant, nil
|
||||
}
|
||||
|
||||
result.DiskProperties.DiskSizeGB = &requestGiB
|
||||
|
||||
ctx, cancel = getContextWithCancel()
|
||||
defer cancel()
|
||||
if _, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
|
||||
|
||||
return newSizeQuant, nil
|
||||
}
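The resize path rounds the requested size up to whole GiB before comparing it with the current DiskSizeGB, and only issues a CreateOrUpdate when the disk must grow. util.RoundUpSize is the usual ceiling division; a worked example of the arithmetic, with the helper written out here purely for illustration:

package main

import "fmt"

// roundUpGiB mirrors util.RoundUpSize(bytes, 1<<30): ceiling division to whole GiB.
func roundUpGiB(bytes int64) int32 {
	const giB = int64(1) << 30
	return int32((bytes + giB - 1) / giB)
}

func main() {
	fmt.Println(roundUpGiB(5 << 30))     // 5: exactly 5Gi stays 5
	fmt.Println(roundUpGiB(5<<30 + 1))   // 6: anything past a GiB boundary rounds up
	fmt.Println(roundUpGiB(int64(10e9))) // 10: a decimal "10GB" request becomes 10GiB
}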
|
||||
|
||||
// get resource group name from a managed disk URI, e.g. return {group-name} according to
// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}
// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get
func getResourceGroupFromDiskURI(diskURI string) (string, error) {
fields := strings.Split(diskURI, "/")
if len(fields) != 9 || fields[3] != "resourceGroups" {
return "", fmt.Errorf("invalid disk URI: %s", diskURI)
}
return fields[4], nil
}
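A quick check of the split logic against a well-formed managed-disk URI (subscription, group and disk names below are placeholders):

diskURI := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Compute/disks/my-disk"
// fields[0] is "" because the URI starts with "/", so a valid URI splits into 9 fields.
fields := strings.Split(diskURI, "/")
fmt.Println(len(fields), fields[3], fields[4]) // 9 resourceGroups my-rg
rg, err := getResourceGroupFromDiskURI(diskURI)
fmt.Println(rg, err) // my-rg <nil>

Note that the comparison is case-sensitive, so a URI spelled with lower-case "resourcegroups" (as in the function's own comment) would be rejected as invalid.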
|
||||
|
60
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -30,7 +30,7 @@ import (
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
|
||||
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
|
||||
glog.V(10).Infof("list: START clusterName=%q", clusterName)
|
||||
glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
return processRoutes(routeTable, existsRouteTable, err)
|
||||
}
|
||||
@ -50,7 +50,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
|
||||
for i, route := range *routeTable.Routes {
|
||||
instance := mapRouteNameToNodeName(*route.Name)
|
||||
cidr := *route.AddressPrefix
|
||||
glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
|
||||
glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
|
||||
|
||||
kubeRoutes[i] = &cloudprovider.Route{
|
||||
Name: *route.Name,
|
||||
@ -60,13 +60,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(10).Info("list: FINISH")
|
||||
glog.V(10).Info("ListRoutes: FINISH")
|
||||
return kubeRoutes, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
|
||||
glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return err
|
||||
} else if existsRouteTable {
|
||||
return nil
|
||||
@ -81,17 +81,17 @@ func (az *Cloud) createRouteTable() error {
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -107,11 +107,11 @@ func (az *Cloud) createRouteTable() error {
|
||||
// route.Name will be ignored, although the cloud-provider may use nameHint
|
||||
// to create a more user-meaningful name.
|
||||
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
|
||||
return err
|
||||
}
|
||||
targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
|
||||
targetIP, _, err := az.getIPForMachine(kubeRoute.TargetNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -126,51 +126,51 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
|
||||
},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
|
||||
resp := <-respChan
|
||||
err = <-errChan
|
||||
glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
retryErr := az.CreateOrUpdateRouteWithRetry(route)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("create abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRoute deletes the specified managed route
|
||||
// Route should be as returned by ListRoutes
|
||||
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
|
||||
respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
|
||||
glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)
|
||||
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) {
|
||||
glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
retryErr := az.DeleteRouteWithRetry(routeName)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("delete abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
@ -24,7 +24,7 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
|
||||
@ -94,10 +94,8 @@ func TestCreateRoute(t *testing.T) {
|
||||
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
|
||||
|
||||
nodeIP := "2.4.6.8"
|
||||
fakeVM.NodeToIP = map[string]map[string]string{
|
||||
"": {
|
||||
"node": nodeIP,
|
||||
},
|
||||
fakeVM.NodeToIP = map[string]string{
|
||||
"node": nodeIP,
|
||||
}
|
||||
|
||||
err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
|
||||
|
134
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
@ -28,8 +28,8 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -52,13 +52,15 @@ const (
|
||||
InternalLoadBalancerNameSuffix = "-internal"
|
||||
|
||||
// nodeLabelRole specifies the role of a node
|
||||
nodeLabelRole = "kubernetes.io/role"
|
||||
nodeLabelRole = "kubernetes.io/role"
|
||||
nicFailedState = "Failed"
|
||||
|
||||
storageAccountNameMaxLength = 24
|
||||
)
|
||||
|
||||
var errNotInVMSet = errors.New("vm is not in the vmset")
|
||||
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
|
||||
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
|
||||
|
||||
// getStandardMachineID returns the full identifier of a virtual machine.
|
||||
func (az *Cloud) getStandardMachineID(machineName string) string {
|
||||
@ -123,7 +125,7 @@ func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (
|
||||
// This would be the name for Azure LoadBalancer resource.
|
||||
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
|
||||
lbNamePrefix := vmSetName
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) {
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
|
||||
lbNamePrefix = clusterName
|
||||
}
|
||||
if isInternal {
|
||||
@ -172,7 +174,7 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor
|
||||
securityProto = network.SecurityRuleProtocolUDP
|
||||
return &transportProto, &securityProto, nil, nil
|
||||
default:
|
||||
return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers")
|
||||
return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers")
|
||||
}
|
||||
|
||||
}
|
||||
@ -284,11 +286,11 @@ outer:
|
||||
return smallest, nil
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
|
||||
return -1, fmt.Errorf("securityGroup priorities are exhausted")
|
||||
}
|
||||
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
|
||||
return az.vmSet.GetIPByNodeName(string(nodeName), "")
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
|
||||
return az.vmSet.GetIPByNodeName(string(nodeName))
|
||||
}
|
||||
|
||||
var polyTable = crc32.MakeTable(crc32.Koopman)
|
||||
@ -366,12 +368,15 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
|
||||
var err error
|
||||
|
||||
machine, err = as.getVirtualMachine(types.NodeName(name))
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
if err != nil {
|
||||
if as.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
|
||||
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
@ -396,7 +401,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
|
||||
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
machine, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
@ -424,21 +429,37 @@ func (as *availabilitySet) GetPrimaryVMSetName() string {
|
||||
return as.Config.PrimaryAvailabilitySetName
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
func (as *availabilitySet) GetIPByNodeName(name, vmSetName string) (string, error) {
|
||||
nic, err := as.GetPrimaryInterface(name, vmSetName)
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) {
|
||||
nic, err := as.GetPrimaryInterface(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", err
|
||||
glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
return targetIP, nil
privateIP := *ipConfig.PrivateIPAddress
publicIP := ""
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
pipID := *ipConfig.PublicIPAddress.ID
pipName, err := getLastSegment(pipID)
if err != nil {
return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", name, pipID)
}
pip, existsPip, err := as.getPublicIPAddress(as.ResourceGroup, pipName)
if err != nil {
return "", "", err
}
if existsPip {
publicIP = *pip.IPAddress
}
}

return privateIP, publicIP, nil
|
||||
}
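The public IP branch above only needs the resource name, which getLastSegment is expected to derive from the ARM resource ID of the public IP. A minimal sketch of that idea, assuming the usual /subscriptions/.../publicIPAddresses/<name> layout; lastSegment below is an illustrative stand-in, not the vendored helper.

package main

import (
	"fmt"
	"strings"
)

// lastSegment returns the final path segment of an ARM resource ID.
// Hypothetical helper mirroring what getLastSegment is expected to do.
func lastSegment(resourceID string) (string, error) {
	parts := strings.Split(strings.TrimSuffix(resourceID, "/"), "/")
	name := parts[len(parts)-1]
	if name == "" {
		return "", fmt.Errorf("resource ID %q has no name segment", resourceID)
	}
	return name, nil
}

func main() {
	pipID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/node-pip"
	name, err := lastSegment(pipID)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // node-pip
}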
|
||||
|
||||
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
|
||||
@ -534,8 +555,13 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
// GetPrimaryInterface gets machine primary network interface by node name.
|
||||
func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
return as.getPrimaryInterfaceWithVMSet(nodeName, "")
|
||||
}
|
||||
|
||||
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
|
||||
var machine compute.VirtualMachine
|
||||
|
||||
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
|
||||
@ -553,8 +579,14 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check availability set
|
||||
if vmSetName != "" {
|
||||
// Check availability set name. Note that vmSetName is empty string when getting
|
||||
// the Node's IP address. While vmSetName is not empty, it should be checked with
|
||||
// Node's real availability set name:
|
||||
// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
|
||||
// availability set is mismatched with vmSetName.
|
||||
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
|
||||
// don't check vmSet for it.
|
||||
if vmSetName != "" && !as.useStandardLoadBalancer() {
|
||||
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
|
||||
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
@ -563,7 +595,9 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw
|
||||
}
|
||||
}
|
||||
|
||||
nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "")
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
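Every call into the newer SDK clients in this diff now takes a context, so the pattern of getContextWithCancel plus a deferred cancel repeats throughout. A rough sketch of that pattern, assuming getContextWithCancel simply wraps context.WithCancel on a background context; doGet stands in for a client call such as InterfacesClient.Get.

package main

import (
	"context"
	"fmt"
	"time"
)

// getContextWithCancel is assumed to be a thin wrapper over context.WithCancel.
func getContextWithCancel() (context.Context, context.CancelFunc) {
	return context.WithCancel(context.Background())
}

// doGet stands in for an SDK call that honors cancellation.
func doGet(ctx context.Context, name string) error {
	select {
	case <-time.After(10 * time.Millisecond):
		fmt.Println("fetched", name)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := getContextWithCancel()
	defer cancel() // releases the context once the request finishes or the caller returns

	if err := doGet(ctx, "nic-0"); err != nil {
		fmt.Println("request failed:", err)
	}
}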
|
||||
@ -573,9 +607,9 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw
|
||||
|
||||
// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string) error {
|
||||
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
nic, err := as.GetPrimaryInterface(vmName, vmSetName)
|
||||
nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
|
||||
if err != nil {
|
||||
if err == errNotInVMSet {
|
||||
glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
|
||||
@ -586,6 +620,11 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
return err
|
||||
}
|
||||
|
||||
if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState {
glog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, nic.Name)
return nil
}
|
||||
|
||||
var primaryIPConfig *network.InterfaceIPConfiguration
|
||||
primaryIPConfig, err = getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
@ -604,6 +643,24 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
if as.useStandardLoadBalancer() && len(newBackendPools) > 0 {
|
||||
// Although standard load balancer supports backends from multiple availability
|
||||
// sets, the same network interface couldn't be added to more than one load balancer of
|
||||
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
|
||||
// about this.
|
||||
for _, pool := range newBackendPools {
|
||||
backendPool := *pool.ID
|
||||
matches := backendPoolIDRE.FindStringSubmatch(backendPool)
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newBackendPools = append(newBackendPools,
|
||||
network.BackendAddressPool{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
@ -613,11 +670,11 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
|
||||
nicName := *nic.Name
|
||||
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic)
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
|
||||
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
|
||||
retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
|
||||
if retryErr != nil {
|
||||
@ -634,18 +691,23 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
hostUpdates := make([]func() error, len(nodes))
|
||||
for i, node := range nodes {
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
hostUpdates := make([]func() error, 0, len(nodes))
|
||||
for _, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
|
||||
continue
|
||||
}
|
||||
|
||||
f := func() error {
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName)
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
hostUpdates[i] = f
|
||||
hostUpdates = append(hostUpdates, f)
|
||||
}
|
||||
|
||||
errs := utilerrors.AggregateGoroutines(hostUpdates...)
|
||||
@ -657,7 +719,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
|
||||
}
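Because master nodes can now be skipped before any work is queued, EnsureHostsInPool builds hostUpdates with append on a zero-length, pre-sized slice instead of indexed assignment, which would have left nil entries for the skipped nodes and tripped up AggregateGoroutines. A small sketch of that shape, assuming utilerrors is k8s.io/apimachinery/pkg/util/errors; the node names are invented.

package main

import (
	"fmt"
	"strings"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	nodes := []string{"master-0", "agent-0", "agent-1"}

	// Pre-size the backing array but keep length zero so skipped nodes never leave nil funcs behind.
	hostUpdates := make([]func() error, 0, len(nodes))
	for _, node := range nodes {
		if strings.HasPrefix(node, "master-") {
			continue // excluded from the backend pool, nothing queued
		}
		localNode := node
		hostUpdates = append(hostUpdates, func() error {
			fmt.Println("ensuring", localNode, "in backend pool")
			return nil
		})
	}

	// AggregateGoroutines runs every func in its own goroutine and collects the non-nil errors.
	if errs := utilerrors.AggregateGoroutines(hostUpdates...); errs != nil {
		fmt.Println("failed:", errs)
	}
}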
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
// Do nothing for availability set.
|
||||
return nil
|
||||
}
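Earlier in ensureHostInPool, the standard-SKU branch skips a NIC that is already attached to another load balancer of the same kind by pulling the LB name out of an existing backend pool ID and comparing its "-internal" suffix against the requested pool. A rough sketch of that matching; the regular expression is only an assumed stand-in for backendPoolIDRE, and the pool ID is made up.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Assumed shape of backendPoolIDRE: capture the load balancer name out of a backend pool resource ID.
var backendPoolIDRE = regexp.MustCompile(`/loadBalancers/(.+)/backendAddressPools/`)

const internalLoadBalancerNameSuffix = "-internal" // mirrors InternalLoadBalancerNameSuffix

// boundToSameKindOfLB reports whether an existing pool ID already points at an
// internal (or external) load balancer matching the pool we are about to add.
func boundToSameKindOfLB(existingPoolID string, wantInternal bool) bool {
	matches := backendPoolIDRE.FindStringSubmatch(existingPoolID)
	if len(matches) != 2 {
		return false
	}
	lbName := matches[1]
	return strings.HasSuffix(lbName, internalLoadBalancerNameSuffix) == wantInternal
}

func main() {
	poolID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/cluster-internal/backendAddressPools/cluster"
	fmt.Println(boundToSameKindOfLB(poolID, true))  // true: already on an internal LB
	fmt.Println(boundToSameKindOfLB(poolID, false)) // false: the existing LB is not external
}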
|
||||
|
87
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
@ -130,6 +130,7 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
cases := []struct {
|
||||
description string
|
||||
lbName string
|
||||
useStandardLB bool
|
||||
clusterName string
|
||||
expectedVMSet string
|
||||
}{
|
||||
@ -160,7 +161,93 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if c.useStandardLB {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuStandard
|
||||
} else {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuBasic
|
||||
}
|
||||
vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
|
||||
assert.Equal(t, c.expectedVMSet, vmset, c.description)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLoadBalancerName(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
az.PrimaryAvailabilitySetName = "primary"
|
||||
|
||||
cases := []struct {
|
||||
description string
|
||||
vmSet string
|
||||
isInternal bool
|
||||
useStandardLB bool
|
||||
clusterName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
description: "default external LB should get primary vmset",
|
||||
vmSet: "primary",
|
||||
clusterName: "azure",
|
||||
expected: "azure",
|
||||
},
|
||||
{
|
||||
description: "default internal LB should get primary vmset",
|
||||
vmSet: "primary",
|
||||
clusterName: "azure",
|
||||
isInternal: true,
|
||||
expected: "azure-internal",
|
||||
},
|
||||
{
|
||||
description: "non-default external LB should get its own vmset",
|
||||
vmSet: "as",
|
||||
clusterName: "azure",
|
||||
expected: "as",
|
||||
},
|
||||
{
|
||||
description: "non-default internal LB should get its own vmset",
|
||||
vmSet: "as",
|
||||
clusterName: "azure",
|
||||
isInternal: true,
|
||||
expected: "as-internal",
|
||||
},
|
||||
{
|
||||
description: "default standard external LB should get cluster name",
|
||||
vmSet: "primary",
|
||||
useStandardLB: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure",
|
||||
},
|
||||
{
|
||||
description: "default standard internal LB should get cluster name",
|
||||
vmSet: "primary",
|
||||
useStandardLB: true,
|
||||
isInternal: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure-internal",
|
||||
},
|
||||
{
|
||||
description: "non-default standard external LB should get cluster-name",
|
||||
vmSet: "as",
|
||||
useStandardLB: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure",
|
||||
},
|
||||
{
|
||||
description: "non-default standard internal LB should get cluster-name",
|
||||
vmSet: "as",
|
||||
useStandardLB: true,
|
||||
isInternal: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure-internal",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if c.useStandardLB {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuStandard
|
||||
} else {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuBasic
|
||||
}
|
||||
loadbalancerName := az.getLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
|
||||
assert.Equal(t, c.expected, loadbalancerName, c.description)
|
||||
}
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
@ -19,7 +19,7 @@ package azure
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
@ -19,7 +19,7 @@ package azure
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
)
|
||||
|
||||
func TestCreateFileShare(t *testing.T) {
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
@ -31,7 +31,9 @@ type accountWithLocation struct {
|
||||
|
||||
// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation
|
||||
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -60,7 +62,10 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := az.StorageAccountClient.ListKeys(ctx, az.ResourceGroup, account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -107,13 +112,16 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
|
||||
glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
|
||||
accountName, az.ResourceGroup, location, accountType)
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
Tags: &map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
|
||||
Kind: storage.StorageV2,
|
||||
AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)},
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Location: &location}
|
||||
cancel := make(chan struct{})
|
||||
|
||||
_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
|
||||
err := <-errchan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := az.StorageAccountClient.Create(ctx, az.ResourceGroup, accountName, cp)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
)
|
||||
|
||||
func TestGetStorageAccessKeys(t *testing.T) {
|
||||
|
179
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@ -34,13 +35,88 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var testClusterName = "testCluster"
|
||||
|
||||
func TestParseConfig(t *testing.T) {
|
||||
azureConfig := `{
|
||||
"aadClientCertPassword": "aadClientCertPassword",
|
||||
"aadClientCertPath": "aadClientCertPath",
|
||||
"aadClientId": "aadClientId",
|
||||
"aadClientSecret": "aadClientSecret",
|
||||
"cloud":"AzurePublicCloud",
|
||||
"cloudProviderBackoff": true,
|
||||
"cloudProviderBackoffDuration": 1,
|
||||
"cloudProviderBackoffExponent": 1,
|
||||
"cloudProviderBackoffJitter": 1,
|
||||
"cloudProviderBackoffRetries": 1,
|
||||
"cloudProviderRatelimit": true,
|
||||
"cloudProviderRateLimitBucket": 1,
|
||||
"CloudProviderRateLimitBucketWrite": 1,
|
||||
"cloudProviderRateLimitQPS": 1,
|
||||
"CloudProviderRateLimitQPSWrite": 1,
|
||||
"location": "location",
|
||||
"maximumLoadBalancerRuleCount": 1,
|
||||
"primaryAvailabilitySetName": "primaryAvailabilitySetName",
|
||||
"primaryScaleSetName": "primaryScaleSetName",
|
||||
"resourceGroup": "resourceGroup",
|
||||
"routeTableName": "routeTableName",
|
||||
"securityGroupName": "securityGroupName",
|
||||
"subnetName": "subnetName",
|
||||
"subscriptionId": "subscriptionId",
|
||||
"tenantId": "tenantId",
|
||||
"useInstanceMetadata": true,
|
||||
"useManagedIdentityExtension": true,
|
||||
"vnetName": "vnetName",
|
||||
"vnetResourceGroup": "vnetResourceGroup",
|
||||
vmType: "standard"
|
||||
}`
|
||||
expected := &Config{
|
||||
AzureAuthConfig: auth.AzureAuthConfig{
|
||||
AADClientCertPassword: "aadClientCertPassword",
|
||||
AADClientCertPath: "aadClientCertPath",
|
||||
AADClientID: "aadClientId",
|
||||
AADClientSecret: "aadClientSecret",
|
||||
Cloud: "AzurePublicCloud",
|
||||
SubscriptionID: "subscriptionId",
|
||||
TenantID: "tenantId",
|
||||
UseManagedIdentityExtension: true,
|
||||
},
|
||||
CloudProviderBackoff: true,
|
||||
CloudProviderBackoffDuration: 1,
|
||||
CloudProviderBackoffExponent: 1,
|
||||
CloudProviderBackoffJitter: 1,
|
||||
CloudProviderBackoffRetries: 1,
|
||||
CloudProviderRateLimit: true,
|
||||
CloudProviderRateLimitBucket: 1,
|
||||
CloudProviderRateLimitBucketWrite: 1,
|
||||
CloudProviderRateLimitQPS: 1,
|
||||
CloudProviderRateLimitQPSWrite: 1,
|
||||
Location: "location",
|
||||
MaximumLoadBalancerRuleCount: 1,
|
||||
PrimaryAvailabilitySetName: "primaryAvailabilitySetName",
|
||||
PrimaryScaleSetName: "primaryScaleSetName",
|
||||
ResourceGroup: "resourceGroup",
|
||||
RouteTableName: "routeTableName",
|
||||
SecurityGroupName: "securityGroupName",
|
||||
SubnetName: "subnetName",
|
||||
UseInstanceMetadata: true,
|
||||
VMType: "standard",
|
||||
VnetName: "vnetName",
|
||||
VnetResourceGroup: "vnetResourceGroup",
|
||||
}
|
||||
|
||||
buffer := bytes.NewBufferString(azureConfig)
|
||||
config, err := parseConfig(buffer)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, config)
|
||||
}
|
||||
|
||||
// Test flipServiceInternalAnnotation
|
||||
func TestFlipServiceInternalAnnotation(t *testing.T) {
|
||||
svc := getTestService("servicea", v1.ProtocolTCP, 80)
|
||||
@ -139,9 +215,11 @@ func testLoadBalancerServiceDefaultModeSelection(t *testing.T, isInternal bool)
|
||||
expectedLBName = testClusterName + "-internal"
|
||||
}
|
||||
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lb := (*result.Value)[0]
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lb := result[0]
|
||||
lbCount := len(result)
|
||||
expectedNumOfLB := 1
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
@ -189,15 +267,17 @@ func testLoadBalancerServiceAutoModeSelection(t *testing.T, isInternal bool) {
|
||||
|
||||
// expected is MIN(index, availabilitySetCount)
|
||||
expectedNumOfLB := int(math.Min(float64(index), float64(availabilitySetCount)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
|
||||
maxRules := 0
|
||||
minRules := serviceCount
|
||||
for _, lb := range *result.Value {
|
||||
for _, lb := range result {
|
||||
ruleCount := len(*lb.LoadBalancingRules)
|
||||
if ruleCount < minRules {
|
||||
minRules = ruleCount
|
||||
@ -252,8 +332,10 @@ func testLoadBalancerServicesSpecifiedSelection(t *testing.T, isInternal bool) {
|
||||
|
||||
// expected is MIN(index, 2)
|
||||
expectedNumOfLB := int(math.Min(float64(index), float64(2)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
@ -290,8 +372,10 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
|
||||
|
||||
// expected is MIN(index, az.Config.MaximumLoadBalancerRuleCount)
|
||||
expectedNumOfLBRules := int(math.Min(float64(index), float64(az.Config.MaximumLoadBalancerRuleCount)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLBRules {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLBRules, lbCount)
|
||||
}
|
||||
@ -360,8 +444,10 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo
|
||||
|
||||
// expected is MIN(index, availabilitySetCount)
|
||||
expectedNumOfLB := int(math.Min(float64(index), float64(availabilitySetCount)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
@ -864,6 +950,7 @@ func getTestCloud() (az *Cloud) {
|
||||
RouteTableName: "rt",
|
||||
PrimaryAvailabilitySetName: "as",
|
||||
MaximumLoadBalancerRuleCount: 250,
|
||||
VMType: vmTypeStandard,
|
||||
},
|
||||
}
|
||||
az.DisksClient = newFakeDisksClient()
|
||||
@ -969,7 +1056,9 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
|
||||
},
|
||||
},
|
||||
}
|
||||
az.InterfacesClient.CreateOrUpdate(az.Config.ResourceGroup, nicName, newNIC, nil)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
az.InterfacesClient.CreateOrUpdate(ctx, az.Config.ResourceGroup, nicName, newNIC)
|
||||
|
||||
// create vm
|
||||
asID := az.getAvailabilitySetID(asName)
|
||||
@ -990,8 +1079,10 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
|
||||
},
|
||||
}
|
||||
|
||||
_, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.Config.ResourceGroup, vmName, newVM, nil)
|
||||
if err := <-errChan; err != nil {
|
||||
vmCtx, vmCancel := getContextWithCancel()
|
||||
defer vmCancel()
|
||||
_, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM)
|
||||
if err != nil {
|
||||
}
|
||||
// add to kubernetes
|
||||
newNode := &v1.Node{
|
||||
@ -1087,11 +1178,13 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
az.SecurityGroupsClient.CreateOrUpdate(
|
||||
ctx,
|
||||
az.ResourceGroup,
|
||||
az.SecurityGroupName,
|
||||
sg,
|
||||
nil)
|
||||
sg)
|
||||
|
||||
return &sg
|
||||
}
|
||||
@ -1237,12 +1330,12 @@ func validatePublicIP(t *testing.T, publicIP *network.PublicIPAddress, service *
|
||||
t.Errorf("Expected publicIP resource exists, when it is not an internal service")
|
||||
}
|
||||
|
||||
if publicIP.Tags == nil || (*publicIP.Tags)["service"] == nil {
|
||||
if publicIP.Tags == nil || publicIP.Tags["service"] == nil {
|
||||
t.Errorf("Expected publicIP resource has tags[service]")
|
||||
}
|
||||
|
||||
serviceName := getServiceName(service)
|
||||
if serviceName != *(*publicIP.Tags)["service"] {
|
||||
if serviceName != *(publicIP.Tags["service"]) {
|
||||
t.Errorf("Expected publicIP resource has matching tags[service]")
|
||||
}
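These assertions change because the 2017-09-01 network SDK models Tags as map[string]*string rather than *map[string]*string, so the extra map dereference disappears. A contrived sketch of just the two access patterns; the tag value is invented.

package main

import "fmt"

func main() {
	svc := "default/servicea"

	// Old SDK shape: a pointer to a map, so reads need (*tags)[key].
	oldTags := &map[string]*string{"service": &svc}
	fmt.Println(*(*oldTags)["service"])

	// New SDK shape: a plain map of string pointers, indexed directly.
	newTags := map[string]*string{"service": &svc}
	if v, ok := newTags["service"]; ok && v != nil {
		fmt.Println(*v)
	}
}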
|
||||
// We cannot use service.Spec.LoadBalancerIP to compare with
|
||||
@ -1765,13 +1858,15 @@ func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) {
|
||||
az.VnetName,
|
||||
subName)
|
||||
|
||||
_, errChan := az.SubnetsClient.CreateOrUpdate(az.VnetResourceGroup, az.VnetName, subName,
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := az.SubnetsClient.CreateOrUpdate(ctx, az.VnetResourceGroup, az.VnetName, subName,
|
||||
network.Subnet{
|
||||
ID: &subnetID,
|
||||
Name: &subName,
|
||||
}, nil)
|
||||
})
|
||||
|
||||
if err := <-errChan; err != nil {
|
||||
if err != nil {
|
||||
t.Errorf("Subnet cannot be created or update, %v", err)
|
||||
}
|
||||
svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = subName
|
||||
@ -2604,3 +2699,39 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
|
||||
// func TestIfServiceIsEditedFromSharedRuleToOwnRuleThenItIsRemovedFromSharedRuleAndOwnRuleIsCreated(t *testing.T) {
|
||||
// t.Error()
|
||||
// }
|
||||
|
||||
func TestGetResourceGroupFromDiskURI(t *testing.T) {
|
||||
tests := []struct {
|
||||
diskURL string
|
||||
expectedResult string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
diskURL: "/subscriptions/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
|
||||
expectedResult: "azure-k8s1102",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
diskURL: "/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
|
||||
expectedResult: "",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
diskURL: "",
|
||||
expectedResult: "",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
result, err := getResourceGroupFromDiskURI(test.diskURL)
|
||||
assert.Equal(t, result, test.expectedResult, "Expect result not equal with getResourceGroupFromDiskURI(%s) return: %q, expected: %q",
|
||||
test.diskURL, result, test.expectedResult)
|
||||
|
||||
if test.expectError {
|
||||
assert.NotNil(t, err, "Expect error during getResourceGroupFromDiskURI(%s)", test.diskURL)
|
||||
} else {
|
||||
assert.Nil(t, err, "Expect error is nil during getResourceGroupFromDiskURI(%s)", test.diskURL)
|
||||
}
|
||||
}
|
||||
}
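The cases above pin down what getResourceGroupFromDiskURI must do: return the segment after resourceGroups when the URI follows the /subscriptions/<id>/resourceGroups/<rg>/... layout, and fail otherwise. A standalone sketch with the same behaviour, assuming a simple segment walk rather than the vendored implementation.

package main

import (
	"fmt"
	"strings"
)

// resourceGroupFromDiskURI is an illustrative re-implementation, not the vendored helper.
func resourceGroupFromDiskURI(diskURI string) (string, error) {
	fields := strings.Split(diskURI, "/")
	// Expect .../subscriptions/<sub>/resourceGroups/<rg>/providers/...
	for i := 0; i < len(fields)-1; i++ {
		if strings.EqualFold(fields[i], "resourceGroups") && i >= 2 &&
			strings.EqualFold(fields[i-2], "subscriptions") {
			return fields[i+1], nil
		}
	}
	return "", fmt.Errorf("invalid disk URI %q", diskURI)
}

func main() {
	uri := "/subscriptions/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/some-disk"
	rg, err := resourceGroupFromDiskURI(uri)
	fmt.Println(rg, err) // azure-k8s1102 <nil>
}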
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
@ -17,8 +17,8 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -34,10 +34,10 @@ type VMSet interface {
|
||||
GetInstanceIDByNodeName(name string) (string, error)
|
||||
// GetInstanceTypeByNodeName gets the instance type by node name.
|
||||
GetInstanceTypeByNodeName(name string) (string, error)
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
GetIPByNodeName(name, vmSetName string) (string, error)
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error)
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
GetIPByNodeName(name string) (string, string, error)
|
||||
// GetPrimaryInterface gets machine primary network interface by node name.
|
||||
GetPrimaryInterface(nodeName string) (network.Interface, error)
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
GetNodeNameByProviderID(providerID string) (types.NodeName, error)
|
||||
|
||||
@ -54,18 +54,14 @@ type VMSet interface {
|
||||
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
|
||||
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
EnsureBackendPoolDeleted(poolID, vmSetName string) error
|
||||
EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
|
||||
// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
|
||||
DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error)
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
|
||||
GetNextDiskLun(nodeName types.NodeName) (int32, error)
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
|
||||
// GetDataDisks gets a list of data disks attached to the node.
|
||||
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
|
||||
}
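With GetIPByNodeName and GetPrimaryInterface both losing the vmSetName parameter, the availability-set and scale-set implementations have to track the interface exactly. A compile-time assertion keeps that honest; the sketch below uses a trimmed-down stand-in for VMSet (the real interface has many more methods and returns network.Interface).

package main

import "fmt"

// vmSet is a trimmed-down stand-in for the VMSet interface above.
type vmSet interface {
	GetIPByNodeName(name string) (string, string, error)
	GetPrimaryInterface(nodeName string) (string, error)
}

type fakeAvailabilitySet struct{}

func (fakeAvailabilitySet) GetIPByNodeName(name string) (string, string, error) {
	return "10.0.0.4", "", nil
}

func (fakeAvailabilitySet) GetPrimaryInterface(nodeName string) (string, error) {
	return nodeName + "-nic", nil
}

// Compile-time check: the build breaks here if the signatures drift from the interface.
var _ vmSet = fakeAvailabilitySet{}

func main() {
	var vs vmSet = fakeAvailabilitySet{}
	private, public, _ := vs.GetIPByNodeName("node-0")
	fmt.Println(private, public)
}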
|
||||
|
294
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
@ -24,13 +24,14 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
@ -90,7 +91,7 @@ func newScaleSet(az *Cloud) (VMSet, error) {
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
|
||||
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
|
||||
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
instanceID, err = getScaleSetVMInstanceID(nodeName)
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
@ -116,12 +117,12 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co
|
||||
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return ssName, instanceID, *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
|
||||
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
|
||||
// The node must belong to one of scale sets.
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
|
||||
cachedVM, err := ss.vmssVMCache.Get(vmName)
|
||||
if err != nil {
|
||||
@ -133,7 +134,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm c
|
||||
return vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
|
||||
return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
|
||||
@ -161,7 +162,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
// NodeName is not part of providerID for vmss instances.
|
||||
scaleSetName, err := extractScaleSetNameByExternalID(providerID)
|
||||
scaleSetName, err := extractScaleSetNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
|
||||
@ -243,26 +244,28 @@ func (ss *scaleSet) GetPrimaryVMSetName() string {
|
||||
return ss.Config.PrimaryScaleSetName
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName, vmSetName string) (string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName, vmSetName)
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet,
|
||||
// fix this after it is supported.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q, %q), err=%v", nodeName, nodeName, vmSetName, err)
|
||||
return "", err
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
return "", err
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
return targetIP, "", nil
|
||||
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine computepreview.VirtualMachineScaleSetVM) (string, error) {
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
@ -293,9 +296,9 @@ func getScaleSetVMInstanceID(machineName string) (string, error) {
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
|
||||
|
||||
// extractScaleSetNameByExternalID extracts the scaleset name by node's externalID.
|
||||
func extractScaleSetNameByExternalID(externalID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(externalID)
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by node's ProviderID.
|
||||
func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
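The rename from extractScaleSetNameByExternalID to extractScaleSetNameByProviderID matches what the regexp actually parses now. For a VMSS instance the provider ID is expected to look roughly like .../virtualMachineScaleSets/<scale set>/virtualMachines/<instance>; scaleSetNameRE itself is not shown in this hunk, so the pattern below is only an assumed equivalent used to illustrate the extraction.

package main

import (
	"fmt"
	"regexp"
)

// Assumed equivalent of scaleSetNameRE; the vendored expression may differ.
var scaleSetNameRE = regexp.MustCompile(`/virtualMachineScaleSets/(.+)/virtualMachines/`)

func extractScaleSetName(providerID string) (string, error) {
	matches := scaleSetNameRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		return "", fmt.Errorf("not a vmss instance: %q", providerID)
	}
	return matches[1], nil
}

func main() {
	providerID := "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/agentpool-vmss/virtualMachines/3"
	name, err := extractScaleSetName(providerID)
	fmt.Println(name, err) // agentpool-vmss <nil>
}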
|
||||
@ -324,12 +327,12 @@ func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
}
|
||||
|
||||
// listScaleSetVMs lists VMs belonging to the specified scale set.
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]computepreview.VirtualMachineScaleSetVM, error) {
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(computepreview.InstanceView))
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView))
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
@ -415,7 +418,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
@ -423,7 +426,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName, "")
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName)
|
||||
}
|
||||
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
|
||||
@ -432,11 +435,6 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check scale set name.
|
||||
if vmSetName != "" && !strings.EqualFold(ssName, vmSetName) {
|
||||
return network.Interface{}, errNotInVMSet
|
||||
}
|
||||
|
||||
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
|
||||
@ -449,7 +447,9 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
|
||||
return network.Interface{}, err
|
||||
@ -465,8 +465,8 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
}
|
||||
|
||||
// getScaleSetWithRetry gets scale set with exponential backoff retry
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMachineScaleSet, bool, error) {
|
||||
var result computepreview.VirtualMachineScaleSet
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
var result compute.VirtualMachineScaleSet
|
||||
var exists bool
|
||||
|
||||
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
@ -479,7 +479,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMac
|
||||
|
||||
if cached != nil {
|
||||
exists = true
|
||||
result = *(cached.(*computepreview.VirtualMachineScaleSet))
|
||||
result = *(cached.(*compute.VirtualMachineScaleSet))
|
||||
}
|
||||
|
||||
return true, nil
|
||||
@ -489,7 +489,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMac
|
||||
}
|
||||
|
||||
// getPrimaryNetworkConfiguration gets primary network interface configuration for scale sets.
|
||||
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetNetworkConfiguration, error) {
|
||||
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
|
||||
networkConfigurations := *networkConfigurationList
|
||||
if len(networkConfigurations) == 1 {
|
||||
return &networkConfigurations[0], nil
|
||||
@ -505,7 +505,7 @@ func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]c
|
||||
return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName)
|
||||
}
|
||||
|
||||
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetIPConfiguration, error) {
|
||||
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
|
||||
ipConfigurations := *config.IPConfigurations
|
||||
if len(ipConfigurations) == 1 {
|
||||
return &ipConfigurations[0], nil
|
||||
@ -522,7 +522,7 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *computepreview.Virtual
|
||||
}
|
||||
|
||||
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet computepreview.VirtualMachineScaleSet) error {
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
@ -533,7 +533,7 @@ func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet computepr
|
||||
}
|
||||
|
||||
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
@ -543,9 +543,44 @@ func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstance
|
||||
})
|
||||
}
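createOrUpdateVMSSWithRetry and updateVMSSInstancesWithRetry share one shape: a single SDK call wrapped in wait.ExponentialBackoff with the settings from ss.requestBackoff(). A reduced sketch of that wrapper, assuming k8s.io/apimachinery/pkg/util/wait and a fake operation in place of the VMSS client; the backoff values are invented.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // first retry delay
		Factor:   2.0,                    // double the delay each attempt
		Jitter:   0.1,
		Steps:    4, // give up after four attempts
	}

	attempts := 0
	// The condition func stands in for one CreateOrUpdate/UpdateInstances call:
	// return (true, nil) to stop, (false, nil) to retry, or (false, err) to abort.
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		if attempts < 3 {
			fmt.Println("transient failure, retrying")
			return false, nil
		}
		fmt.Println("succeeded on attempt", attempts)
		return true, nil
	})
	if err != nil {
		fmt.Println("gave up:", err)
	}
}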
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
// getNodesScaleSets returns scalesets with instanceIDs and standard node names for given nodes.
|
||||
func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, []*v1.Node, error) {
|
||||
scalesets := make(map[string]sets.String)
|
||||
standardNodes := []*v1.Node{}
|
||||
|
||||
for _, curNode := range nodes {
|
||||
if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
|
||||
standardNodes = append(standardNodes, curNode)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := scalesets[curScaleSetName]; !ok {
|
||||
scalesets[curScaleSetName] = sets.NewString()
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(curNode.Spec.ProviderID)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
scalesets[curScaleSetName].Insert(instanceID)
|
||||
}
|
||||
|
||||
return scalesets, standardNodes, nil
|
||||
}
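getNodesScaleSets groups instance IDs per scale set in a sets.String, so duplicates collapse and List() later hands the UpdateInstances call a sorted slice. A small sketch of that grouping, assuming sets is k8s.io/apimachinery/pkg/util/sets; the scale set name and IDs are made up.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// scale set name -> instance IDs, mirroring the map built above.
	scalesets := map[string]sets.String{}

	add := func(ssName, instanceID string) {
		if _, ok := scalesets[ssName]; !ok {
			scalesets[ssName] = sets.NewString()
		}
		scalesets[ssName].Insert(instanceID)
	}

	add("agentpool-vmss", "0")
	add("agentpool-vmss", "2")
	add("agentpool-vmss", "0") // duplicate, absorbed by the set

	for name, ids := range scalesets {
		fmt.Println(name, ids.List()) // List() returns the IDs sorted
	}
}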
|
||||
|
||||
// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are
|
||||
// participating in the vmSet's LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
|
||||
glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
|
||||
@ -572,7 +607,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
|
||||
// Update primary IP configuration's LoadBalancerBackendAddressPools.
|
||||
foundPool := false
|
||||
newBackendPools := []computepreview.SubResource{}
|
||||
newBackendPools := []compute.SubResource{}
|
||||
if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
}
|
||||
@ -583,8 +618,26 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 {
|
||||
// Although standard load balancer supports backends from multiple vmss,
|
||||
// the same network interface couldn't be added to more than one load balancer of
|
||||
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
|
||||
// about this.
|
||||
for _, pool := range newBackendPools {
|
||||
backendPool := *pool.ID
|
||||
matches := backendPoolIDRE.FindStringSubmatch(backendPool)
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newBackendPools = append(newBackendPools,
|
||||
computepreview.SubResource{
|
||||
compute.SubResource{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
@ -607,30 +660,8 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
}
|
||||
}
|
||||
|
||||
// Construct instanceIDs from nodes.
|
||||
instanceIDs := []string{}
|
||||
for _, curNode := range nodes {
|
||||
curScaleSetName, err := extractScaleSetNameByExternalID(curNode.Spec.ExternalID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Node %q is not belonging to any scale sets, omitting it", curNode.Name)
|
||||
continue
|
||||
}
|
||||
if curScaleSetName != vmSetName {
|
||||
glog.V(4).Infof("Node %q is not belonging to scale set %q, omitting it", curNode.Name, vmSetName)
|
||||
continue
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(curNode.Spec.ExternalID)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get last segment from %q: %v", curNode.Spec.ExternalID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
instanceIDs = append(instanceIDs, instanceID)
|
||||
}
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
ctx, cancel := getContextWithCancel()
|
||||
@ -652,27 +683,68 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
scalesets, standardNodes, err := ss.getNodesScaleSets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.EnsureBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, vmSetName, vmSetName, err)
|
||||
glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
for ssName, instanceIDs := range scalesets {
|
||||
// Only add nodes belonging to specified vmSet for basic SKU LB.
|
||||
if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if instanceIDs.Len() == 0 {
|
||||
// This may happen when scaling a vmss capacity to 0.
|
||||
glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
|
||||
// InstanceIDs is required to update vmss, use * instead here since there are no nodes actually.
|
||||
instanceIDs.Insert("*")
|
||||
}
|
||||
|
||||
err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal)
|
||||
if err != nil {
|
||||
glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if ss.useStandardLoadBalancer() && len(standardNodes) > 0 {
|
||||
err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal)
|
||||
if err != nil {
|
||||
glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset.
|
||||
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
|
||||
glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
glog.V(2).Infof("ss.EnsureBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, vmSetName, vmSetName)
|
||||
glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
|
||||
return nil
}

// Find primary network interface configuration.
networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, ssName)
if err != nil {
return err
}

// Find primary IP configuration.
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, ssName)
if err != nil {
return err
}
@ -682,12 +754,12 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
return nil
}
existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
newBackendPools := []computepreview.SubResource{}
newBackendPools := []compute.SubResource{}
foundPool := false
for i := len(existingBackendPools) - 1; i >= 0; i-- {
curPool := existingBackendPools[i]
if strings.EqualFold(poolID, *curPool.ID) {
glog.V(10).Infof("EnsureBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, vmSetName)
glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
foundPool = true
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
}
@ -699,17 +771,17 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {

// Update scale set with backoff.
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName)
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
}
}
if err != nil {
@ -718,17 +790,19 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {

// Update instances to latest VMSS model.
instanceIDs := []string{"*"}
vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
InstanceIds: &instanceIDs,
}
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName)
instanceCtx, instanceCancel := getContextWithCancel()
defer instanceCancel()
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs)
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", vmSetName, err)
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", vmSetName)
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
}
}
if err != nil {
@ -738,14 +812,16 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
// Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB.
// TODO: remove this workaround when figuring out the root cause.
if len(newBackendPools) == 0 {
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName)
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
updateCtx, updateCancel := getContextWithCancel()
defer updateCancel()
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
if retryErr != nil {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
}
}
}
@ -753,6 +829,48 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
return nil
}

// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools are deleted from the specified vmSet.
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
if backendAddressPools == nil {
return nil
}

scalesets := sets.NewString()
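// Collect the names of all scale sets that own an IP configuration in the matching backend pool.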
for _, backendPool := range *backendAddressPools {
if strings.EqualFold(*backendPool.ID, poolID) && backendPool.BackendIPConfigurations != nil {
for _, ipConfigurations := range *backendPool.BackendIPConfigurations {
if ipConfigurations.ID == nil {
continue
}

ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
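// The backend IP configuration ID embeds the owning scale set name, so it can be parsed out of the resource ID.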
if err != nil {
glog.V(4).Infof("backend IP configuration %q does not belong to any vmss, omitting it", *ipConfigurations.ID)
continue
}

scalesets.Insert(ssName)
}
break
}
}

for ssName := range scalesets {
// For basic LBs, only remove nodes that belong to the specified vmSet from the backend pool.
if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) {
continue
}

err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName)
if err != nil {
glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
return err
}
}

return nil
}

// getVmssMachineID returns the full identifier of a vmss virtual machine.
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
return fmt.Sprintf(
18
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
18
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
@ -47,13 +47,19 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
}

func extractVmssVMName(name string) (string, string, error) {
ret := strings.Split(name, vmssNameSeparator)
if len(ret) != 2 {
split := strings.SplitAfter(name, vmssNameSeparator)
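// SplitAfter keeps the separator, so a scale set name that itself contains the separator is reassembled correctly below.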
if len(split) < 2 {
glog.Errorf("Failed to extract vmssVMName %q", name)
return "", "", ErrorNotVmssInstance
}

return ret[0], ret[1], nil
ssName := strings.Join(split[0:len(split)-1], "")
// removing the trailing `vmssNameSeparator` since we used SplitAfter
ssName = ssName[:len(ssName)-1]

instanceID := split[len(split)-1]

return ssName, instanceID, nil
}

func (ss *scaleSet) newVmssCache() (*timedCache, error) {
@ -61,12 +67,13 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
exists, realErr := checkResourceExistsFromError(err)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
return nil, nil
}

@ -141,12 +148,13 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
exists, realErr := checkResourceExistsFromError(err)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}

6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go
generated
vendored
@ -46,6 +46,12 @@ func TestExtractVmssVMName(t *testing.T) {
expectedScaleSet: "vm",
expectedInstanceID: "1234",
},
{
description: "correct vmss VM name with Extra Separator should return correct scaleSet and instanceID",
vmName: "vm_test_1234",
expectedScaleSet: "vm_test",
expectedInstanceID: "1234",
},
}

for _, c := range cases {
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
"fmt"
"testing"

computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/stretchr/testify/assert"
)

@ -37,8 +37,8 @@ func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {

func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
scaleSets := make(map[string]map[string]computepreview.VirtualMachineScaleSet)
scaleSets["rg"] = map[string]computepreview.VirtualMachineScaleSet{
scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
scaleSetName: {
Name: &scaleSetName,
},
@ -46,24 +46,24 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
virtualMachineScaleSetsClient.setFakeStore(scaleSets)

virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
ssVMs := make(map[string]map[string]computepreview.VirtualMachineScaleSetVM)
ssVMs["rg"] = make(map[string]computepreview.VirtualMachineScaleSetVM)
ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
for i := range vmList {
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
nodeName := vmList[i]
instanceID := fmt.Sprintf("%d", i)
vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
networkInterfaces := []computepreview.NetworkInterfaceReference{
networkInterfaces := []compute.NetworkInterfaceReference{
{
ID: &nodeName,
},
}
ssVMs["rg"][vmName] = computepreview.VirtualMachineScaleSetVM{
VirtualMachineScaleSetVMProperties: &computepreview.VirtualMachineScaleSetVMProperties{
OsProfile: &computepreview.OSProfile{
ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
OsProfile: &compute.OSProfile{
ComputerName: &nodeName,
},
NetworkProfile: &computepreview.NetworkProfile{
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &networkInterfaces,
},
},
73
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
73
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
@ -19,11 +19,13 @@ package azure
import (
"fmt"
"net/http"
"strings"
"time"

"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
@ -39,18 +41,18 @@ var (
// checkResourceExistsFromError inspects an error and returns true if err is nil,
// false (along with the error message) if the error is an autorest.DetailedError with StatusCode=404,
// and returns the error back if it has another status code or is another type of error.
func checkResourceExistsFromError(err error) (bool, error) {
func checkResourceExistsFromError(err error) (bool, string, error) {
if err == nil {
return true, nil
return true, "", nil
}
v, ok := err.(autorest.DetailedError)
if !ok {
return false, err
return false, "", err
}
if v.StatusCode == http.StatusNotFound {
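// A 404 means the resource does not exist: report non-existence and pass the message back without an error.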
return false, nil
return false, err.Error(), nil
}
return false, v
return false, "", v
}

// If it is StatusNotFound return nil,
@ -103,13 +105,17 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi
}

var realErr error
pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "")
exists, realErr = checkResourceExistsFromError(err)
var message string
ctx, cancel := getContextWithCancel()
defer cancel()
pip, err = az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "")
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return pip, false, realErr
}

if !exists {
glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
return pip, false, nil
}

@ -118,6 +124,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi

func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) {
var realErr error
var message string
var rg string

if len(az.VnetResourceGroup) > 0 {
@ -126,13 +133,16 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
rg = az.ResourceGroup
}

subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "")
exists, realErr = checkResourceExistsFromError(err)
ctx, cancel := getContextWithCancel()
defer cancel()
subnet, err = az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "")
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return subnet, false, realErr
}

if !exists {
glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
return subnet, false, nil
}

@ -153,6 +163,10 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi
}

func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
if az.SecurityGroupName == "" {
return nsg, fmt.Errorf("securityGroupName is not configured")
}

securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
if err != nil {
return nsg, err
@ -173,13 +187,16 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
// case we do get instance view every time to fulfill the azure_zones requirement without hitting
// throttling.
// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, key, compute.InstanceView)
exists, realErr := checkResourceExistsFromError(err)
ctx, cancel := getContextWithCancel()
defer cancel()
vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
return nil, nil
}

@ -191,13 +208,17 @@ func (az *Cloud) newVMCache() (*timedCache, error) {

func (az *Cloud) newLBCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
lb, err := az.LoadBalancerClient.Get(az.ResourceGroup, key, "")
exists, realErr := checkResourceExistsFromError(err)
ctx, cancel := getContextWithCancel()
defer cancel()

lb, err := az.LoadBalancerClient.Get(ctx, az.ResourceGroup, key, "")
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
glog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
return nil, nil
}

@ -209,13 +230,16 @@ func (az *Cloud) newLBCache() (*timedCache, error) {

func (az *Cloud) newNSGCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
nsg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, key, "")
exists, realErr := checkResourceExistsFromError(err)
ctx, cancel := getContextWithCancel()
defer cancel()
nsg, err := az.SecurityGroupsClient.Get(ctx, az.ResourceGroup, key, "")
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
glog.V(2).Infof("Security group %q not found with message: %q", key, message)
return nil, nil
}

@ -227,13 +251,16 @@ func (az *Cloud) newNSGCache() (*timedCache, error) {

func (az *Cloud) newRouteTableCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
rt, err := az.RouteTablesClient.Get(az.ResourceGroup, key, "")
exists, realErr := checkResourceExistsFromError(err)
ctx, cancel := getContextWithCancel()
defer cancel()
rt, err := az.RouteTablesClient.Get(ctx, az.ResourceGroup, key, "")
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
glog.V(2).Infof("Route table %q not found with message: %q", key, message)
return nil, nil
}

@ -242,3 +269,11 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) {

return newTimedcache(rtCacheTTL, getter)
}

func (az *Cloud) useStandardLoadBalancer() bool {
return strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard)
}

func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
}
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
@ -42,7 +42,7 @@ func TestExtractNotFound(t *testing.T) {
}

for _, test := range tests {
exists, err := checkResourceExistsFromError(test.err)
exists, _, err := checkResourceExistsFromError(test.err)
if test.exists != exists {
t.Errorf("expected: %v, saw: %v", test.exists, exists)
}