Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@@ -7,19 +7,8 @@ load(
go_library(
name = "go_default_library",
srcs = [
"cloud.go",
"doc.go",
"plugins.go",
],
srcs = ["doc.go"],
importpath = "k8s.io/kubernetes/pkg/cloudprovider",
deps = [
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
],
)
filegroup(


@@ -2,6 +2,7 @@ approvers:
- mikedanese
- dims
- wlan0
- andrewsykim
reviewers:
- thockin
- lavalamp
@@ -42,3 +43,5 @@ reviewers:
- wlan0
- cheftako
- andrewsykim
labels:
- sig/cloud-provider


@@ -1,16 +1,18 @@
##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Cloud Providers in this directory will continue to be actively developed or maintained and supported at their current level of support as a longer-term solution evolves.
##### Deprecation Notice: cloud providers in this directory are deprecated and will be removed in favor of external (a.k.a out-of-tree) providers. Existing providers in this directory (a.k.a in-tree providers) should only make small incremental changes as needed and avoid large refactors or new features. New providers seeking to support Kubernetes should follow the out-of-tree model as specified in the [Running Kubernetes Cloud Controller Manager docs](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/). For more information on the future of Kubernetes cloud providers see [KEP-0002](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0002-cloud-controller-manager.md) and [KEP-0013](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0013-build-deploy-ccm.md).
Cloud Providers in this directory will continue to be actively developed or maintained and supported at their current level of support as a longer-term solution evolves.
## Overview:
The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development.
#### Guidance for potential cloud providers:
* Support for cloud providers is currently in a state of flux. Background information on the motivation and the proposal for improvement is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md).
* In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information).
* Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers.
* It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved.
* Though the fully productized support envisioned in the proposal is still 2 - 3 releases out, the foundational work is underway, and a motivated cloud provider could accomplish the work in a forward-looking way. Contributors are encouraged to assist with the implementation of the design outlined in the proposal.
#### Some additional context on status / direction:
* 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow.
* Setting cloud-provider=external allows for creation of a separate controller-manager binary
* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization.


@@ -1,215 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"context"
"errors"
"fmt"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/kubernetes/pkg/controller"
)
// Interface is an abstract, pluggable interface for cloud providers.
type Interface interface {
// Initialize provides the cloud with a kubernetes client builder and may spawn goroutines
// to perform housekeeping activities within the cloud provider.
Initialize(clientBuilder controller.ControllerClientBuilder)
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
LoadBalancer() (LoadBalancer, bool)
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
Instances() (Instances, bool)
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
Zones() (Zones, bool)
// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
Clusters() (Clusters, bool)
// Routes returns a routes interface along with whether the interface is supported.
Routes() (Routes, bool)
// ProviderName returns the cloud provider ID.
ProviderName() string
// HasClusterID returns true if a ClusterID is required and set
HasClusterID() bool
}
type InformerUser interface {
// SetInformers sets the informer on the cloud object.
SetInformers(informerFactory informers.SharedInformerFactory)
}
// Clusters is an abstract, pluggable interface for clusters of containers.
type Clusters interface {
// ListClusters lists the names of the available clusters.
ListClusters(ctx context.Context) ([]string, error)
// Master gets back the address (either DNS name or IP address) of the master node for the cluster.
Master(ctx context.Context, clusterName string) (string, error)
}
// TODO(#6812): Use a shorter name that's less likely to be longer than cloud
// providers' name length limits.
func GetLoadBalancerName(service *v1.Service) string {
//GCE requires that the name of a load balancer starts with a lower case letter.
ret := "a" + string(service.UID)
ret = strings.Replace(ret, "-", "", -1)
//AWS requires that the name of a load balancer is shorter than 32 bytes.
if len(ret) > 32 {
ret = ret[:32]
}
return ret
}
// GetInstanceProviderID builds a ProviderID for a node in a cloud.
func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) {
instances, ok := cloud.Instances()
if !ok {
return "", fmt.Errorf("failed to get instances from cloud provider")
}
instanceID, err := instances.InstanceID(ctx, nodeName)
if err != nil {
return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
}
return cloud.ProviderName() + "://" + instanceID, nil
}
// LoadBalancer is an abstract, pluggable interface for load balancers.
type LoadBalancer interface {
// TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service
// GetLoadBalancer returns whether the specified load balancer exists, and
// if so, what its status is.
// Implementations must treat the *v1.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
// Implementations must treat the *v1.Service and *v1.Node
// parameters as read-only and not modify them.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
// UpdateLoadBalancer updates hosts under the specified load balancer.
// Implementations must treat the *v1.Service and *v1.Node
// parameters as read-only and not modify them.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error
// EnsureLoadBalancerDeleted deletes the specified load balancer if it
// exists, returning nil if the load balancer specified either didn't exist or
// was successfully deleted.
// This construction is useful because many cloud providers' load balancers
// have multiple underlying components, meaning a Get could say that the LB
// doesn't exist even if some part of it is still laying around.
// Implementations must treat the *v1.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error
}
// Instances is an abstract, pluggable interface for sets of instances.
type Instances interface {
// NodeAddresses returns the addresses of the specified instance.
// TODO(roberthbailey): This currently is only used in such a way that it
// returns the address of the calling instance. We should do a rename to
// make this clearer.
NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error)
// NodeAddressesByProviderID returns the addresses of the specified instance.
// The instance is specified using the providerID of the node. The
// ProviderID is a unique identifier of the node. This will not be called
// from the node whose nodeaddresses are being queried. i.e. local metadata
// services cannot be used in this method to obtain nodeaddresses
NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
InstanceID(ctx context.Context, nodeName types.NodeName) (string, error)
// InstanceType returns the type of the specified instance.
InstanceType(ctx context.Context, name types.NodeName) (string, error)
// InstanceTypeByProviderID returns the type of the specified instance.
InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error)
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error
// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error)
// InstanceExistsByProviderID returns true if the instance for the given provider id still is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error)
// InstanceShutdownByProviderID returns true if the instance is shutdown in cloudprovider
InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error)
}
// Route is a representation of an advanced routing rule.
type Route struct {
// Name is the name of the routing rule in the cloud-provider.
// It will be ignored in a Create (although nameHint may influence it)
Name string
// TargetNode is the NodeName of the target instance.
TargetNode types.NodeName
// DestinationCIDR is the CIDR format IP range that this routing rule
// applies to.
DestinationCIDR string
// Blackhole is set to true if this is a blackhole route
// The node controller will delete the route if it is in the managed range.
Blackhole bool
}
// Routes is an abstract, pluggable interface for advanced routing rules.
type Routes interface {
// ListRoutes lists all managed routes that belong to the specified clusterName
ListRoutes(ctx context.Context, clusterName string) ([]*Route, error)
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
CreateRoute(ctx context.Context, clusterName string, nameHint string, route *Route) error
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
DeleteRoute(ctx context.Context, clusterName string, route *Route) error
}
var (
InstanceNotFound = errors.New("instance not found")
DiskNotFound = errors.New("disk is not found")
NotImplemented = errors.New("unimplemented")
)
// Zone represents the location of a particular machine.
type Zone struct {
FailureDomain string
Region string
}
// Zones is an abstract, pluggable interface for zone enumeration.
type Zones interface {
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
// In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone.
// For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone
// can no longer be called from the kubelets.
GetZone(ctx context.Context) (Zone, error)
// GetZoneByProviderID returns the Zone containing the current zone and locality region of the node specified by providerId
// This method is particularly used in the context of external cloud providers where node initialization must be done
// outside the kubelets.
GetZoneByProviderID(ctx context.Context, providerID string) (Zone, error)
// GetZoneByNodeName returns the Zone containing the current zone and locality region of the node specified by node name
// This method is particularly used in the context of external cloud providers where node initialization must be done
// outside the kubelets.
GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (Zone, error)
}
// PVLabeler is an abstract, pluggable interface for fetching labels for volumes
type PVLabeler interface {
GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error)
}
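
For context on the interface file removed above (cloud.go, per the BUILD change at the top of this commit; the updated doc.go below points consumers at k8s.io/cloud-provider instead): a provider plugs in by implementing cloudprovider.Interface and returning false for each sub-interface it does not support. A minimal sketch, assuming a hypothetical fakeCloud type in a hypothetical package; none of these names come from this commit:

package fake

import (
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller"
)

// fakeCloud implements cloudprovider.Interface but reports every optional
// sub-interface as unsupported.
type fakeCloud struct{}

func (f *fakeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}

func (f *fakeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return nil, false }
func (f *fakeCloud) Instances() (cloudprovider.Instances, bool)       { return nil, false }
func (f *fakeCloud) Zones() (cloudprovider.Zones, bool)               { return nil, false }
func (f *fakeCloud) Clusters() (cloudprovider.Clusters, bool)         { return nil, false }
func (f *fakeCloud) Routes() (cloudprovider.Routes, bool)             { return nil, false }

func (f *fakeCloud) ProviderName() string { return "fake" }
func (f *fakeCloud) HasClusterID() bool   { return true }

Callers are expected to check the returned boolean before using a sub-interface, which is how GetInstanceProviderID above fails cleanly when Instances() is unsupported.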


@@ -15,4 +15,5 @@ limitations under the License.
*/
// Package cloudprovider supplies interfaces and implementations for cloud service providers.
package cloudprovider // import "k8s.io/kubernetes/pkg/cloudprovider"
// To use: import cloudprovider "k8s.io/cloud-provider"
package cloudprovider


@@ -1,142 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"fmt"
"io"
"os"
"sync"
"github.com/golang/glog"
)
// Factory is a function that returns a cloudprovider.Interface.
// The config parameter provides an io.Reader handler to the factory in
// order to load specific configurations. If no configuration is provided
// the parameter is nil.
type Factory func(config io.Reader) (Interface, error)
// All registered cloud providers.
var (
providersMutex sync.Mutex
providers = make(map[string]Factory)
deprecatedCloudProviders = []struct {
name string
external bool
detail string
}{
{"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"},
{"photon", false, "The Photon Controller project is no longer maintained."},
}
)
const externalCloudProvider = "external"
// RegisterCloudProvider registers a cloudprovider.Factory by name. This
// is expected to happen during app startup.
func RegisterCloudProvider(name string, cloud Factory) {
providersMutex.Lock()
defer providersMutex.Unlock()
if _, found := providers[name]; found {
glog.Fatalf("Cloud provider %q was registered twice", name)
}
glog.V(1).Infof("Registered cloud provider %q", name)
providers[name] = cloud
}
// IsCloudProvider returns true if name corresponds to an already registered
// cloud provider.
func IsCloudProvider(name string) bool {
providersMutex.Lock()
defer providersMutex.Unlock()
_, found := providers[name]
return found
}
// GetCloudProvider creates an instance of the named cloud provider, or nil if
// the name is unknown. The error return is only used if the named provider
// was known but failed to initialize. The config parameter specifies the
// io.Reader handler of the configuration file for the cloud provider, or nil
// for no configuration.
func GetCloudProvider(name string, config io.Reader) (Interface, error) {
providersMutex.Lock()
defer providersMutex.Unlock()
f, found := providers[name]
if !found {
return nil, nil
}
return f(config)
}
// Detects if the string is an external cloud provider
func IsExternal(name string) bool {
return name == externalCloudProvider
}
// InitCloudProvider creates an instance of the named cloud provider.
func InitCloudProvider(name string, configFilePath string) (Interface, error) {
var cloud Interface
var err error
if name == "" {
glog.Info("No cloud provider specified.")
return nil, nil
}
if IsExternal(name) {
glog.Info("External cloud provider specified")
return nil, nil
}
for _, provider := range deprecatedCloudProviders {
if provider.name == name {
detail := provider.detail
if provider.external {
detail = fmt.Sprintf("Please use 'external' cloud provider for %s: %s", name, provider.detail)
}
glog.Warningf("WARNING: %s built-in cloud provider is now deprecated. %s", name, detail)
break
}
}
if configFilePath != "" {
var config *os.File
config, err = os.Open(configFilePath)
if err != nil {
glog.Fatalf("Couldn't open cloud provider configuration %s: %#v",
configFilePath, err)
}
defer config.Close()
cloud, err = GetCloudProvider(name, config)
} else {
// Pass explicit nil so plugins can actually check for nil. See
// "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
cloud, err = GetCloudProvider(name, nil)
}
if err != nil {
return nil, fmt.Errorf("could not init cloud provider %q: %v", name, err)
}
if cloud == nil {
return nil, fmt.Errorf("unknown cloud provider %q", name)
}
return cloud, nil
}
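
For context on the plugin registry removed above (plugins.go, per the same BUILD change): an in-tree provider registers its Factory from an init function in a package imported for side effects, and InitCloudProvider later constructs it by name. A minimal sketch, continuing the hypothetical fakeCloud type from the sketch above:

package fake

import (
	"io"

	"k8s.io/kubernetes/pkg/cloudprovider"
)

func init() {
	// Register the factory at startup; InitCloudProvider("fake", configFilePath)
	// invokes it with the opened config file, or nil when no config is given.
	cloudprovider.RegisterCloudProvider("fake", func(config io.Reader) (cloudprovider.Interface, error) {
		return &fakeCloud{}, nil
	})
}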


@@ -28,12 +28,22 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws",
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/credentialprovider/aws:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
@@ -48,19 +58,9 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -78,16 +78,16 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)


@@ -9,3 +9,5 @@ reviewers:
- justinsb
- zmerlynn
- chrislovecnm
- nckturner
- micahhausler

File diff suppressed because it is too large


@@ -25,10 +25,10 @@ import (
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/golang/glog"
"k8s.io/klog"
)
// FakeAWSServices is a fake AWS session used for testing
type FakeAWSServices struct {
region string
instances []*ec2.Instance
@@ -45,7 +45,8 @@ type FakeAWSServices struct {
kms *FakeKMS
}
func NewFakeAWSServices(clusterId string) *FakeAWSServices {
// NewFakeAWSServices creates a new FakeAWSServices
func NewFakeAWSServices(clusterID string) *FakeAWSServices {
s := &FakeAWSServices{}
s.region = "us-east-1"
s.ec2 = &FakeEC2Impl{aws: s}
@@ -71,12 +72,13 @@ func NewFakeAWSServices(clusterId string) *FakeAWSServices {
var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesClusterLegacy)
tag.Value = aws.String(clusterId)
tag.Value = aws.String(clusterID)
selfInstance.Tags = []*ec2.Tag{&tag}
return s
}
// WithAz sets the ec2 placement availability zone
func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices {
if s.selfInstance.Placement == nil {
s.selfInstance.Placement = &ec2.Placement{}
@@ -85,30 +87,37 @@ func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices {
return s
}
// Compute returns a fake EC2 client
func (s *FakeAWSServices) Compute(region string) (EC2, error) {
return s.ec2, nil
}
// LoadBalancing returns a fake ELB client
func (s *FakeAWSServices) LoadBalancing(region string) (ELB, error) {
return s.elb, nil
}
// LoadBalancingV2 returns a fake ELBV2 client
func (s *FakeAWSServices) LoadBalancingV2(region string) (ELBV2, error) {
return s.elbv2, nil
}
// Autoscaling returns a fake ASG client
func (s *FakeAWSServices) Autoscaling(region string) (ASG, error) {
return s.asg, nil
}
// Metadata returns a fake EC2Metadata client
func (s *FakeAWSServices) Metadata() (EC2Metadata, error) {
return s.metadata, nil
}
// KeyManagement returns a fake KMS client
func (s *FakeAWSServices) KeyManagement(region string) (KMS, error) {
return s.kms, nil
}
// FakeEC2 is a fake EC2 client used for testing
type FakeEC2 interface {
EC2
CreateSubnet(*ec2.Subnet) (*ec2.CreateSubnetOutput, error)
@@ -117,6 +126,7 @@ type FakeEC2 interface {
RemoveRouteTables()
}
// FakeEC2Impl is an implementation of the FakeEC2 interface used for testing
type FakeEC2Impl struct {
aws *FakeAWSServices
Subnets []*ec2.Subnet
@@ -125,12 +135,13 @@ type FakeEC2Impl struct {
DescribeRouteTablesInput *ec2.DescribeRouteTablesInput
}
// DescribeInstances returns fake instance descriptions
func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
matches := []*ec2.Instance{}
for _, instance := range ec2i.aws.instances {
if request.InstanceIds != nil {
if instance.InstanceId == nil {
glog.Warning("Instance with no instance id: ", instance)
klog.Warning("Instance with no instance id: ", instance)
continue
}
@@ -163,54 +174,73 @@ func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput)
return matches, nil
}
// AttachVolume is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) AttachVolume(request *ec2.AttachVolumeInput) (resp *ec2.VolumeAttachment, err error) {
panic("Not implemented")
}
// DetachVolume is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) {
panic("Not implemented")
}
// DescribeVolumes is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) {
panic("Not implemented")
}
// CreateVolume is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) {
panic("Not implemented")
}
// DeleteVolume is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) DeleteVolume(request *ec2.DeleteVolumeInput) (resp *ec2.DeleteVolumeOutput, err error) {
panic("Not implemented")
}
// DescribeSecurityGroups is not implemented but is required for interface
// conformance
func (ec2i *FakeEC2Impl) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) {
panic("Not implemented")
}
// CreateSecurityGroup is not implemented but is required for interface
// conformance
func (ec2i *FakeEC2Impl) CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
panic("Not implemented")
}
// DeleteSecurityGroup is not implemented but is required for interface
// conformance
func (ec2i *FakeEC2Impl) DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) {
panic("Not implemented")
}
// AuthorizeSecurityGroupIngress is not implemented but is required for
// interface conformance
func (ec2i *FakeEC2Impl) AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
panic("Not implemented")
}
// RevokeSecurityGroupIngress is not implemented but is required for interface
// conformance
func (ec2i *FakeEC2Impl) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
panic("Not implemented")
}
// DescribeVolumeModifications is not implemented but is required for interface
// conformance
func (ec2i *FakeEC2Impl) DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) {
panic("Not implemented")
}
// ModifyVolume is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) {
panic("Not implemented")
}
// CreateSubnet creates fake subnets
func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOutput, error) {
ec2i.Subnets = append(ec2i.Subnets, request)
response := &ec2.CreateSubnetOutput{
@@ -219,24 +249,29 @@ func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOut
return response, nil
}
// DescribeSubnets returns fake subnet descriptions
func (ec2i *FakeEC2Impl) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
ec2i.DescribeSubnetsInput = request
return ec2i.Subnets, nil
}
// RemoveSubnets clears subnets on client
func (ec2i *FakeEC2Impl) RemoveSubnets() {
ec2i.Subnets = ec2i.Subnets[:0]
}
// CreateTags is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
panic("Not implemented")
}
// DescribeRouteTables returns fake route table descriptions
func (ec2i *FakeEC2Impl) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) {
ec2i.DescribeRouteTablesInput = request
return ec2i.RouteTables, nil
}
// CreateRouteTable creates fake route tables
func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateRouteTableOutput, error) {
ec2i.RouteTables = append(ec2i.RouteTables, request)
response := &ec2.CreateRouteTableOutput{
@@ -245,30 +280,38 @@ func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateR
return response, nil
}
// RemoveRouteTables clears route tables on client
func (ec2i *FakeEC2Impl) RemoveRouteTables() {
ec2i.RouteTables = ec2i.RouteTables[:0]
}
// CreateRoute is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) {
panic("Not implemented")
}
// DeleteRoute is not implemented but is required for interface conformance
func (ec2i *FakeEC2Impl) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) {
panic("Not implemented")
}
// ModifyInstanceAttribute is not implemented but is required for interface
// conformance
func (ec2i *FakeEC2Impl) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) {
panic("Not implemented")
}
// DescribeVpcs returns fake VPC descriptions
func (ec2i *FakeEC2Impl) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) {
return &ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{{CidrBlock: aws.String("172.20.0.0/16")}}}, nil
}
// FakeMetadata is a fake EC2 metadata service client used for testing
type FakeMetadata struct {
aws *FakeAWSServices
}
// GetMetadata returns fake EC2 metadata for testing
func (m *FakeMetadata) GetMetadata(key string) (string, error) {
networkInterfacesPrefix := "network/interfaces/macs/"
i := m.aws.selfInstance
@@ -291,199 +334,292 @@ func (m *FakeMetadata) GetMetadata(key string) (string, error) {
} else if strings.HasPrefix(key, networkInterfacesPrefix) {
if key == networkInterfacesPrefix {
return strings.Join(m.aws.networkInterfacesMacs, "/\n") + "/\n", nil
} else {
keySplit := strings.Split(key, "/")
macParam := keySplit[3]
if len(keySplit) == 5 && keySplit[4] == "vpc-id" {
for i, macElem := range m.aws.networkInterfacesMacs {
if macParam == macElem {
return m.aws.networkInterfacesVpcIDs[i], nil
}
}
}
if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" {
for i, macElem := range m.aws.networkInterfacesMacs {
if macParam == macElem {
return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil
}
}
}
return "", nil
}
keySplit := strings.Split(key, "/")
macParam := keySplit[3]
if len(keySplit) == 5 && keySplit[4] == "vpc-id" {
for i, macElem := range m.aws.networkInterfacesMacs {
if macParam == macElem {
return m.aws.networkInterfacesVpcIDs[i], nil
}
}
}
if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" {
for i, macElem := range m.aws.networkInterfacesMacs {
if macParam == macElem {
return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil
}
}
}
return "", nil
} else {
return "", nil
}
}
// FakeELB is a fake ELB client used for testing
type FakeELB struct {
aws *FakeAWSServices
}
// CreateLoadBalancer is not implemented but is required for interface
// conformance
func (elb *FakeELB) CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) {
panic("Not implemented")
}
// DeleteLoadBalancer is not implemented but is required for interface
// conformance
func (elb *FakeELB) DeleteLoadBalancer(input *elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) {
panic("Not implemented")
}
// DescribeLoadBalancers is not implemented but is required for interface
// conformance
func (elb *FakeELB) DescribeLoadBalancers(input *elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) {
panic("Not implemented")
}
// AddTags is not implemented but is required for interface conformance
func (elb *FakeELB) AddTags(input *elb.AddTagsInput) (*elb.AddTagsOutput, error) {
panic("Not implemented")
}
// RegisterInstancesWithLoadBalancer is not implemented but is required for
// interface conformance
func (elb *FakeELB) RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) {
panic("Not implemented")
}
// DeregisterInstancesFromLoadBalancer is not implemented but is required for
// interface conformance
func (elb *FakeELB) DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) {
panic("Not implemented")
}
// DetachLoadBalancerFromSubnets is not implemented but is required for
// interface conformance
func (elb *FakeELB) DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) {
panic("Not implemented")
}
// AttachLoadBalancerToSubnets is not implemented but is required for interface
// conformance
func (elb *FakeELB) AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) {
panic("Not implemented")
}
// CreateLoadBalancerListeners is not implemented but is required for interface
// conformance
func (elb *FakeELB) CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) {
panic("Not implemented")
}
// DeleteLoadBalancerListeners is not implemented but is required for interface
// conformance
func (elb *FakeELB) DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) {
panic("Not implemented")
}
// ApplySecurityGroupsToLoadBalancer is not implemented but is required for
// interface conformance
func (elb *FakeELB) ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error) {
panic("Not implemented")
}
// ConfigureHealthCheck is not implemented but is required for interface
// conformance
func (elb *FakeELB) ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) {
panic("Not implemented")
}
// CreateLoadBalancerPolicy is not implemented but is required for interface
// conformance
func (elb *FakeELB) CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) {
panic("Not implemented")
}
// SetLoadBalancerPoliciesForBackendServer is not implemented but is required
// for interface conformance
func (elb *FakeELB) SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) {
panic("Not implemented")
}
// SetLoadBalancerPoliciesOfListener is not implemented but is required for
// interface conformance
func (elb *FakeELB) SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) {
panic("Not implemented")
}
// DescribeLoadBalancerPolicies is not implemented but is required for
// interface conformance
func (elb *FakeELB) DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) {
panic("Not implemented")
}
// DescribeLoadBalancerAttributes is not implemented but is required for
// interface conformance
func (elb *FakeELB) DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) {
panic("Not implemented")
}
// ModifyLoadBalancerAttributes is not implemented but is required for
// interface conformance
func (elb *FakeELB) ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) {
panic("Not implemented")
}
func (self *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) {
// expectDescribeLoadBalancers is not implemented but is required for interface
// conformance
func (elb *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) {
panic("Not implemented")
}
// FakeELBV2 is a fake ELBV2 client used for testing
type FakeELBV2 struct {
aws *FakeAWSServices
}
func (self *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) {
// AddTags is not implemented but is required for interface conformance
func (elb *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) {
// CreateLoadBalancer is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) {
// DescribeLoadBalancers is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) {
// DeleteLoadBalancer is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) {
// ModifyLoadBalancerAttributes is not implemented but is required for
// interface conformance
func (elb *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) {
// DescribeLoadBalancerAttributes is not implemented but is required for
// interface conformance
func (elb *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) {
// CreateTargetGroup is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) {
// DescribeTargetGroups is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) {
panic("Not implemented")
}
func (self *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error {
// ModifyTargetGroup is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) {
panic("Not implemented")
}
// DeleteTargetGroup is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) {
panic("Not implemented")
}
// DescribeTargetHealth is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) {
panic("Not implemented")
}
// DescribeTargetGroupAttributes is not implemented but is required for
// interface conformance
func (elb *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) {
panic("Not implemented")
}
// ModifyTargetGroupAttributes is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) {
panic("Not implemented")
}
// RegisterTargets is not implemented but is required for interface conformance
func (elb *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) {
panic("Not implemented")
}
// DeregisterTargets is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) {
panic("Not implemented")
}
// CreateListener is not implemented but is required for interface conformance
func (elb *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) {
panic("Not implemented")
}
// DescribeListeners is not implemented but is required for interface
// conformance
func (elb *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) {
panic("Not implemented")
}
// DeleteListener is not implemented but is required for interface conformance
func (elb *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) {
panic("Not implemented")
}
// ModifyListener is not implemented but is required for interface conformance
func (elb *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) {
panic("Not implemented")
}
// WaitUntilLoadBalancersDeleted is not implemented but is required for
// interface conformance
func (elb *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error {
panic("Not implemented")
}
// FakeASG is a fake Autoscaling client used for testing
type FakeASG struct {
aws *FakeAWSServices
}
// UpdateAutoScalingGroup is not implemented but is required for interface
// conformance
func (a *FakeASG) UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) {
panic("Not implemented")
}
// DescribeAutoScalingGroups is not implemented but is required for interface
// conformance
func (a *FakeASG) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) {
panic("Not implemented")
}
// FakeKMS is a fake KMS client used for testing
type FakeKMS struct {
aws *FakeAWSServices
}
// DescribeKey is not implemented but is required for interface conformance
func (kms *FakeKMS) DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) {
panic("Not implemented")
}


@@ -21,7 +21,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/golang/glog"
"k8s.io/klog"
)
// AWSCloud implements InstanceGroups
@@ -42,7 +42,7 @@ func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error {
return nil
}
// Implement InstanceGroups.ResizeInstanceGroup
// ResizeInstanceGroup implements InstanceGroups.ResizeInstanceGroup
// Set the size to the fixed size
func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error {
return ResizeInstanceGroup(c.asg, instanceGroupName, size)
@@ -64,13 +64,13 @@ func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo
return nil, nil
}
if len(response.AutoScalingGroups) > 1 {
glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName)
klog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName)
}
group := response.AutoScalingGroups[0]
return &awsInstanceGroup{group: group}, nil
}
// Implement InstanceGroups.DescribeInstanceGroup
// DescribeInstanceGroup implements InstanceGroups.DescribeInstanceGroup
// Queries the cloud provider for information about the specified instance group
func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) {
return DescribeInstanceGroup(c.asg, instanceGroupName)


@@ -28,15 +28,20 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
// ProxyProtocolPolicyName is the tag name used for the proxy protocol
// policy
ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled"
// SSLNegotiationPolicyNameFormat is a format string used for the SSL
// negotiation policy tag name
SSLNegotiationPolicyNameFormat = "k8s-SSLNegotiationPolicy-%s"
)
@@ -129,7 +134,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
})
}
glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
createResponse, err := c.elbv2.CreateLoadBalancer(createRequest)
if err != nil {
return nil, fmt.Errorf("Error creating load balancer: %q", err)
@@ -335,7 +340,7 @@ func createTargetName(namespacedName types.NamespacedName, frontendPort, nodePor
func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping, namespacedName types.NamespacedName, instanceIDs []string, vpcID string) (listener *elbv2.Listener, targetGroupArn *string, err error) {
targetName := createTargetName(namespacedName, mapping.FrontendPort, mapping.TrafficPort)
glog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName)
klog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName)
target, err := c.ensureTargetGroup(
nil,
mapping,
@@ -356,7 +361,7 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping
Type: aws.String(elbv2.ActionTypeEnumForward),
}},
}
glog.Infof("Creating load balancer listener for %v", namespacedName)
klog.Infof("Creating load balancer listener for %v", namespacedName)
createListenerOutput, err := c.elbv2.CreateListener(createListernerInput)
if err != nil {
return nil, aws.String(""), fmt.Errorf("Error creating load balancer listener: %q", err)
@@ -612,7 +617,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
for _, actualGroup := range actualGroups {
actualGroupID := aws.StringValue(actualGroup.GroupId)
if actualGroupID == "" {
glog.Warning("Ignoring group without ID: ", actualGroup)
klog.Warning("Ignoring group without ID: ", actualGroup)
continue
}
@@ -647,17 +652,17 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
for port, add := range portMap {
if add {
if clientTraffic {
glog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
glog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
klog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
klog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
} else {
glog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
klog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
}
} else {
if clientTraffic {
glog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
glog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID)
klog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID)
klog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID)
}
glog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID)
klog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID)
}
if clientTraffic {
@@ -712,7 +717,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
return err
}
if !changed {
glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
}
}
@@ -722,7 +727,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
return err
}
if !changed {
glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
}
}
@@ -745,12 +750,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
group, err := c.findSecurityGroup(instanceSecurityGroupID)
if err != nil {
glog.Warningf("Error retrieving security group: %q", err)
klog.Warningf("Error retrieving security group: %q", err)
return err
}
if group == nil {
glog.Warning("Security group not found: ", instanceSecurityGroupID)
klog.Warning("Security group not found: ", instanceSecurityGroupID)
return nil
}
@@ -771,21 +776,21 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
// the icmp permission is missing
changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission})
if err != nil {
glog.Warningf("Error adding MTU permission to security group: %q", err)
klog.Warningf("Error adding MTU permission to security group: %q", err)
return err
}
if !changed {
glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
}
} else if icmpExists && permCount == 0 {
// there is no additional permissions, remove icmp
changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission})
if err != nil {
glog.Warningf("Error removing MTU permission to security group: %q", err)
klog.Warningf("Error removing MTU permission to security group: %q", err)
return err
}
if !changed {
glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
}
}
}
@@ -864,13 +869,13 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, in
}
if securityGroup == nil {
glog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId))
klog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId))
continue
}
id := aws.StringValue(securityGroup.GroupId)
if id == "" {
glog.Warningf("found security group without id: %v", securityGroup)
klog.Warningf("found security group without id: %v", securityGroup)
continue
}
@@ -912,9 +917,17 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// We are supposed to specify one subnet per AZ.
// TODO: What happens if we have more than one subnet per AZ?
createRequest.Subnets = stringPointerArray(subnetIDs)
if subnetIDs == nil {
createRequest.Subnets = nil
} else {
createRequest.Subnets = aws.StringSlice(subnetIDs)
}
createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)
if securityGroupIDs == nil {
createRequest.SecurityGroups = nil
} else {
createRequest.SecurityGroups = aws.StringSlice(securityGroupIDs)
}
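The repeated nil checks above replace the old stringPointerArray helper with the SDK's aws.StringSlice, which always returns a non-nil (possibly empty) slice; the explicit branch keeps a nil input meaning "field unset" in the ELB request. A sketch of the same pattern factored into a helper (the function name is hypothetical):

func stringSliceOrNil(ids []string) []*string {
	if ids == nil {
		// Preserve nil so the AWS API treats the field as unset rather than empty.
		return nil
	}
	return aws.StringSlice(ids)
}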
// Get additional tags set by the user
tags := getLoadBalancerAdditionalTags(annotations)
@ -929,7 +942,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
})
}
glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
_, err := c.elb.CreateLoadBalancer(createRequest)
if err != nil {
return nil, err
@ -942,7 +955,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
}
for _, listener := range listeners {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)})
if err != nil {
return nil, err
@ -966,7 +979,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
request := &elb.DetachLoadBalancerFromSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(removals)
glog.V(2).Info("Detaching load balancer from removed subnets")
klog.V(2).Info("Detaching load balancer from removed subnets")
_, err := c.elb.DetachLoadBalancerFromSubnets(request)
if err != nil {
return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %q", err)
@ -978,7 +991,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
request := &elb.AttachLoadBalancerToSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(additions)
glog.V(2).Info("Attaching load balancer to added subnets")
klog.V(2).Info("Attaching load balancer to added subnets")
_, err := c.elb.AttachLoadBalancerToSubnets(request)
if err != nil {
return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %q", err)
@ -996,8 +1009,12 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// This call just replaces the security groups, unlike e.g. subnets (!)
request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.SecurityGroups = stringPointerArray(securityGroupIDs)
glog.V(2).Info("Applying updated security groups to load balancer")
if securityGroupIDs == nil {
request.SecurityGroups = nil
} else {
request.SecurityGroups = aws.StringSlice(securityGroupIDs)
}
klog.V(2).Info("Applying updated security groups to load balancer")
_, err := c.elb.ApplySecurityGroupsToLoadBalancer(request)
if err != nil {
return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %q", err)
@ -1015,7 +1032,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
for _, listenerDescription := range listenerDescriptions {
actual := listenerDescription.Listener
if actual == nil {
glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
klog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
continue
}
@ -1057,7 +1074,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
request := &elb.DeleteLoadBalancerListenersInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.LoadBalancerPorts = removals
glog.V(2).Info("Deleting removed load balancer listeners")
klog.V(2).Info("Deleting removed load balancer listeners")
_, err := c.elb.DeleteLoadBalancerListeners(request)
if err != nil {
return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %q", err)
@ -1069,7 +1086,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
request := &elb.CreateLoadBalancerListenersInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Listeners = additions
glog.V(2).Info("Creating added load balancer listeners")
klog.V(2).Info("Creating added load balancer listeners")
_, err := c.elb.CreateLoadBalancerListeners(request)
if err != nil {
return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %q", err)
@ -1121,7 +1138,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
}
if setPolicy {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol)
klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol)
err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies)
if err != nil {
return nil, err
@ -1135,7 +1152,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// corresponding listener anymore
for instancePort, found := range foundBackends {
if !found {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort)
klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort)
err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{})
if err != nil {
return nil, err
@ -1147,7 +1164,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
{
// Add additional tags
glog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName)
klog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName)
tags := getLoadBalancerAdditionalTags(annotations)
if len(tags) > 0 {
err := c.addLoadBalancerTags(loadBalancerName, tags)
@ -1166,7 +1183,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest)
if err != nil {
glog.Warning("Unable to retrieve load balancer attributes during attribute sync")
klog.Warning("Unable to retrieve load balancer attributes during attribute sync")
return nil, err
}
@ -1174,7 +1191,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// Update attributes if they're dirty
if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) {
glog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName)
klog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName)
modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{}
modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
@ -1190,7 +1207,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
if dirty {
loadBalancer, err = c.describeLoadBalancer(loadBalancerName)
if err != nil {
glog.Warning("Unable to retrieve load balancer after creation/update")
klog.Warning("Unable to retrieve load balancer after creation/update")
return nil, err
}
}
@ -1314,16 +1331,16 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances
removals := actual.Difference(expected)
addInstances := []*elb.Instance{}
for _, instanceId := range additions.List() {
for _, instanceID := range additions.List() {
addInstance := &elb.Instance{}
addInstance.InstanceId = aws.String(instanceId)
addInstance.InstanceId = aws.String(instanceID)
addInstances = append(addInstances, addInstance)
}
removeInstances := []*elb.Instance{}
for _, instanceId := range removals.List() {
for _, instanceID := range removals.List() {
removeInstance := &elb.Instance{}
removeInstance.InstanceId = aws.String(instanceId)
removeInstance.InstanceId = aws.String(instanceID)
removeInstances = append(removeInstances, removeInstance)
}
@ -1335,7 +1352,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances
if err != nil {
return err
}
glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName)
klog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName)
}
if len(removeInstances) > 0 {
@ -1346,7 +1363,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances
if err != nil {
return err
}
glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName)
klog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName)
}
return nil
@ -1365,7 +1382,7 @@ func (c *Cloud) getLoadBalancerTLSPorts(loadBalancer *elb.LoadBalancerDescriptio
}
func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescription, policyName string) error {
glog.V(2).Info("Describing load balancer policies on load balancer")
klog.V(2).Info("Describing load balancer policies on load balancer")
result, err := c.elb.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{
LoadBalancerName: loadBalancer.LoadBalancerName,
PolicyNames: []*string{
@ -1386,7 +1403,7 @@ func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescrip
return nil
}
glog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName))
klog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName))
// there is an upper limit of 98 policies on an ELB, we're pretty safe from
// running into it
_, err = c.elb.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{
@ -1415,7 +1432,7 @@ func (c *Cloud) setSSLNegotiationPolicy(loadBalancerName, sslPolicyName string,
aws.String(policyName),
},
}
glog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName)
klog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName)
_, err := c.elb.SetLoadBalancerPoliciesOfListener(request)
if err != nil {
return fmt.Errorf("error setting SSL negotiation policy '%s' on load balancer: %q", policyName, err)
@ -1435,7 +1452,7 @@ func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error {
},
},
}
glog.V(2).Info("Creating proxy protocol policy on load balancer")
klog.V(2).Info("Creating proxy protocol policy on load balancer")
_, err := c.elb.CreateLoadBalancerPolicy(request)
if err != nil {
return fmt.Errorf("error creating proxy protocol policy on load balancer: %q", err)
@ -1451,9 +1468,9 @@ func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64,
PolicyNames: policies,
}
if len(policies) > 0 {
glog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort)
klog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort)
} else {
glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort)
klog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort)
}
_, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request)
if err != nil {

View File

@ -17,8 +17,9 @@ limitations under the License.
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"testing"
"github.com/aws/aws-sdk-go/aws"
)
func TestElbProtocolsAreEqual(t *testing.T) {

View File

@ -22,8 +22,9 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/klog"
cloudprovider "k8s.io/cloud-provider"
)
func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {
@ -116,7 +117,7 @@ func (c *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpro
route.TargetNode = mapInstanceToNodeName(instance)
routes = append(routes, route)
} else {
glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
klog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
}
}
}
@ -171,7 +172,7 @@ func (c *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st
}
if deleteRoute != nil {
glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))
klog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))
request := &ec2.DeleteRouteInput{}
request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock

View File

@ -27,18 +27,18 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
const TestClusterId = "clusterid.test"
const TestClusterID = "clusterid.test"
const TestClusterName = "testCluster"
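The Id-to-ID renames throughout these test hunks (TestClusterId, instanceId, oldIpPermission, result_set, and so on) all follow golint's initialism rule: acronyms keep a single case in Go identifiers, and underscores are avoided. A one-glance sketch of the convention, with placeholder names:

var (
	clusterID       string           // not clusterId
	newIPPermission ec2.IpPermission // locals use IP; the SDK's generated type name stays as-is
)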
type MockedFakeEC2 struct {
@ -46,10 +46,10 @@ type MockedFakeEC2 struct {
mock.Mock
}
func (m *MockedFakeEC2) expectDescribeSecurityGroups(clusterId, groupName, clusterID string) {
func (m *MockedFakeEC2) expectDescribeSecurityGroups(clusterID, groupName string) {
tags := []*ec2.Tag{
{Key: aws.String(TagNameKubernetesClusterLegacy), Value: aws.String(clusterId)},
{Key: aws.String(fmt.Sprintf("%s%s", TagNameKubernetesClusterPrefix, clusterId)), Value: aws.String(ResourceLifecycleOwned)},
{Key: aws.String(TagNameKubernetesClusterLegacy), Value: aws.String(clusterID)},
{Key: aws.String(fmt.Sprintf("%s%s", TagNameKubernetesClusterPrefix, clusterID)), Value: aws.String(ResourceLifecycleOwned)},
}
m.On("DescribeSecurityGroups", &ec2.DescribeSecurityGroupsInput{Filters: []*ec2.Filter{
@ -68,6 +68,11 @@ func (m *MockedFakeEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGrou
return args.Get(0).([]*ec2.SecurityGroup), nil
}
func (m *MockedFakeEC2) CreateVolume(request *ec2.CreateVolumeInput) (*ec2.Volume, error) {
args := m.Called(request)
return args.Get(0).(*ec2.Volume), nil
}
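The new CreateVolume stub above follows the standard testify pattern: m.Called records the arguments and hands back whatever canned values were registered via On(...).Return(...). A minimal sketch of the flow, with placeholder request/volume values:

mockEC2 := &MockedFakeEC2{}
want := &ec2.Volume{VolumeId: aws.String("vol-0123")}
mockEC2.On("CreateVolume", &ec2.CreateVolumeInput{}).Return(want, nil)
got, err := mockEC2.CreateVolume(&ec2.CreateVolumeInput{})
// got == want, err == nil; AssertExpectations(t) would verify the call occurred.
_, _ = got, err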
type MockedFakeELB struct {
*FakeELB
mock.Mock
@ -139,17 +144,17 @@ func TestReadAWSCloudConfig(t *testing.T) {
},
{
"No zone in config, metadata does not have zone",
strings.NewReader("[global]\n"), newMockedFakeAWSServices(TestClusterId).WithAz(""),
strings.NewReader("[global]\n"), newMockedFakeAWSServices(TestClusterID).WithAz(""),
true, "",
},
{
"No zone in config, metadata has zone",
strings.NewReader("[global]\n"), newMockedFakeAWSServices(TestClusterId),
strings.NewReader("[global]\n"), newMockedFakeAWSServices(TestClusterID),
false, "us-east-1a",
},
{
"Zone in config should take precedence over metadata",
strings.NewReader("[global]\nzone = eu-west-1a"), newMockedFakeAWSServices(TestClusterId),
strings.NewReader("[global]\nzone = eu-west-1a"), newMockedFakeAWSServices(TestClusterID),
false, "eu-west-1a",
},
}
@ -192,24 +197,24 @@ func TestNewAWSCloud(t *testing.T) {
}{
{
"No config reader",
nil, newMockedFakeAWSServices(TestClusterId).WithAz(""),
nil, newMockedFakeAWSServices(TestClusterID).WithAz(""),
true, "",
},
{
"Config specifies valid zone",
strings.NewReader("[global]\nzone = eu-west-1a"), newMockedFakeAWSServices(TestClusterId),
strings.NewReader("[global]\nzone = eu-west-1a"), newMockedFakeAWSServices(TestClusterID),
false, "eu-west-1",
},
{
"Gets zone from metadata when not in config",
strings.NewReader("[global]\n"),
newMockedFakeAWSServices(TestClusterId),
newMockedFakeAWSServices(TestClusterID),
false, "us-east-1",
},
{
"No zone in config or metadata",
strings.NewReader("[global]\n"),
newMockedFakeAWSServices(TestClusterId).WithAz(""),
newMockedFakeAWSServices(TestClusterID).WithAz(""),
true, "",
},
}
@ -237,7 +242,7 @@ func TestNewAWSCloud(t *testing.T) {
}
func mockInstancesResp(selfInstance *ec2.Instance, instances []*ec2.Instance) (*Cloud, *FakeAWSServices) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
awsServices.instances = instances
awsServices.selfInstance = selfInstance
awsCloud, err := newAWSCloud(CloudConfig{}, awsServices)
@ -248,7 +253,7 @@ func mockInstancesResp(selfInstance *ec2.Instance, instances []*ec2.Instance) (*
}
func mockAvailabilityZone(availabilityZone string) *Cloud {
awsServices := newMockedFakeAWSServices(TestClusterId).WithAz(availabilityZone)
awsServices := newMockedFakeAWSServices(TestClusterID).WithAz(availabilityZone)
awsCloud, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
panic(err)
@ -275,7 +280,7 @@ func TestNodeAddresses(t *testing.T) {
// ClusterID needs to be set
var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesClusterLegacy)
tag.Value = aws.String(TestClusterId)
tag.Value = aws.String(TestClusterID)
tags := []*ec2.Tag{&tag}
//0
@ -348,13 +353,14 @@ func TestNodeAddresses(t *testing.T) {
if err3 != nil {
t.Errorf("Should not error when instance found")
}
if len(addrs3) != 4 {
t.Errorf("Should return exactly 4 NodeAddresses")
if len(addrs3) != 5 {
t.Errorf("Should return exactly 5 NodeAddresses")
}
testHasNodeAddress(t, addrs3, v1.NodeInternalIP, "192.168.0.1")
testHasNodeAddress(t, addrs3, v1.NodeExternalIP, "1.2.3.4")
testHasNodeAddress(t, addrs3, v1.NodeExternalDNS, "instance-same.ec2.external")
testHasNodeAddress(t, addrs3, v1.NodeInternalDNS, "instance-same.ec2.internal")
testHasNodeAddress(t, addrs3, v1.NodeHostName, "instance-same.ec2.internal")
}
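The expected count grows from 4 to 5 because the provider now also reports a NodeHostName address alongside the existing kinds. A sketch of the full set this test now asserts (addresses copied from the assertions above):

var exampleAddrs = []v1.NodeAddress{
	{Type: v1.NodeInternalIP, Address: "192.168.0.1"},
	{Type: v1.NodeExternalIP, Address: "1.2.3.4"},
	{Type: v1.NodeInternalDNS, Address: "instance-same.ec2.internal"},
	{Type: v1.NodeExternalDNS, Address: "instance-same.ec2.external"},
	{Type: v1.NodeHostName, Address: "instance-same.ec2.internal"}, // the new fifth entry
}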
func TestNodeAddressesWithMetadata(t *testing.T) {
@ -363,7 +369,7 @@ func TestNodeAddressesWithMetadata(t *testing.T) {
// ClusterID needs to be set
var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesClusterLegacy)
tag.Value = aws.String(TestClusterId)
tag.Value = aws.String(TestClusterID)
tags := []*ec2.Tag{&tag}
instanceName := "instance.ec2.internal"
@ -411,7 +417,7 @@ func TestGetRegion(t *testing.T) {
}
func TestFindVPCID(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
@ -485,7 +491,7 @@ func constructRouteTable(subnetID string, public bool) *ec2.RouteTable {
}
func TestSubnetIDsinVPC(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
@ -531,13 +537,13 @@ func TestSubnetIDsinVPC(t *testing.T) {
return
}
result_set := make(map[string]bool)
resultSet := make(map[string]bool)
for _, v := range result {
result_set[v] = true
resultSet[v] = true
}
for i := range subnets {
if !result_set[subnets[i]["id"]] {
if !resultSet[subnets[i]["id"]] {
t.Errorf("Expected subnet%d '%s' in result: %v", i, subnets[i]["id"], result)
return
}
@ -561,13 +567,13 @@ func TestSubnetIDsinVPC(t *testing.T) {
return
}
result_set = make(map[string]bool)
resultSet = make(map[string]bool)
for _, v := range result {
result_set[v] = true
resultSet[v] = true
}
for i := range subnets {
if !result_set[subnets[i]["id"]] {
if !resultSet[subnets[i]["id"]] {
t.Errorf("Expected subnet%d '%s' in result: %v", i, subnets[i]["id"], result)
return
}
@ -664,7 +670,7 @@ func TestSubnetIDsinVPC(t *testing.T) {
}
func TestIpPermissionExistsHandlesMultipleGroupIds(t *testing.T) {
oldIpPermission := ec2.IpPermission{
oldIPPermission := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("firstGroupId")},
{GroupId: aws.String("secondGroupId")},
@ -672,36 +678,36 @@ func TestIpPermissionExistsHandlesMultipleGroupIds(t *testing.T) {
},
}
existingIpPermission := ec2.IpPermission{
existingIPPermission := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("secondGroupId")},
},
}
newIpPermission := ec2.IpPermission{
newIPPermission := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("fourthGroupId")},
},
}
equals := ipPermissionExists(&existingIpPermission, &oldIpPermission, false)
equals := ipPermissionExists(&existingIPPermission, &oldIPPermission, false)
if !equals {
t.Errorf("Should have been considered equal since first is in the second array of groups")
}
equals = ipPermissionExists(&newIpPermission, &oldIpPermission, false)
equals = ipPermissionExists(&newIPPermission, &oldIPPermission, false)
if equals {
t.Errorf("Should have not been considered equal since first is not in the second array of groups")
}
// The first pair matches, but the second does not
newIpPermission2 := ec2.IpPermission{
newIPPermission2 := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("firstGroupId")},
{GroupId: aws.String("fourthGroupId")},
},
}
equals = ipPermissionExists(&newIpPermission2, &oldIpPermission, false)
equals = ipPermissionExists(&newIPPermission2, &oldIPPermission, false)
if equals {
t.Errorf("Should have not been considered equal since first is not in the second array of groups")
}
@ -709,9 +715,9 @@ func TestIpPermissionExistsHandlesMultipleGroupIds(t *testing.T) {
func TestIpPermissionExistsHandlesRangeSubsets(t *testing.T) {
// Two existing scenarios we'll test against
emptyIpPermission := ec2.IpPermission{}
emptyIPPermission := ec2.IpPermission{}
oldIpPermission := ec2.IpPermission{
oldIPPermission := ec2.IpPermission{
IpRanges: []*ec2.IpRange{
{CidrIp: aws.String("10.0.0.0/8")},
{CidrIp: aws.String("192.168.1.0/24")},
@ -719,53 +725,53 @@ func TestIpPermissionExistsHandlesRangeSubsets(t *testing.T) {
}
// Two already existing ranges and a new one
existingIpPermission := ec2.IpPermission{
existingIPPermission := ec2.IpPermission{
IpRanges: []*ec2.IpRange{
{CidrIp: aws.String("10.0.0.0/8")},
},
}
existingIpPermission2 := ec2.IpPermission{
existingIPPermission2 := ec2.IpPermission{
IpRanges: []*ec2.IpRange{
{CidrIp: aws.String("192.168.1.0/24")},
},
}
newIpPermission := ec2.IpPermission{
newIPPermission := ec2.IpPermission{
IpRanges: []*ec2.IpRange{
{CidrIp: aws.String("172.16.0.0/16")},
},
}
exists := ipPermissionExists(&emptyIpPermission, &emptyIpPermission, false)
exists := ipPermissionExists(&emptyIPPermission, &emptyIPPermission, false)
if !exists {
t.Errorf("Should have been considered existing since we're comparing a range array against itself")
}
exists = ipPermissionExists(&oldIpPermission, &oldIpPermission, false)
exists = ipPermissionExists(&oldIPPermission, &oldIPPermission, false)
if !exists {
t.Errorf("Should have been considered existing since we're comparing a range array against itself")
}
exists = ipPermissionExists(&existingIpPermission, &oldIpPermission, false)
exists = ipPermissionExists(&existingIPPermission, &oldIPPermission, false)
if !exists {
t.Errorf("Should have been considered existing since 10.* is in oldIpPermission's array of ranges")
t.Errorf("Should have been considered existing since 10.* is in oldIPPermission's array of ranges")
}
exists = ipPermissionExists(&existingIpPermission2, &oldIpPermission, false)
exists = ipPermissionExists(&existingIPPermission2, &oldIPPermission, false)
if !exists {
t.Errorf("Should have been considered existing since 192.* is in oldIpPermission2's array of ranges")
}
exists = ipPermissionExists(&newIpPermission, &emptyIpPermission, false)
exists = ipPermissionExists(&newIPPermission, &emptyIPPermission, false)
if exists {
t.Errorf("Should have not been considered existing since we compared against a missing array of ranges")
}
exists = ipPermissionExists(&newIpPermission, &oldIpPermission, false)
exists = ipPermissionExists(&newIPPermission, &oldIPPermission, false)
if exists {
t.Errorf("Should have not been considered existing since 172.* is not in oldIpPermission's array of ranges")
t.Errorf("Should have not been considered existing since 172.* is not in oldIPPermission's array of ranges")
}
}
func TestIpPermissionExistsHandlesMultipleGroupIdsWithUserIds(t *testing.T) {
oldIpPermission := ec2.IpPermission{
oldIPPermission := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("firstGroupId"), UserId: aws.String("firstUserId")},
{GroupId: aws.String("secondGroupId"), UserId: aws.String("secondUserId")},
@ -773,78 +779,95 @@ func TestIpPermissionExistsHandlesMultipleGroupIdsWithUserIds(t *testing.T) {
},
}
existingIpPermission := ec2.IpPermission{
existingIPPermission := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("secondGroupId"), UserId: aws.String("secondUserId")},
},
}
newIpPermission := ec2.IpPermission{
newIPPermission := ec2.IpPermission{
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{GroupId: aws.String("secondGroupId"), UserId: aws.String("anotherUserId")},
},
}
equals := ipPermissionExists(&existingIpPermission, &oldIpPermission, true)
equals := ipPermissionExists(&existingIPPermission, &oldIPPermission, true)
if !equals {
t.Errorf("Should have been considered equal since first is in the second array of groups")
}
equals = ipPermissionExists(&newIpPermission, &oldIpPermission, true)
equals = ipPermissionExists(&newIPPermission, &oldIPPermission, true)
if equals {
t.Errorf("Should have not been considered equal since first is not in the second array of groups")
}
}
func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsStates := []struct {
id int64
state string
expected bool
}{
{0, ec2.InstanceStateNamePending, true},
{16, ec2.InstanceStateNameRunning, true},
{32, ec2.InstanceStateNameShuttingDown, true},
{48, ec2.InstanceStateNameTerminated, false},
{64, ec2.InstanceStateNameStopping, true},
{80, ec2.InstanceStateNameStopped, true},
}
awsServices := newMockedFakeAWSServices(TestClusterID)
nodeName := types.NodeName("my-dns.internal")
var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesClusterLegacy)
tag.Value = aws.String(TestClusterId)
tag.Value = aws.String(TestClusterID)
tags := []*ec2.Tag{&tag}
var runningInstance ec2.Instance
runningInstance.InstanceId = aws.String("i-running")
runningInstance.PrivateDnsName = aws.String(string(nodeName))
runningInstance.State = &ec2.InstanceState{Code: aws.Int64(16), Name: aws.String("running")}
runningInstance.Tags = tags
var testInstance ec2.Instance
testInstance.PrivateDnsName = aws.String(string(nodeName))
testInstance.Tags = tags
var terminatedInstance ec2.Instance
terminatedInstance.InstanceId = aws.String("i-terminated")
terminatedInstance.PrivateDnsName = aws.String(string(nodeName))
terminatedInstance.State = &ec2.InstanceState{Code: aws.Int64(48), Name: aws.String("terminated")}
terminatedInstance.Tags = tags
awsDefaultInstances := awsServices.instances
for _, awsState := range awsStates {
id := "i-" + awsState.state
testInstance.InstanceId = aws.String(id)
testInstance.State = &ec2.InstanceState{Code: aws.Int64(awsState.id), Name: aws.String(awsState.state)}
instances := []*ec2.Instance{&terminatedInstance, &runningInstance}
awsServices.instances = append(awsServices.instances, instances...)
awsServices.instances = append(awsDefaultInstances, &testInstance)
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
instance, err := c.findInstanceByNodeName(nodeName)
resultInstance, err := c.findInstanceByNodeName(nodeName)
if err != nil {
t.Errorf("Failed to find instance: %v", err)
return
}
if *instance.InstanceId != "i-running" {
t.Errorf("Expected running instance but got %v", *instance.InstanceId)
if awsState.expected {
if err != nil || resultInstance == nil {
t.Errorf("Expected to find instance %v", *testInstance.InstanceId)
return
}
if *resultInstance.InstanceId != *testInstance.InstanceId {
t.Errorf("Wrong instance returned by findInstanceByNodeName() expected: %v, actual: %v", *testInstance.InstanceId, *resultInstance.InstanceId)
return
}
} else {
if err == nil && resultInstance != nil {
t.Errorf("Did not expect to find instance %v", *resultInstance.InstanceId)
return
}
}
}
}
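The rewritten test is now table-driven over every EC2 instance lifecycle state rather than just running/terminated. The numeric codes in the table are the documented low-byte values of ec2.InstanceState.Code, which the SDK pairs with these state-name constants:

// EC2's documented state codes (low byte) as used in the table above.
var stateCodes = map[string]int64{
	ec2.InstanceStateNamePending:      0,
	ec2.InstanceStateNameRunning:      16,
	ec2.InstanceStateNameShuttingDown: 32,
	ec2.InstanceStateNameTerminated:   48,
	ec2.InstanceStateNameStopping:     64,
	ec2.InstanceStateNameStopped:      80,
}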
func TestGetInstanceByNodeNameBatching(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesClusterPrefix + TestClusterId)
tag.Key = aws.String(TagNameKubernetesClusterPrefix + TestClusterID)
tag.Value = aws.String("")
tags := []*ec2.Tag{&tag}
nodeNames := []string{}
@ -852,8 +875,8 @@ func TestGetInstanceByNodeNameBatching(t *testing.T) {
nodeName := fmt.Sprintf("ip-171-20-42-%d.ec2.internal", i)
nodeNames = append(nodeNames, nodeName)
ec2Instance := &ec2.Instance{}
instanceId := fmt.Sprintf("i-abcedf%d", i)
ec2Instance.InstanceId = aws.String(instanceId)
instanceID := fmt.Sprintf("i-abcedf%d", i)
ec2Instance.InstanceId = aws.String(instanceID)
ec2Instance.PrivateDnsName = aws.String(nodeName)
ec2Instance.State = &ec2.InstanceState{Code: aws.Int64(48), Name: aws.String("running")}
ec2Instance.Tags = tags
@ -862,24 +885,25 @@ func TestGetInstanceByNodeNameBatching(t *testing.T) {
}
instances, err := c.getInstancesByNodeNames(nodeNames)
assert.Nil(t, err, "Error getting instances by nodeNames %v: %v", nodeNames, err)
assert.NotEmpty(t, instances)
assert.Equal(t, 200, len(instances), "Expected 200 but got less")
}
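The 200-node setup exercises the batching inside getInstancesByNodeNames: a DescribeInstances filter accepts only a bounded number of values, so node names must be chunked across calls. A hedged sketch of the chunking idea; the real batch size lives in the implementation and is not shown in this diff:

func batchNames(names []string, size int) [][]string {
	var batches [][]string
	for len(names) > size {
		batches = append(batches, names[:size])
		names = names[size:]
	}
	return append(batches, names) // final, possibly short, batch
}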
func TestGetVolumeLabels(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
volumeId := awsVolumeID("vol-VolumeId")
expectedVolumeRequest := &ec2.DescribeVolumesInput{VolumeIds: []*string{volumeId.awsString()}}
volumeID := EBSVolumeID("vol-VolumeId")
expectedVolumeRequest := &ec2.DescribeVolumesInput{VolumeIds: []*string{volumeID.awsString()}}
awsServices.ec2.(*MockedFakeEC2).On("DescribeVolumes", expectedVolumeRequest).Return([]*ec2.Volume{
{
VolumeId: volumeId.awsString(),
VolumeId: volumeID.awsString(),
AvailabilityZone: aws.String("us-east-1a"),
},
})
labels, err := c.GetVolumeLabels(KubernetesVolumeID("aws:///" + string(volumeId)))
labels, err := c.GetVolumeLabels(KubernetesVolumeID("aws:///" + string(volumeID)))
assert.Nil(t, err, "Error creating Volume %v", err)
assert.Equal(t, map[string]string{
@ -889,7 +913,7 @@ func TestGetVolumeLabels(t *testing.T) {
}
func TestDescribeLoadBalancerOnDelete(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
@ -897,7 +921,7 @@ func TestDescribeLoadBalancerOnDelete(t *testing.T) {
}
func TestDescribeLoadBalancerOnUpdate(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
@ -905,7 +929,7 @@ func TestDescribeLoadBalancerOnUpdate(t *testing.T) {
}
func TestDescribeLoadBalancerOnGet(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
@ -913,7 +937,7 @@ func TestDescribeLoadBalancerOnGet(t *testing.T) {
}
func TestDescribeLoadBalancerOnEnsure(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
@ -1104,7 +1128,7 @@ func TestGetLoadBalancerAdditionalTags(t *testing.T) {
{
Annotations: map[string]string{
ServiceAnnotationLoadBalancerAdditionalTags: "Key1=, Key2=Val2",
"anotherKey": "anotherValue",
"anotherKey": "anotherValue",
},
Tags: map[string]string{
"Key1": "",
@ -1145,7 +1169,7 @@ func TestGetLoadBalancerAdditionalTags(t *testing.T) {
}
func TestLBExtraSecurityGroupsAnnotation(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
sg1 := map[string]string{ServiceAnnotationLoadBalancerExtraSecurityGroups: "sg-000001"}
@ -1164,7 +1188,7 @@ func TestLBExtraSecurityGroupsAnnotation(t *testing.T) {
{"Multiple SGs specified", sg3, []string{sg1[ServiceAnnotationLoadBalancerExtraSecurityGroups], sg2[ServiceAnnotationLoadBalancerExtraSecurityGroups]}},
}
awsServices.ec2.(*MockedFakeEC2).expectDescribeSecurityGroups(TestClusterId, "k8s-elb-aid", "cluster.test")
awsServices.ec2.(*MockedFakeEC2).expectDescribeSecurityGroups(TestClusterID, "k8s-elb-aid")
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@ -1179,10 +1203,42 @@ func TestLBExtraSecurityGroupsAnnotation(t *testing.T) {
}
}
func TestLBSecurityGroupsAnnotation(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
sg1 := map[string]string{ServiceAnnotationLoadBalancerSecurityGroups: "sg-000001"}
sg2 := map[string]string{ServiceAnnotationLoadBalancerSecurityGroups: "sg-000002"}
sg3 := map[string]string{ServiceAnnotationLoadBalancerSecurityGroups: "sg-000001, sg-000002"}
tests := []struct {
name string
annotations map[string]string
expectedSGs []string
}{
{"SG specified", sg1, []string{sg1[ServiceAnnotationLoadBalancerSecurityGroups]}},
{"Multiple SGs specified", sg3, []string{sg1[ServiceAnnotationLoadBalancerSecurityGroups], sg2[ServiceAnnotationLoadBalancerSecurityGroups]}},
}
awsServices.ec2.(*MockedFakeEC2).expectDescribeSecurityGroups(TestClusterID, "k8s-elb-aid")
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
serviceName := types.NamespacedName{Namespace: "default", Name: "myservice"}
sgList, err := c.buildELBSecurityGroupList(serviceName, "aid", test.annotations)
assert.NoError(t, err, "buildELBSecurityGroupList failed")
assert.True(t, sets.NewString(test.expectedSGs...).Equal(sets.NewString(sgList...)),
"Security Groups expected=%q , returned=%q", test.expectedSGs, sgList)
})
}
}
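This new test covers the security-groups annotation that replaces the provider-managed group outright, in contrast to the extra-security-groups annotation tested just above, which appends to it. A sketch of how a test Service might carry it; the literal key behind the constant is defined elsewhere in this package:

func exampleSecurityGroupAnnotations() map[string]string {
	return map[string]string{
		// Replaces the cloud provider's managed security group entirely;
		// ...ExtraSecurityGroups (tested earlier) appends instead.
		ServiceAnnotationLoadBalancerSecurityGroups: "sg-000001, sg-000002",
	}
}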
// Test that we can add a load balancer tag
func TestAddLoadBalancerTags(t *testing.T) {
loadBalancerName := "test-elb"
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
want := make(map[string]string)
@ -1238,7 +1294,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
expectedHC := *defaultHC
@ -1256,7 +1312,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
}
t.Run("does not make an API call if the current health check is the same", func(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
expectedHC := *defaultHC
@ -1278,7 +1334,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
})
t.Run("validates resulting expected health check before making an API call", func(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
expectedHC := *defaultHC
@ -1294,7 +1350,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
})
t.Run("handles invalid override values", func(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
annotations := map[string]string{ServiceAnnotationLoadBalancerHCTimeout: "3.3"}
@ -1306,7 +1362,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
})
t.Run("returns error when updating the health check fails", func(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices := newMockedFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
returnErr := fmt.Errorf("throttling error")
@ -1342,6 +1398,38 @@ func TestFindSecurityGroupForInstanceMultipleTagged(t *testing.T) {
assert.Contains(t, err.Error(), "sg123(another_group)")
}
func TestCreateDisk(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterID)
c, _ := newAWSCloud(CloudConfig{}, awsServices)
volumeOptions := &VolumeOptions{
AvailabilityZone: "us-east-1a",
CapacityGB: 10,
}
request := &ec2.CreateVolumeInput{
AvailabilityZone: aws.String("us-east-1a"),
Encrypted: aws.Bool(false),
VolumeType: aws.String(DefaultVolumeType),
Size: aws.Int64(10),
TagSpecifications: []*ec2.TagSpecification{
{ResourceType: aws.String(ec2.ResourceTypeVolume), Tags: []*ec2.Tag{
{Key: aws.String(TagNameKubernetesClusterLegacy), Value: aws.String(TestClusterID)},
{Key: aws.String(fmt.Sprintf("%s%s", TagNameKubernetesClusterPrefix, TestClusterID)), Value: aws.String(ResourceLifecycleOwned)},
}},
},
}
volume := &ec2.Volume{
AvailabilityZone: aws.String("us-east-1a"),
VolumeId: aws.String("vol-volumeId0"),
}
awsServices.ec2.(*MockedFakeEC2).On("CreateVolume", request).Return(volume, nil)
volumeID, err := c.CreateDisk(volumeOptions)
assert.Nil(t, err, "Error creating disk: %v", err)
assert.Equal(t, volumeID, KubernetesVolumeID("aws://us-east-1a/vol-volumeId0"))
awsServices.ec2.(*MockedFakeEC2).AssertExpectations(t)
}
func newMockedFakeAWSServices(id string) *FakeAWSServices {
s := NewFakeAWSServices(id)
s.ec2 = &MockedFakeEC2{FakeEC2Impl: s.ec2.(*FakeEC2Impl)}

View File

@ -18,6 +18,7 @@ package aws
import (
"github.com/aws/aws-sdk-go/aws"
"k8s.io/apimachinery/pkg/util/sets"
)

View File

@ -27,18 +27,20 @@ import (
// can be used for anything that DeviceAllocator user wants.
// Only the relevant part of device name should be in the map, e.g. "ba" for
// "/dev/xvdba".
type ExistingDevices map[mountDevice]awsVolumeID
type ExistingDevices map[mountDevice]EBSVolumeID
// On AWS, we should assign new (not yet used) device names to attached volumes.
// If we reuse a previously used name, we may get the volume "attaching" forever,
// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/.
// DeviceAllocator finds available device name, taking into account already
// assigned device names from ExistingDevices map. It tries to find the next
// device name to the previously assigned one (from previous DeviceAllocator
// call), so all available device names are used eventually and it minimizes
// device name reuse.
//
// All these allocations are in-memory, nothing is written to / read from
// /dev directory.
//
// On AWS, we should assign new (not yet used) device names to attached volumes.
// If we reuse a previously used name, we may get the volume "attaching" forever,
// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/.
type DeviceAllocator interface {
// GetNext returns a free device name or error when there is no free device
// name. Only the device suffix is returned, e.g. "ba" for "/dev/xvdba".
@ -74,9 +76,9 @@ func (p devicePairList) Len() int { return len(p) }
func (p devicePairList) Less(i, j int) bool { return p[i].deviceIndex < p[j].deviceIndex }
func (p devicePairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Allocates device names according to scheme ba..bz, ca..cz
// it moves along the ring and always picks next device until
// device list is exhausted.
// NewDeviceAllocator allocates device names according to the scheme ba..bz,
// ca..cz; it moves along the ring and always picks the next device until the
// device list is exhausted.
func NewDeviceAllocator() DeviceAllocator {
possibleDevices := make(map[mountDevice]int)
for _, firstChar := range []rune{'b', 'c'} {
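The constructor seeds the ring with the two-letter suffixes the allocator hands out. A sketch of the namespace it enumerates (names only; nothing touches /dev):

func exampleDeviceNamespace() {
	for _, first := range []rune{'b', 'c'} {
		for second := 'a'; second <= 'z'; second++ {
			suffix := string([]rune{first, second}) // "ba".."bz", then "ca".."cz"
			_ = suffix                              // maps to /dev/xvdba and so on
		}
	}
}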

View File

@ -19,15 +19,16 @@ package aws
import (
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"regexp"
"sync"
"time"
)
// awsInstanceRegMatch represents Regex Match for AWS instance.
@ -109,12 +110,12 @@ func mapToAWSInstanceIDsTolerant(nodes []*v1.Node) []awsInstanceID {
var instanceIDs []awsInstanceID
for _, node := range nodes {
if node.Spec.ProviderID == "" {
glog.Warningf("node %q did not have ProviderID set", node.Name)
klog.Warningf("node %q did not have ProviderID set", node.Name)
continue
}
instanceID, err := kubernetesInstanceID(node.Spec.ProviderID).mapToAWSInstanceID()
if err != nil {
glog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name)
klog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name)
continue
}
instanceIDs = append(instanceIDs, instanceID)
@ -155,7 +156,7 @@ type instanceCache struct {
func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, error) {
now := time.Now()
glog.V(4).Infof("EC2 DescribeInstances - fetching all instances")
klog.V(4).Infof("EC2 DescribeInstances - fetching all instances")
filters := []*ec2.Filter{}
instances, err := c.cloud.describeInstances(filters)
@ -176,7 +177,7 @@ func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, e
if c.snapshot != nil && snapshot.olderThan(c.snapshot) {
// If this happens a lot, we could run this function in a mutex and only return one result
glog.Infof("Not caching concurrent AWS DescribeInstances results")
klog.Infof("Not caching concurrent AWS DescribeInstances results")
} else {
c.snapshot = snapshot
}
@ -209,7 +210,7 @@ func (c *instanceCache) describeAllInstancesCached(criteria cacheCriteria) (*all
return nil, err
}
} else {
glog.V(6).Infof("EC2 DescribeInstances - using cached results")
klog.V(6).Infof("EC2 DescribeInstances - using cached results")
}
return snapshot, nil
@ -235,7 +236,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool {
// Sub() is technically broken by time changes until we have monotonic time
now := time.Now()
if now.Sub(s.timestamp) > criteria.MaxAge {
glog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge)
klog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge)
return false
}
}
@ -243,7 +244,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool {
if len(criteria.HasInstances) != 0 {
for _, id := range criteria.HasInstances {
if nil == s.instances[id] {
glog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id)
klog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id)
return false
}
}
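The cached path honors two criteria visible above: MaxAge bounds staleness, and HasInstances forces a refresh when the snapshot lacks any listed instance. A hedged caller sketch, assuming Cloud keeps its cache in an instanceCache field as elsewhere in this file; the instance ID is a placeholder:

func (c *Cloud) exampleCachedDescribe() {
	snapshot, err := c.instanceCache.describeAllInstancesCached(cacheCriteria{
		MaxAge:       10 * time.Second,
		HasInstances: []awsInstanceID{awsInstanceID("i-0123456789abcdef0")},
	})
	if err == nil {
		_ = snapshot // fresh enough and contains the required instance
	}
}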

View File

@ -17,15 +17,17 @@ limitations under the License.
package aws
import (
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
"testing"
"time"
)
func TestParseInstance(t *testing.T) {
func TestMapToAWSInstanceIDs(t *testing.T) {
tests := []struct {
Kubernetes kubernetesInstanceID
Aws awsInstanceID

View File

@ -18,23 +18,23 @@ package aws
import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/golang/glog"
"k8s.io/klog"
)
// Handler for aws-sdk-go that logs all requests
func awsHandlerLogger(req *request.Request) {
service, name := awsServiceAndName(req)
glog.V(4).Infof("AWS request: %s %s", service, name)
klog.V(4).Infof("AWS request: %s %s", service, name)
}
func awsSendHandlerLogger(req *request.Request) {
service, name := awsServiceAndName(req)
glog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params)
klog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params)
}
func awsValidateResponseHandlerLogger(req *request.Request) {
service, name := awsServiceAndName(req)
glog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status)
klog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status)
}
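These loggers hook into aws-sdk-go's request lifecycle and only emit at verbosity 4 and above. A wiring sketch under the assumption that they are attached to a session's handler lists, whose names come from the SDK's request package:

import (
	"github.com/aws/aws-sdk-go/aws/session"
)

func newLoggedSession() *session.Session {
	sess := session.Must(session.NewSession())
	// Attach each logger at the matching point of the request lifecycle.
	sess.Handlers.Sign.PushFront(awsHandlerLogger)
	sess.Handlers.Send.PushFront(awsSendHandlerLogger)
	sess.Handlers.ValidateResponse.PushFront(awsValidateResponseHandlerLogger)
	return sess
}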
func awsServiceAndName(req *request.Request) (string, string) {

View File

@ -17,18 +17,21 @@ limitations under the License.
package aws
import (
"github.com/golang/glog"
"sync"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/sets"
awscredentialprovider "k8s.io/kubernetes/pkg/credentialprovider/aws"
"sync"
)
// WellKnownRegions is the complete list of regions known to the AWS cloudprovider
// wellKnownRegions is the complete list of regions known to the AWS cloudprovider
// and credentialprovider.
var WellKnownRegions = [...]string{
var wellKnownRegions = [...]string{
// from `aws ec2 describe-regions --region us-east-1 --query Regions[].RegionName | sort`
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
@ -36,6 +39,7 @@ var WellKnownRegions = [...]string{
"eu-central-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"sa-east-1",
"us-east-1",
"us-east-2",
@ -44,6 +48,7 @@ var WellKnownRegions = [...]string{
// these are not registered in many / most accounts
"cn-north-1",
"cn-northwest-1",
"us-gov-west-1",
}
@ -53,12 +58,12 @@ var awsRegionsMutex sync.Mutex
// awsRegions is a set of recognized regions
var awsRegions sets.String
// RecognizeRegion is called for each AWS region we know about.
// recognizeRegion is called for each AWS region we know about.
// It currently registers a credential provider for that region.
// There are two paths to discovering a region:
// * we hard-code some well-known regions
// * if a region is discovered from instance metadata, we add that
func RecognizeRegion(region string) {
func recognizeRegion(region string) {
awsRegionsMutex.Lock()
defer awsRegionsMutex.Unlock()
@ -67,21 +72,21 @@ func RecognizeRegion(region string) {
}
if awsRegions.Has(region) {
glog.V(6).Infof("found AWS region %q again - ignoring", region)
klog.V(6).Infof("found AWS region %q again - ignoring", region)
return
}
glog.V(4).Infof("found AWS region %q", region)
klog.V(4).Infof("found AWS region %q", region)
awscredentialprovider.RegisterCredentialsProvider(region)
awsRegions.Insert(region)
}
// RecognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion
func RecognizeWellKnownRegions() {
for _, region := range WellKnownRegions {
RecognizeRegion(region)
// recognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion
func recognizeWellKnownRegions() {
for _, region := range wellKnownRegions {
recognizeRegion(region)
}
}
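Beyond the glog swap, this hunk unexports RecognizeRegion and friends, keeping region registration internal to the package. The function itself is an idempotent, mutex-guarded insert into a string set; a generic sketch of that guard pattern, with hypothetical names:

var (
	mu   sync.Mutex
	seen = sets.NewString()
)

func registerOnce(region string, register func(string)) {
	mu.Lock()
	defer mu.Unlock()
	if seen.Has(region) {
		return // already recognized; nothing to do
	}
	register(region)
	seen.Insert(region)
}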

View File

@ -22,7 +22,7 @@ import (
// TestRegions does basic checking of region verification / addition
func TestRegions(t *testing.T) {
RecognizeWellKnownRegions()
recognizeWellKnownRegions()
tests := []struct {
Add string
@ -55,7 +55,7 @@ func TestRegions(t *testing.T) {
for _, test := range tests {
if test.Add != "" {
RecognizeRegion(test.Add)
recognizeRegion(test.Add)
}
if test.Lookup != "" {
@ -73,7 +73,7 @@ func TestRecognizesNewRegion(t *testing.T) {
t.Fatalf("region already valid: %q", region)
}
awsServices := NewFakeAWSServices(TestClusterId).WithAz(region + "a")
awsServices := NewFakeAWSServices(TestClusterID).WithAz(region + "a")
_, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("error building AWS cloud: %v", err)

View File

@ -24,7 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -40,19 +40,19 @@ type CrossRequestRetryDelay struct {
backoff Backoff
}
// Create a new CrossRequestRetryDelay
// NewCrossRequestRetryDelay creates a new CrossRequestRetryDelay
func NewCrossRequestRetryDelay() *CrossRequestRetryDelay {
c := &CrossRequestRetryDelay{}
c.backoff.init(decayIntervalSeconds, decayFraction, maxDelay)
return c
}
// Added to the Sign chain; called before each request
// BeforeSign is added to the Sign chain; called before each request
func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
now := time.Now()
delay := c.backoff.ComputeDelayForRequest(now)
if delay > 0 {
glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
klog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
describeRequest(r), delay.String())
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
@ -84,7 +84,7 @@ func describeRequest(r *request.Request) string {
return service + "::" + operationName(r)
}
// Added to the AfterRetry chain; called after any error
// AfterRetry is added to the AfterRetry chain; called after any error
func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
if r.Error == nil {
return
@ -96,7 +96,7 @@ func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
if awsError.Code() == "RequestLimitExceeded" {
c.backoff.ReportError()
recordAWSThrottlesMetric(operationName(r))
glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
klog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
describeRequest(r))
}
}
@ -126,7 +126,8 @@ func (b *Backoff) init(decayIntervalSeconds int, decayFraction float64, maxDelay
b.maxDelay = maxDelay
}
// Computes the delay required for a request, also updating internal state to count this request
// ComputeDelayForRequest computes the delay required for a request and also
// updates internal state to count this request
func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration {
b.mutex.Lock()
defer b.mutex.Unlock()
@ -165,7 +166,7 @@ func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration {
return time.Second * time.Duration(int(delay.Seconds()))
}
// Called when we observe a throttling error
// ReportError is called when we observe a throttling error
func (b *Backoff) ReportError() {
b.mutex.Lock()
defer b.mutex.Unlock()

View File

@ -23,8 +23,10 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
)
// IPPermissionSet maps IP strings of strings to EC2 IpPermissions
type IPPermissionSet map[string]*ec2.IpPermission
// NewIPPermissionSet creates a new IPPermissionSet
func NewIPPermissionSet(items ...*ec2.IpPermission) IPPermissionSet {
s := make(IPPermissionSet)
s.Insert(items...)
@ -97,10 +99,10 @@ func (s IPPermissionSet) List() []*ec2.IpPermission {
return res
}
// IsSuperset returns true if and only if s1 is a superset of s2.
func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool {
// IsSuperset returns true if and only if s is a superset of s2.
func (s IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool {
for k := range s2 {
_, found := s1[k]
_, found := s[k]
if !found {
return false
}
@ -108,11 +110,11 @@ func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool {
return true
}
// Equal returns true if and only if s1 is equal (as a set) to s2.
// Equal returns true if and only if s is equal (as a set) to s2.
// Two sets are equal if their membership is identical.
// (In practice, this means same elements, order doesn't matter)
func (s1 IPPermissionSet) Equal(s2 IPPermissionSet) bool {
return len(s1) == len(s2) && s1.IsSuperset(s2)
func (s IPPermissionSet) Equal(s2 IPPermissionSet) bool {
return len(s) == len(s2) && s.IsSuperset(s2)
}
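The receiver rename from s1 to s satisfies golint's receiver-name consistency check without changing behavior: the set is still a map keyed by a canonical string form of each permission, so superset and equality checks reduce to key lookups. A usage sketch, with the permission values as placeholders:

func exampleIPPermissionSets(permHTTP, permHTTPS *ec2.IpPermission) {
	a := NewIPPermissionSet(permHTTP, permHTTPS)
	b := NewIPPermissionSet(permHTTP)
	_ = a.IsSuperset(b) // true: every member of b is in a
	_ = a.Equal(b)      // false: memberships differ
}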
// Difference returns a set of objects that are not in s2

View File

@ -18,12 +18,12 @@ package aws
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/wait"
)
@ -38,6 +38,7 @@ const TagNameKubernetesClusterPrefix = "kubernetes.io/cluster/"
// did not allow shared resources.
const TagNameKubernetesClusterLegacy = "KubernetesCluster"
// ResourceLifecycle is the cluster lifecycle state used in tagging
type ResourceLifecycle string
const (
@ -73,7 +74,7 @@ func (t *awsTagging) init(legacyClusterID string, clusterID string) error {
t.ClusterID = clusterID
if clusterID != "" {
glog.Infof("AWS cloud filtering on ClusterID: %v", clusterID)
klog.Infof("AWS cloud filtering on ClusterID: %v", clusterID)
} else {
return fmt.Errorf("AWS cloud failed to find ClusterID")
}
@ -91,7 +92,7 @@ func (t *awsTagging) initFromTags(tags []*ec2.Tag) error {
}
if legacyClusterID == "" && newClusterID == "" {
glog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...")
klog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...")
}
return t.init(legacyClusterID, newClusterID)
@ -152,13 +153,13 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool {
// Ensure that a resource has the correct tags
// If it has no tags, we assume that this was a problem caused by an error in between creation and tagging,
// and we add the tags. If it has a different cluster's tags, that is an error.
func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error {
func (t *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error {
actualTagMap := make(map[string]string)
for _, tag := range observedTags {
actualTagMap[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
}
expectedTags := c.buildTags(lifecycle, additionalTags)
expectedTags := t.buildTags(lifecycle, additionalTags)
addTags := make(map[string]string)
for k, expected := range expectedTags {
@ -167,7 +168,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy
continue
}
if actual == "" {
glog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected)
klog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected)
addTags[k] = expected
} else {
return fmt.Errorf("resource %q has tag belonging to another cluster: %q=%q (expected %q)", resourceID, k, actual, expected)
@ -178,7 +179,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy
return nil
}
if err := c.createTags(client, resourceID, lifecycle, addTags); err != nil {
if err := t.createTags(client, resourceID, lifecycle, addTags); err != nil {
return fmt.Errorf("error adding missing tags to resource %q: %q", resourceID, err)
}
@ -222,7 +223,7 @@ func (t *awsTagging) createTags(client EC2, resourceID string, lifecycle Resourc
// We could check that the error is retryable, but the error code changes based on what we are tagging
// SecurityGroup: InvalidGroup.NotFound
glog.V(2).Infof("Failed to create tags; will retry. Error was %q", err)
klog.V(2).Infof("Failed to create tags; will retry. Error was %q", err)
lastErr = err
return false, nil
})

View File

@ -17,20 +17,21 @@ limitations under the License.
package aws
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"testing"
)
func TestFilterTags(t *testing.T) {
awsServices := NewFakeAWSServices(TestClusterId)
awsServices := NewFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
if c.tagging.ClusterID != TestClusterId {
if c.tagging.ClusterID != TestClusterID {
t.Errorf("unexpected ClusterID: %v", c.tagging.ClusterID)
}
}
@ -57,6 +58,12 @@ func TestFindClusterID(t *testing.T) {
},
ExpectedNew: "a",
},
{
Tags: map[string]string{
TagNameKubernetesClusterPrefix + "a": "shared",
},
ExpectedNew: "a",
},
{
Tags: map[string]string{
TagNameKubernetesClusterPrefix + "a": "",
@ -108,3 +115,68 @@ func TestFindClusterID(t *testing.T) {
}
}
}
func TestHasClusterTag(t *testing.T) {
awsServices := NewFakeAWSServices(TestClusterID)
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
grid := []struct {
Tags map[string]string
Expected bool
}{
{
Tags: map[string]string{},
},
{
Tags: map[string]string{
TagNameKubernetesClusterLegacy: TestClusterID,
},
Expected: true,
},
{
Tags: map[string]string{
TagNameKubernetesClusterLegacy: "a",
},
Expected: false,
},
{
Tags: map[string]string{
TagNameKubernetesClusterPrefix + TestClusterID: "owned",
},
Expected: true,
},
{
Tags: map[string]string{
TagNameKubernetesClusterPrefix + TestClusterID: "",
},
Expected: true,
},
{
Tags: map[string]string{
TagNameKubernetesClusterLegacy: "a",
TagNameKubernetesClusterPrefix + TestClusterID: "shared",
},
Expected: true,
},
{
Tags: map[string]string{
TagNameKubernetesClusterPrefix + TestClusterID: "shared",
TagNameKubernetesClusterPrefix + "b": "shared",
},
Expected: true,
},
}
for _, g := range grid {
var ec2Tags []*ec2.Tag
for k, v := range g.Tags {
ec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
}
result := c.tagging.hasClusterTag(ec2Tags)
if result != g.Expected {
t.Errorf("Unexpected result for tags %v: %t", g.Tags, result)
}
}
}

View File

@ -24,21 +24,22 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
)
// awsVolumeRegMatch represents Regex Match for AWS volume.
var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$")
// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678
// The "traditional" format is "vol-12345678"
// A new longer format is also being introduced: "vol-12345678abcdef01"
// We should not assume anything about the length or format, though it seems
// reasonable to assume that volumes will continue to start with "vol-".
type awsVolumeID string
// EBSVolumeID represents the ID of the volume in the AWS API, e.g.
// "vol-12345678". The "traditional" format is "vol-12345678"; a new, longer
// format is also being introduced: "vol-12345678abcdef01". We should not
// assume anything about the length or format, though it seems reasonable to
// assume that volumes will continue to start with "vol-".
type EBSVolumeID string
func (i awsVolumeID) awsString() *string {
func (i EBSVolumeID) awsString() *string {
return aws.String(string(i))
}
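// Both the short and the long form satisfy awsVolumeRegMatch above. A
// standalone, illustrative sketch of how that regex classifies candidate IDs
// (the program and its sample IDs are examples only):

package main

import (
	"fmt"
	"regexp"
)

var volRegMatch = regexp.MustCompile("^vol-[^/]*$")

func main() {
	for _, id := range []string{
		"vol-12345678",                  // traditional format: matches
		"vol-12345678abcdef01",          // longer format: matches
		"aws://us-east-1a/vol-12345678", // URL-style Kubernetes name: does not match
	} {
		fmt.Printf("%-32s matches=%t\n", id, volRegMatch.MatchString(id))
	}
}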
@ -59,8 +60,8 @@ type diskInfo struct {
disk *awsDisk
}
// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) {
// MapToAWSVolumeID extracts the EBSVolumeID from the KubernetesVolumeID
func (name KubernetesVolumeID) MapToAWSVolumeID() (EBSVolumeID, error) {
// name looks like aws://availability-zone/awsVolumeId
// The original idea of the URL-style name was to put the AZ into the
@ -96,9 +97,10 @@ func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) {
return "", fmt.Errorf("Invalid format for AWS volume (%s)", name)
}
return awsVolumeID(awsID), nil
return EBSVolumeID(awsID), nil
}
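// A hedged sketch of the URL-style parsing described above, simplified from
// the method itself (the real code also validates against awsVolumeRegMatch
// and handles more malformed inputs; parseKubernetesVolumeID is hypothetical):

import (
	"fmt"
	"strings"
)

func parseKubernetesVolumeID(name string) (EBSVolumeID, error) {
	// name looks like aws://availability-zone/vol-... or a bare vol-...
	s := strings.TrimPrefix(name, "aws://")
	if i := strings.LastIndex(s, "/"); i >= 0 {
		s = s[i+1:]
	}
	if !strings.HasPrefix(s, "vol-") {
		return "", fmt.Errorf("invalid format for AWS volume (%s)", name)
	}
	return EBSVolumeID(s), nil
}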
// GetAWSVolumeID converts a Kubernetes volume ID to an AWS volume ID
func GetAWSVolumeID(kubeVolumeID string) (string, error) {
kid := KubernetesVolumeID(kubeVolumeID)
awsID, err := kid.MapToAWSVolumeID()
@ -119,7 +121,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
info, err := disk.describeVolume()
if err != nil {
glog.Warning("Error describing volume %s with %v", diskName, err)
klog.Warningf("Error describing volume %s with %v", diskName, err)
awsDiskInfo.volumeState = "unknown"
return awsDiskInfo, false, err
}
@ -136,7 +138,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
// has been deleted
if err != nil {
fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName)
glog.Warning(fetchErr)
klog.Warning(fetchErr)
return awsDiskInfo, false, fetchErr
}

View File

@ -37,33 +37,39 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)
@ -72,6 +78,7 @@ go_test(
srcs = [
"azure_backoff_test.go",
"azure_cache_test.go",
"azure_instances_test.go",
"azure_loadbalancer_test.go",
"azure_metrics_test.go",
"azure_routes_test.go",
@ -82,22 +89,24 @@ go_test(
"azure_vmss_cache_test.go",
"azure_vmss_test.go",
"azure_wrap_test.go",
"azure_zones_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)

View File

@ -1,16 +1,13 @@
approvers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- karataliu
- khenidak
reviewers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- justaugustus
- karataliu
- khenidak

View File

@ -8,8 +8,8 @@ go_library(
deps = [
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -24,8 +24,8 @@ import (
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/golang/glog"
"golang.org/x/crypto/pkcs12"
"k8s.io/klog"
)
// AzureAuthConfig holds auth related part of cloud config
@ -44,30 +44,41 @@ type AzureAuthConfig struct {
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
// Use managed service identity for the virtual machine to access Azure ARM APIs
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
// UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
// More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
// For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
UserAssignedIdentityID string `json:"userAssignedIdentityID" yaml:"userAssignedIdentityID"`
// The ID of the Azure Subscription that the cluster is deployed in
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
}
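// A hedged sketch of a cloud-config fragment exercising the MSI fields above;
// the IDs are placeholders, a real config carries many more fields, and the
// provider itself parses the full config rather than this helper:

import "encoding/json"

const sampleAuthConfig = `{
	"useManagedIdentityExtension": true,
	"userAssignedIdentityID": "11111111-1111-1111-1111-111111111111",
	"subscriptionId": "00000000-0000-0000-0000-000000000000"
}`

func loadSampleAuthConfig() (*AzureAuthConfig, error) {
	var cfg AzureAuthConfig
	if err := json.Unmarshal([]byte(sampleAuthConfig), &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}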
// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}
if config.UseManagedIdentityExtension {
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
msiEndpoint, err := adal.GetMSIVMEndpoint()
if err != nil {
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
}
if len(config.UserAssignedIdentityID) > 0 {
klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
env.ServiceManagementEndpoint,
config.UserAssignedIdentityID)
}
klog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
return adal.NewServicePrincipalTokenFromMSI(
msiEndpoint,
env.ServiceManagementEndpoint)
}
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}
if len(config.AADClientSecret) > 0 {
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
return adal.NewServicePrincipalToken(
*oauthConfig,
config.AADClientID,
@ -76,7 +87,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
}
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
certData, err := ioutil.ReadFile(config.AADClientCertPath)
if err != nil {
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)

View File

@ -21,19 +21,28 @@ import (
"io"
"io/ioutil"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
"k8s.io/kubernetes/pkg/controller"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/version"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"k8s.io/klog"
"sigs.k8s.io/yaml"
)
const (
@ -52,6 +61,9 @@ const (
loadBalancerSkuBasic = "basic"
loadBalancerSkuStandard = "standard"
externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
managedByAzureLabel = "kubernetes.azure.com/managed"
)
var (
@ -59,6 +71,9 @@ var (
defaultExcludeMasterFromStandardLB = true
)
// Azure implements PVLabeler.
var _ cloudprovider.PVLabeler = (*Cloud)(nil)
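// The blank-identifier assignment above is Go's compile-time check that a
// type satisfies an interface. A toy sketch of the idiom (volumeLabeler and
// toyCloud are illustrative; the real cloudprovider.PVLabeler has a richer
// signature):

type volumeLabeler interface {
	VolumeLabels() map[string]string
}

type toyCloud struct{}

func (c *toyCloud) VolumeLabels() map[string]string {
	return map[string]string{"zone": "example-zone"}
}

// Compilation fails here if *toyCloud ever stops implementing volumeLabeler.
var _ volumeLabeler = (*toyCloud)(nil)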
// Config holds the configuration parsed from the --cloud-config flag
// All fields are required unless otherwise specified
type Config struct {
@ -144,13 +159,37 @@ type Cloud struct {
DisksClient DisksClient
FileClient FileClient
resourceRequestBackoff wait.Backoff
metadata *InstanceMetadata
metadata *InstanceMetadataService
vmSet VMSet
// Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes.
nodeCachesLock sync.Mutex
// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
// it is updated by the nodeInformer
nodeZones map[string]sets.String
// nodeResourceGroups holds nodes external resource groups
nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String
// nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced
// routeCIDRsLock holds lock for routeCIDRs cache.
routeCIDRsLock sync.Mutex
// routeCIDRs holds cache for route CIDRs.
routeCIDRs map[string]string
// Clients for vmss.
VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient
VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient
// client for vm sizes list
VirtualMachineSizesClient VirtualMachineSizesClient
kubeClient clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
vmCache *timedCache
lbCache *timedCache
nsgCache *timedCache
@ -216,11 +255,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)
glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
klog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPS,
config.CloudProviderRateLimitBucket)
glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
klog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
config.CloudProviderRateLimitQPSWrite,
config.CloudProviderRateLimitBucketWrite)
}
@ -238,8 +277,12 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
rateLimiterWriter: operationPollRateLimiterWrite,
}
az := Cloud{
Config: *config,
Environment: *env,
Config: *config,
Environment: *env,
nodeZones: map[string]sets.String{},
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{},
DisksClient: newAzDisksClient(azClientConfig),
RoutesClient: newAzRoutesClient(azClientConfig),
@ -251,6 +294,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
StorageAccountClient: newAzStorageAccountClient(azClientConfig),
VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig),
PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig),
VirtualMachineSizesClient: newAzVirtualMachineSizesClient(azClientConfig),
VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig),
VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
FileClient: &azureFileClient{env: *env},
@ -277,14 +321,17 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second,
Jitter: az.CloudProviderBackoffJitter,
}
glog.V(2).Infof("Azure cloudprovider using retry backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
az.CloudProviderBackoffRetries,
az.CloudProviderBackoffExponent,
az.CloudProviderBackoffDuration,
az.CloudProviderBackoffJitter)
}
az.metadata = NewInstanceMetadata()
az.metadata, err = NewInstanceMetadataService(metadataURL)
if err != nil {
return nil, err
}
if az.MaximumLoadBalancerRuleCount == 0 {
az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
@ -346,7 +393,12 @@ func parseConfig(configReader io.Reader) (*Config, error) {
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider")
az.eventBroadcaster = record.NewBroadcaster()
az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")})
az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"})
}
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
@ -424,3 +476,194 @@ func initDiskControllers(az *Cloud) error {
return nil
}
// SetInformers sets informers for Azure cloud provider.
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
klog.Infof("Setting up informers for Azure cloud provider")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*v1.Node)
az.updateNodeCaches(nil, node)
},
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node)
if newNode.Labels[kubeletapis.LabelZoneFailureDomain] ==
prevNode.Labels[kubeletapis.LabelZoneFailureDomain] {
return
}
az.updateNodeCaches(prevNode, newNode)
},
DeleteFunc: func(obj interface{}) {
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here
// and we need to handle that correctly.
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
az.updateNodeCaches(node, nil)
},
})
az.nodeInformerSynced = nodeInformer.HasSynced
}
// updateNodeCaches updates local cache for node's zones and external resource groups.
func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if prevNode != nil {
// Remove from nodeZones cache.
prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(prevZone) {
az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
if az.nodeZones[prevZone].Len() == 0 {
az.nodeZones[prevZone] = nil
}
}
// Remove from nodeResourceGroups cache.
_, ok = prevNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok {
delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name)
}
// Remove from unmanagedNodes cache.
managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
}
}
if newNode != nil {
// Add to nodeZones cache.
newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(newZone) {
if az.nodeZones[newZone] == nil {
az.nodeZones[newZone] = sets.NewString()
}
az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
}
// Add to nodeResourceGroups cache.
newRG, ok := newNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok && len(newRG) > 0 {
az.nodeResourceGroups[newNode.ObjectMeta.Name] = newRG
}
// Add to unmanagedNodes cache.
managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
}
}
}
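// The zone bookkeeping above amounts to moving a node name between
// sets.String values keyed by zone. A small sketch of that pattern in
// isolation (moveNodeZone is illustrative, not part of this package):

import "k8s.io/apimachinery/pkg/util/sets"

func moveNodeZone(nodeZones map[string]sets.String, node, oldZone, newZone string) {
	if s, ok := nodeZones[oldZone]; ok {
		s.Delete(node)
		if s.Len() == 0 {
			// Drop empty zone sets, mirroring updateNodeCaches above.
			nodeZones[oldZone] = nil
		}
	}
	if nodeZones[newZone] == nil {
		nodeZones[newZone] = sets.NewString()
	}
	nodeZones[newZone].Insert(node)
}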
// GetActiveZones returns all the zones in which k8s nodes are currently running.
func (az *Cloud) GetActiveZones() (sets.String, error) {
if az.nodeInformerSynced == nil {
return nil, fmt.Errorf("Azure cloud provider doesn't have informers set")
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones")
}
zones := sets.NewString()
for zone, nodes := range az.nodeZones {
if len(nodes) > 0 {
zones.Insert(zone)
}
}
return zones, nil
}
// GetLocation returns the location in which k8s cluster is currently running.
func (az *Cloud) GetLocation() string {
return az.Location
}
// GetNodeResourceGroup gets resource group for given node.
func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return az.ResourceGroup, nil
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return "", fmt.Errorf("node informer is not synced when trying to GetNodeResourceGroup")
}
// Return external resource group if it has been cached.
if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok {
return cachedRG, nil
}
// Return resource group from cloud provider options.
return az.ResourceGroup, nil
}
// GetResourceGroups returns a set of resource groups that all nodes are running on.
func (az *Cloud) GetResourceGroups() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return sets.NewString(az.ResourceGroup), nil
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups")
}
resourceGroups := sets.NewString(az.ResourceGroup)
for _, rg := range az.nodeResourceGroups {
resourceGroups.Insert(rg)
}
return resourceGroups, nil
}
// GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes).
func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return nil.
if az.nodeInformerSynced == nil {
return nil, nil
}
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes")
}
return sets.NewString(az.unmanagedNodes.List()...), nil
}
// ShouldNodeExcludedFromLoadBalancer returns true if the node is unmanaged or in an external resource group.
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
labels := node.ObjectMeta.Labels
if rg, ok := labels[externalResourceGroupLabel]; ok && rg != az.ResourceGroup {
return true
}
if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
return true
}
return false
}
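// A brief sketch of how the two labels drive exclusion; the node fixture is
// illustrative only:

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleExclusion(az *Cloud) bool {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "onprem-node-0",
			Labels: map[string]string{managedByAzureLabel: "false"},
		},
	}
	// Returns true: the node is explicitly marked as not managed by Azure.
	return az.ShouldNodeExcludedFromLoadBalancer(node)
}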

View File

@ -18,16 +18,18 @@ package azure
import (
"context"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)
// requestBackoff if backoff is disabled in cloud provider it
@ -45,6 +47,13 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
return resourceRequestBackoff
}
// Event creates an event for the specified object.
func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
if obj != nil && reason != "" {
az.eventRecorder.Event(obj, eventtype, reason, message)
}
}
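// Usage is fire-and-forget: the nil-object and empty-reason guards above make
// it safe to call unconditionally from the retry helpers below, e.g.
//
//	az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error())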
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
var machine compute.VirtualMachine
@ -55,10 +64,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
return true, cloudprovider.InstanceNotFound
}
if retryErr != nil {
glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
return false, nil
}
glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
return true, nil
})
if err == wait.ErrWaitTimeout {
@ -69,20 +78,20 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
}
// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
func (az *Cloud) VirtualMachineClientListWithRetry(resourceGroup string) ([]compute.VirtualMachine, error) {
allNodes := []compute.VirtualMachine{}
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, az.ResourceGroup)
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, resourceGroup)
if retryErr != nil {
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
klog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
resourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", az.ResourceGroup)
klog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup)
return true, nil
})
if err != nil {
@ -99,24 +108,24 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
var retryErr error
ip, publicIP, retryErr = az.getIPForMachine(name)
if retryErr != nil {
glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
return false, nil
}
glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
klog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
return true, nil
})
return ip, publicIP, err
}
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.SecurityGroup) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := processHTTPRetryResponse(resp, err)
klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.nsgCache.Delete(*sg.Name)
@ -126,14 +135,14 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
}
// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
func (az *Cloud) CreateOrUpdateLBWithRetry(service *v1.Service, lb network.LoadBalancer) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := processHTTPRetryResponse(resp, err)
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.lbCache.Delete(*lb.Name)
@ -143,7 +152,7 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
}
// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
func (az *Cloud) ListLBWithRetry(service *v1.Service) ([]network.LoadBalancer, error) {
var allLBs []network.LoadBalancer
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
@ -153,12 +162,13 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
if retryErr != nil {
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error())
klog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
klog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
return true, nil
})
if err != nil {
@ -169,7 +179,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
}
// ListPIPWithRetry lists the PIP resources in the given resource group
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
func (az *Cloud) ListPIPWithRetry(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
var allPIPs []network.PublicIPAddress
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
@ -179,12 +189,13 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd
allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if retryErr != nil {
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error())
klog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
pipResourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
klog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
return true, nil
})
if err != nil {
@ -195,48 +206,48 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd
}
// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
func (az *Cloud) CreateOrUpdatePIPWithRetry(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err)
})
}
// CreateOrUpdateInterfaceWithRetry invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(service *v1.Service, nic network.Interface) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err)
})
}
// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
func (az *Cloud) DeletePublicIPWithRetry(service *v1.Service, pipResourceGroup string, pipName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(service, "DeletePublicIPAddress", resp, err)
})
}
// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
func (az *Cloud) DeleteLBWithRetry(service *v1.Service, lbName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
done, err := processHTTPRetryResponse(resp, err)
done, err := az.processHTTPRetryResponse(service, "DeleteLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after deleting
az.lbCache.Delete(lbName)
@ -252,7 +263,7 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable
defer cancel()
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
@ -263,8 +274,8 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
defer cancel()
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
@ -275,20 +286,20 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
defer cancel()
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return processHTTPRetryResponse(resp, err)
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
@ -296,40 +307,13 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualM
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}
// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
if isSuccessHTTPResponse(resp) {
glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
return true, nil
}
if shouldRetryAPIRequest(resp, err) {
glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
// suppress the error object so that backoff process continues
return false, nil
}
// Fall-through: stop periodic backoff
return true, nil
}
// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
func shouldRetryAPIRequest(resp autorest.Response, err error) bool {
if err != nil {
return true
}
// HTTP 4xx or 5xx suggests we should retry
if 399 < resp.StatusCode && resp.StatusCode < 600 {
return true
}
return false
}
// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
func isSuccessHTTPResponse(resp autorest.Response) bool {
func isSuccessHTTPResponse(resp http.Response) bool {
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true
@ -352,16 +336,21 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
return false
}
func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
if resp != nil {
func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, resp *http.Response, err error) (bool, error) {
if resp != nil && isSuccessHTTPResponse(*resp) {
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true, nil
}
return true, nil
}
if shouldRetryHTTPRequest(resp, err) {
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
if err != nil {
az.Event(service, v1.EventTypeWarning, reason, err.Error())
klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err)
} else {
az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode))
klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode)
}
// suppress the error object so that backoff process continues
return false, nil
}
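// Summarizing the decision above: a 2xx response stops the backoff
// successfully, an error or a retryable status keeps it going, and anything
// else stops without error. A hedged sketch of that classification on its
// own (classifyResponse is illustrative, not part of this package):

import "net/http"

func classifyResponse(resp *http.Response, err error) (done bool) {
	switch {
	case resp != nil && resp.StatusCode >= 200 && resp.StatusCode < 300:
		return true // success: stop retrying
	case err != nil || (resp != nil && resp.StatusCode >= 400 && resp.StatusCode < 600):
		return false // retryable: let the backoff continue
	default:
		return true // e.g. a 3xx: stop, nothing useful to retry
	}
}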

View File

@ -20,11 +20,9 @@ import (
"fmt"
"net/http"
"testing"
"github.com/Azure/go-autorest/autorest"
)
func TestShouldRetry(t *testing.T) {
func TestShouldRetryHTTPRequest(t *testing.T) {
tests := []struct {
code int
err error
@ -54,12 +52,10 @@ func TestShouldRetry(t *testing.T) {
}
for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := &http.Response{
StatusCode: test.code,
}
res := shouldRetryAPIRequest(resp, test.err)
res := shouldRetryHTTPRequest(resp, test.err)
if res != test.expected {
t.Errorf("expected: %v, saw: %v", test.expected, res)
}
@ -86,10 +82,8 @@ func TestIsSuccessResponse(t *testing.T) {
}
for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := http.Response{
StatusCode: test.code,
}
res := isSuccessHTTPResponse(resp)
if res != test.expected {
@ -99,6 +93,7 @@ func TestIsSuccessResponse(t *testing.T) {
}
func TestProcessRetryResponse(t *testing.T) {
az := &Cloud{}
tests := []struct {
code int
err error
@ -132,12 +127,10 @@ func TestProcessRetryResponse(t *testing.T) {
}
for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := &http.Response{
StatusCode: test.code,
}
res, err := processRetryResponse(resp, test.err)
res, err := az.processHTTPRetryResponse(nil, "", resp, test.err)
if res != test.stop {
t.Errorf("expected: %v, saw: %v", test.stop, res)
}

View File

@ -27,11 +27,12 @@ import (
"sync/atomic"
"time"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"github.com/rubiojr/go-vhd/vhd"
"k8s.io/klog"
kwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
)
@ -67,7 +68,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
// get accounts
accounts, err := c.getAllStorageAccounts()
if err != nil {
glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
klog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
c.accounts = make(map[string]*storageAccountState)
return &c, nil
}
@ -79,7 +80,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, string(defaultStorageAccountKind), c.common.resourceGroup, location, dedicatedDiskAccountNamePrefix)
if err != nil {
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
@ -96,24 +97,24 @@ func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, lo
return "", "", 0, err
}
glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
klog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
return diskName, diskURI, requestGB, err
}
// DeleteVolume deletes a VHD blob
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
klog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
if err != nil {
return fmt.Errorf("failed to parse vhd URI %v", err)
}
key, err := c.common.cloud.getStorageAccesskey(accountName)
key, err := c.common.cloud.getStorageAccesskey(accountName, c.common.resourceGroup)
if err != nil {
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
}
err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
if err != nil {
glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
klog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
detail := err.Error()
if strings.Contains(detail, errLeaseIDMissing) {
// disk is still being used
@ -122,7 +123,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error {
}
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
}
glog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
klog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
return nil
}
@ -152,7 +153,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC
tags := make(map[string]string)
tags["createdby"] = "k8sAzureDataDisk"
glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)
klog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)
blob := container.GetBlobReference(vhdName)
blob.Properties.ContentLength = vhdSize
@ -184,7 +185,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC
End: uint64(vhdSize - 1),
}
if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
klog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
vhdName, containerName, accountName, err.Error())
return "", "", err
}
@ -214,7 +215,7 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str
//CreateBlobDisk : create a blob disk in a node
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) {
glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
storageAccountName, err := c.findSANameForDisk(storageAccountType)
if err != nil {
@ -246,7 +247,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
_, ok := c.accounts[storageAccountName]
if !ok {
// the storage account is specified by user
glog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
klog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
return c.DeleteVolume(diskURI)
}
@ -255,7 +256,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
return err
}
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
klog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
container := blobSvc.GetContainerReference(vhdContainerName)
blob := container.GetBlobReference(vhdName)
@ -265,7 +266,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
if diskCount, err := c.getDiskCount(storageAccountName); err != nil {
c.accounts[storageAccountName].diskCount = int32(diskCount)
} else {
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
klog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
return nil // we have failed to acquire a new count. not an error condition
}
}
@ -290,7 +291,7 @@ func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error)
for _, v := range *listKeysResult.Keys {
if v.Value != nil && *v.Value == "key1" {
if _, ok := c.accounts[SAName]; !ok {
glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
return *v.Value, nil
}
}
@ -365,7 +366,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
_, provisionState, err := c.getStorageAccountState(storageAccountName)
if err != nil {
glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
klog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
return false, nil // error performing the query - retryable
}
@ -373,7 +374,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
return true, nil
}
glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
klog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
return false, nil // back off and see if the account becomes ready on next retry
})
// we have failed to ensure that account is ready for us to create
@ -396,7 +397,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
return err
}
if bCreated {
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
klog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
}
// flag so we no longer have to check on ARM
@ -428,7 +429,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
if err != nil {
return 0, err
}
glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
klog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
c.accounts[SAName].diskCount = int32(len(response.Blobs))
return int(c.accounts[SAName].diskCount), nil
@ -448,13 +449,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount
accounts := make(map[string]*storageAccountState)
for _, v := range *accountListResult.Value {
if v.Name == nil || v.Sku == nil {
glog.Info("azureDisk - accountListResult Name or Sku is nil")
klog.Info("azureDisk - accountListResult Name or Sku is nil")
continue
}
if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
continue
}
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
sastate := &storageAccountState{
name: *v.Name,
@ -485,10 +486,12 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
}
glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
klog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
Sku: &storage.Sku{Name: storageAccountType},
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
Kind: defaultStorageAccountKind,
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
ctx, cancel := getContextWithCancel()
@ -539,7 +542,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
countAccounts = countAccounts + 1
// empty account
if dCount == 0 {
glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
klog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
return v.name, nil // short circuit, avg is good and no need to adjust
}
// if this account is less allocated
@ -552,7 +555,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// if we failed to find storageaccount
if SAName == "" {
glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
klog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
@ -568,7 +571,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// averages are not ok and we should create more accounts if we can
if aboveAvg && countAccounts < maxStorageAccounts {
glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
klog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
@ -579,7 +582,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// averages are not ok and we are at capacity (max storage accounts allowed)
if aboveAvg && countAccounts == maxStorageAccounts {
glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
}

View File

@ -22,23 +22,23 @@ import (
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/client-go/util/flowcontrol"
)
// Helpers for rate limiting error/error channel creation
func createARMRateLimitErr(isWrite bool, opName string) error {
func createRateLimitErr(isWrite bool, opName string) error {
opType := "read"
if isWrite {
opType = "write"
}
return fmt.Errorf("azure - ARM rate limited(%s) for operation:%s", opType, opName)
return fmt.Errorf("azure - cloud provider rate limited(%s) for operation:%s", opType, opName)
}
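// The clients below gate every call on a token-bucket limiter before touching
// the Azure API. A minimal sketch of that gate (guardedWrite is illustrative;
// the QPS and burst values are examples only):

import "k8s.io/client-go/util/flowcontrol"

func guardedWrite(limiter flowcontrol.RateLimiter, do func() error) error {
	if !limiter.TryAccept() {
		return createRateLimitErr(true, "ExampleWrite")
	}
	return do()
}

// e.g. limiter := flowcontrol.NewTokenBucketRateLimiter(1.0, 5) // 1 QPS, burst of 5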
// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient
@ -131,6 +131,11 @@ type DisksClient interface {
Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error)
}
// VirtualMachineSizesClient defines needed functions for azure compute.VirtualMachineSizesClient
type VirtualMachineSizesClient interface {
List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error)
}
// azClientConfig contains all essential information to create an Azure client.
type azClientConfig struct {
subscriptionID string
@ -170,13 +175,13 @@ func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient
func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
// /* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMCreateOrUpdate")
err = createRateLimitErr(true, "VMCreateOrUpdate")
return
}
glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName)
defer func() {
glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName)
}()
mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -185,20 +190,20 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
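
> `WaitForCompletion` is deprecated in newer go-autorest in favor of `WaitForCompletionRef`, which takes the `autorest.Client` by reference; that is the only change at these call sites. A consolidated sketch of the long-running-operation flow (method name is hypothetical; `az` and the compute types are as in the surrounding code):

```go
func (az *azVirtualMachinesClient) createOrUpdateSketch(ctx context.Context, resourceGroupName, vmName string, parameters compute.VirtualMachine) (*http.Response, error) {
	future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, vmName, parameters)
	if err != nil {
		return future.Response(), err
	}
	// Polls ARM until the operation reaches a terminal state or ctx expires.
	if err := future.WaitForCompletionRef(ctx, az.client.Client); err != nil {
		return future.Response(), err
	}
	return future.Response(), nil
}
```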
func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMGet")
err = createRateLimitErr(false, "VMGet")
return
}
glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName)
defer func() {
glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName)
klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName)
}()
mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID)
@ -209,13 +214,13 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st
func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMList")
err = createRateLimitErr(false, "VMList")
return
}
glog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName)
klog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName)
klog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName)
}()
mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID)
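
> Every method follows the same V(10) start/end tracing idiom. Where new code wants the same effect with less repetition, a small defer-able helper works (`traceCall` is hypothetical, not part of this change):

```go
func traceCall(name string, args ...interface{}) func() {
	klog.V(10).Infof("%s(%v): start", name, args)
	return func() { klog.V(10).Infof("%s(%v): end", name, args) }
}

// usage inside a client method:
//   defer traceCall("azVirtualMachinesClient.List", resourceGroupName)()
```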
@ -261,13 +266,13 @@ func newAzInterfacesClient(config *azClientConfig) *azInterfacesClient {
func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "NiCreateOrUpdate")
err = createRateLimitErr(true, "NiCreateOrUpdate")
return
}
glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName)
klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName)
defer func() {
glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName)
klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName)
}()
mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -277,20 +282,20 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "NicGet")
err = createRateLimitErr(false, "NicGet")
return
}
glog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName)
klog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName)
defer func() {
glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName)
klog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName)
}()
mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID)
@ -301,13 +306,13 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string,
func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface")
err = createRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface")
return
}
glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
defer func() {
glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName)
}()
mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID)
@ -340,13 +345,13 @@ func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient {
func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "LBCreateOrUpdate")
err = createRateLimitErr(true, "LBCreateOrUpdate")
return nil, err
}
glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName)
klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName)
defer func() {
glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName)
klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName)
}()
mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -356,7 +361,7 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
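
> Each call also threads through a metric context: `newMetricContext` captures the (resource, operation, resource group, subscription) labels and `Observe` records the terminal error once. A hypothetical wrapper expressing that contract:

```go
// observeCall is illustrative; metricContext and Observe come from
// azure_metrics.go in this package.
func observeCall(mc *metricContext, call func() error) error {
	err := call()
	mc.Observe(err) // records latency and error state for the operation
	return err
}
```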
@ -364,13 +369,13 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro
func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "LBDelete")
err = createRateLimitErr(true, "LBDelete")
return nil, err
}
glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName)
klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName)
defer func() {
glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName)
klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName)
}()
mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID)
@ -380,20 +385,20 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "LBGet")
err = createRateLimitErr(false, "LBGet")
return
}
glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName)
klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName)
defer func() {
glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName)
klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName)
}()
mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID)
@ -404,13 +409,13 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri
func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {
if !az.rateLimiterReader.TryAccept() {
err := createARMRateLimitErr(false, "LBList")
err := createRateLimitErr(false, "LBList")
return nil, err
}
glog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName)
klog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName)
klog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName)
}()
mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID)
@ -456,13 +461,13 @@ func newAzPublicIPAddressesClient(config *azClientConfig) *azPublicIPAddressesCl
func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "PublicIPCreateOrUpdate")
err = createRateLimitErr(true, "PublicIPCreateOrUpdate")
return nil, err
}
glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName)
klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName)
defer func() {
glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName)
klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName)
}()
mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -472,7 +477,7 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -480,13 +485,13 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc
func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "PublicIPDelete")
err = createRateLimitErr(true, "PublicIPDelete")
return nil, err
}
glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName)
klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName)
defer func() {
glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName)
klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName)
}()
mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID)
@ -496,20 +501,20 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "PublicIPGet")
err = createRateLimitErr(false, "PublicIPGet")
return
}
glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName)
klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName)
defer func() {
glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName)
klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName)
}()
mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID)
@ -520,12 +525,12 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName
func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, error) {
if !az.rateLimiterReader.TryAccept() {
return nil, createARMRateLimitErr(false, "PublicIPList")
return nil, createRateLimitErr(false, "PublicIPList")
}
glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName)
klog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName)
klog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName)
}()
mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID)
@ -571,13 +576,13 @@ func newAzSubnetsClient(config *azClientConfig) *azSubnetsClient {
func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "SubnetCreateOrUpdate")
err = createRateLimitErr(true, "SubnetCreateOrUpdate")
return
}
glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
defer func() {
glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
}()
mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -587,7 +592,7 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -595,13 +600,13 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName
func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "SubnetDelete")
err = createRateLimitErr(true, "SubnetDelete")
return
}
glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
defer func() {
glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
}()
mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID)
@ -611,20 +616,20 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string,
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "SubnetGet")
err = createRateLimitErr(false, "SubnetGet")
return
}
glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName)
defer func() {
glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName)
}()
mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID)
@ -635,12 +640,12 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi
func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, error) {
if !az.rateLimiterReader.TryAccept() {
return nil, createARMRateLimitErr(false, "SubnetList")
return nil, createRateLimitErr(false, "SubnetList")
}
glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName)
klog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName)
defer func() {
glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName)
klog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName)
}()
mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID)
@ -686,13 +691,13 @@ func newAzSecurityGroupsClient(config *azClientConfig) *azSecurityGroupsClient {
func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "NSGCreateOrUpdate")
err = createRateLimitErr(true, "NSGCreateOrUpdate")
return
}
glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName)
klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName)
defer func() {
glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName)
klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName)
}()
mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -702,7 +707,7 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -710,13 +715,13 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr
func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "NSGDelete")
err = createRateLimitErr(true, "NSGDelete")
return
}
glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName)
klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName)
defer func() {
glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName)
klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName)
}()
mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID)
@ -726,20 +731,20 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "NSGGet")
err = createRateLimitErr(false, "NSGGet")
return
}
glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName)
klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName)
defer func() {
glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName)
klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName)
}()
mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID)
@ -750,12 +755,12 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str
func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, error) {
if !az.rateLimiterReader.TryAccept() {
return nil, createARMRateLimitErr(false, "NSGList")
return nil, createRateLimitErr(false, "NSGList")
}
glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName)
klog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName)
klog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName)
}()
mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID)
@ -801,13 +806,13 @@ func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachin
func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMSSCreateOrUpdate")
err = createRateLimitErr(true, "VMSSCreateOrUpdate")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName)
}()
mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -817,20 +822,20 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSGet")
err = createRateLimitErr(false, "VMSSGet")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName)
}()
mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID)
@ -841,13 +846,13 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou
func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSList")
err = createRateLimitErr(false, "VMSSList")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
}()
mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID)
@ -872,13 +877,13 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro
func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMSSUpdateInstances")
err = createRateLimitErr(true, "VMSSUpdateInstances")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
}()
mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID)
@ -888,7 +893,7 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -916,13 +921,13 @@ func newAzVirtualMachineScaleSetVMsClient(config *azClientConfig) *azVirtualMach
func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSGet")
err = createRateLimitErr(false, "VMSSGet")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
}()
mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID)
@ -933,13 +938,13 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr
func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSGetInstanceView")
err = createRateLimitErr(false, "VMSSGetInstanceView")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
}()
mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID)
@ -950,13 +955,13 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context
func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSList")
err = createRateLimitErr(false, "VMSSList")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter)
}()
mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID)
@ -980,13 +985,13 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG
func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMSSUpdate")
err = createRateLimitErr(true, "VMSSUpdate")
return
}
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID)
}()
mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID)
@ -996,7 +1001,7 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -1025,13 +1030,13 @@ func newAzRoutesClient(config *azClientConfig) *azRoutesClient {
func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "RouteCreateOrUpdate")
err = createRateLimitErr(true, "RouteCreateOrUpdate")
return
}
glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
defer func() {
glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
}()
mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -1041,7 +1046,7 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -1049,13 +1054,13 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "RouteDelete")
err = createRateLimitErr(true, "RouteDelete")
return
}
glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName)
defer func() {
glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName)
}()
mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID)
@ -1065,7 +1070,7 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string,
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -1094,13 +1099,13 @@ func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient {
func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "RouteTableCreateOrUpdate")
err = createRateLimitErr(true, "RouteTableCreateOrUpdate")
return
}
glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName)
klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName)
defer func() {
glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName)
klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName)
}()
mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -1110,20 +1115,20 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "GetRouteTable")
err = createRateLimitErr(false, "GetRouteTable")
return
}
glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName)
klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName)
defer func() {
glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName)
klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName)
}()
mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID)
@ -1155,13 +1160,13 @@ func newAzStorageAccountClient(config *azClientConfig) *azStorageAccountClient {
func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) (result *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "StorageAccountCreate")
err = createRateLimitErr(true, "StorageAccountCreate")
return
}
glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName)
defer func() {
glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName)
}()
mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID)
@ -1170,20 +1175,20 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "DeleteStorageAccount")
err = createRateLimitErr(false, "DeleteStorageAccount")
return
}
glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName)
defer func() {
glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName)
}()
mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID)
@ -1194,13 +1199,13 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName
func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "ListStorageAccountKeys")
err = createRateLimitErr(false, "ListStorageAccountKeys")
return
}
glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName)
defer func() {
glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName)
}()
mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID)
@ -1211,13 +1216,13 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam
func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "ListStorageAccountsByResourceGroup")
err = createRateLimitErr(false, "ListStorageAccountsByResourceGroup")
return
}
glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName)
klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName)
klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName)
}()
mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID)
@ -1228,13 +1233,13 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou
func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "GetStorageAccount/Properties")
err = createRateLimitErr(false, "GetStorageAccount/Properties")
return
}
glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName)
defer func() {
glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName)
klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName)
}()
mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID)
@ -1266,13 +1271,13 @@ func newAzDisksClient(config *azClientConfig) *azDisksClient {
func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "DiskCreateOrUpdate")
err = createRateLimitErr(true, "DiskCreateOrUpdate")
return
}
glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName)
klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName)
defer func() {
glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName)
klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName)
}()
mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID)
@ -1282,7 +1287,7 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
@ -1290,13 +1295,13 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s
func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "DiskDelete")
err = createRateLimitErr(true, "DiskDelete")
return
}
glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName)
klog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName)
defer func() {
glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName)
klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName)
}()
mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID)
@ -1306,20 +1311,20 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d
return future.Response(), err
}
err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}
func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "GetDisk")
err = createRateLimitErr(false, "GetDisk")
return
}
glog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName)
klog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName)
defer func() {
glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName)
klog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName)
}()
mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID)
@ -1327,3 +1332,41 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk
mc.Observe(err)
return
}
// azVirtualMachineSizesClient implements VirtualMachineSizesClient.
type azVirtualMachineSizesClient struct {
client compute.VirtualMachineSizesClient
rateLimiterReader flowcontrol.RateLimiter
rateLimiterWriter flowcontrol.RateLimiter
}
func newAzVirtualMachineSizesClient(config *azClientConfig) *azVirtualMachineSizesClient {
VirtualMachineSizesClient := compute.NewVirtualMachineSizesClient(config.subscriptionID)
VirtualMachineSizesClient.BaseURI = config.resourceManagerEndpoint
VirtualMachineSizesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken)
VirtualMachineSizesClient.PollingDelay = 5 * time.Second
configureUserAgent(&VirtualMachineSizesClient.Client)
return &azVirtualMachineSizesClient{
rateLimiterReader: config.rateLimiterReader,
rateLimiterWriter: config.rateLimiterWriter,
client: VirtualMachineSizesClient,
}
}
func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createRateLimitErr(false, "VMSizesList")
return
}
klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location)
defer func() {
klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location)
}()
mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID)
result, err = az.client.List(ctx, location)
mc.Observe(err)
return
}
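
> The new `VirtualMachineSizesClient` backs instance-type discovery. A hypothetical caller (`logVMSizes` and the location value are illustrative):

```go
func logVMSizes(ctx context.Context, c VirtualMachineSizesClient, location string) error {
	result, err := c.List(ctx, location)
	if err != nil {
		return err
	}
	if result.Value == nil {
		return nil
	}
	for _, size := range *result.Value {
		klog.V(4).Infof("vm size %s: %d vCPUs, %d MB memory",
			*size.Name, *size.NumberOfCores, *size.MemoryInMB)
	}
	return nil
}

// e.g. logVMSizes(context.Background(), az.VirtualMachineSizesClient, "eastus")
```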

View File

@ -20,12 +20,12 @@ import (
"fmt"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
kwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)
const (
@ -119,7 +119,7 @@ func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err
}
@ -128,7 +128,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
return *disk.Lun, nil
}
}
@ -139,7 +139,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName)
if err != nil {
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err
}
@ -168,7 +168,7 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
if err != nil {
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
nodeName, diskNames)
return attached, nil
}
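
> The hunk above relies on `cloudprovider.InstanceNotFound`, now imported from the `k8s.io/cloud-provider` staging module rather than `pkg/cloudprovider`. It is a sentinel error value, so direct comparison is the intended test; a minimal sketch:

```go
import cloudprovider "k8s.io/cloud-provider"

// nodeGone reports whether an error means the VM no longer exists, in which
// case DisksAreAttached-style callers treat all disks as detached instead of failing.
func nodeGone(err error) bool {
	return err == cloudprovider.InstanceNotFound
}
```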

View File

@ -20,8 +20,8 @@ import (
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
)
@ -34,6 +34,12 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
return err
}
vmName := mapNodeNameToVMName(nodeName)
nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
disks := *vm.StorageProfile.DataDisks
if isManagedDisk {
disks = append(disks,
@ -67,31 +73,24 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
},
},
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
}
}
// Invalidate the cache right after updating
defer as.cloud.vmCache.Delete(vmName)
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
if err != nil {
glog.Errorf("azureDisk - azure attach failed, err: %v", err)
klog.Errorf("azureDisk - attach disk(%s) failed, err: %v", diskName, err)
detail := err.Error()
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
glog.Infof("azureDisk - err %s, try detach", detail)
klog.V(2).Infof("azureDisk - err %v, try detach disk(%s)", err, diskName)
as.DetachDiskByName(diskName, diskURI, nodeName)
}
} else {
glog.V(4).Info("azureDisk - azure attach succeeded")
// Invalidate the cache right after updating
as.cloud.vmCache.Delete(vmName)
klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName)
}
return err
}
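
> Note the ordering change: the `vmCache` delete is now registered with `defer` before `CreateOrUpdate` runs, so the potentially stale entry is evicted whether the update succeeds or fails. A generic sketch of the idiom (the cache interface here is a placeholder):

```go
type deletableCache interface {
	Delete(key string)
}

// updateThenInvalidate evicts the cache entry on every exit path; the next
// read repopulates it from ARM.
func updateThenInvalidate(cache deletableCache, key string, update func() error) error {
	defer cache.Delete(key)
	return update()
}
```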
@ -102,10 +101,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
vm, err := as.getVirtualMachine(nodeName)
if err != nil {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
klog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
return nil
}
vmName := mapNodeNameToVMName(nodeName)
nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
disks := *vm.StorageProfile.DataDisks
bFoundDisk := false
for i, disk := range disks {
@ -113,7 +118,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
bFoundDisk = true
break
@ -132,25 +137,18 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
},
},
}
vmName := mapNodeNameToVMName(nodeName)
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
}
}
// Invalidate the cache right after updating
defer as.cloud.vmCache.Delete(vmName)
_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
if err != nil {
glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
klog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err)
} else {
glog.V(4).Info("azureDisk - azure disk detach succeeded")
// Invalidate the cache right after updating
as.cloud.vmCache.Delete(vmName)
klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName)
}
return err
}

View File

@ -20,8 +20,8 @@ import (
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/types"
)
@ -29,7 +29,13 @@ import (
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
if err != nil {
return err
}
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
@ -65,27 +71,22 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
ctx, cancel := getContextWithCancel()
defer cancel()
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", ss.resourceGroup, nodeName)
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
}
}
// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName)
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
if err != nil {
detail := err.Error()
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
glog.Infof("azureDisk - err %s, try detach", detail)
klog.Infof("azureDisk - err %s, try detach disk(%s)", detail, diskName)
ss.DetachDiskByName(diskName, diskURI, nodeName)
}
} else {
glog.V(4).Info("azureDisk - azure attach succeeded")
// Invalidate the cache right after updating
ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName)
}
return err
}
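
> The scale-set path now builds its cache key from the node's resource group as well as the VM name, so identically named scale-set VMs in different resource groups cannot collide. The exact key format of `buildVmssCacheKey` is internal; this sketch only mirrors the intent:

```go
// buildVmssCacheKeySketch is illustrative, not the real implementation.
func buildVmssCacheKeySketch(resourceGroup, vmssVMName string) string {
	return fmt.Sprintf("%s/%s", resourceGroup, vmssVMName) // e.g. "rg1/vmss-0_3"
}
```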
@ -93,7 +94,13 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName)
if err != nil {
return err
}
nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
if err != nil {
return err
}
@ -108,7 +115,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
// found the disk
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
bFoundDisk = true
break
@ -122,22 +129,17 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
vm.StorageProfile.DataDisks = &disks
ctx, cancel := getContextWithCancel()
defer cancel()
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", ss.resourceGroup, nodeName)
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
}
}
// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName)
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
if err != nil {
glog.Errorf("azureDisk - azure disk detach %q from %s failed, err: %v", diskName, nodeName, err)
klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err)
} else {
glog.V(4).Info("azureDisk - azure detach succeeded")
// Invalidate the cache right after updating
ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName)
}
return err

View File

@ -27,11 +27,11 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/to"
)
@ -207,6 +207,13 @@ func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName stri
return value, nil
}
func (fAPC *fakeAzurePIPClient) setFakeStore(store map[string]map[string]network.PublicIPAddress) {
fAPC.mutex.Lock()
defer fAPC.mutex.Unlock()
fAPC.FakeStore = store
}
type fakeAzureInterfacesClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.Interface
@ -247,7 +254,24 @@ func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName
}
func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
return result, nil
fIC.mutex.Lock()
defer fIC.mutex.Unlock()
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
return entity, nil
}
}
return result, autorest.DetailedError{
StatusCode: http.StatusNotFound,
Message: "Not such Interface",
}
}
func (fIC *fakeAzureInterfacesClient) setFakeStore(store map[string]map[string]network.Interface) {
fIC.mutex.Lock()
defer fIC.mutex.Unlock()
fIC.FakeStore = store
}
type fakeAzureVirtualMachinesClient struct {
@ -302,6 +326,13 @@ func (fVMC *fakeAzureVirtualMachinesClient) List(ctx context.Context, resourceGr
return result, nil
}
func (fVMC *fakeAzureVirtualMachinesClient) setFakeStore(store map[string]map[string]compute.VirtualMachine) {
fVMC.mutex.Lock()
defer fVMC.mutex.Unlock()
fVMC.FakeStore = store
}
type fakeAzureSubnetsClient struct {
mutex *sync.Mutex
FakeStore map[string]map[string]network.Subnet
@ -867,11 +898,11 @@ func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availa
return nil, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
return fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
return fmt.Errorf("unimplemented")
}
@ -886,3 +917,7 @@ func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
return nil, fmt.Errorf("unimplemented")
}
func (f *fakeVMSet) GetPowerStatusByNodeName(name string) (string, error) {
return "", fmt.Errorf("unimplemented")
}

View File

@ -21,7 +21,7 @@ import (
azs "github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -58,22 +58,11 @@ func (f *azureFileClient) createFileShare(accountName, accountKey, name string,
if err != nil {
return err
}
// create a file share and set quota
// Note: per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share,
// setting x-ms-share-quota should set the quota on the new share, but in reality setting the quota
// in CreateShare fails with "The metadata specified is invalid. It has characters that are not permitted."
// As a result, we break this into two API calls: create the share, then set the quota.
share := fileClient.GetShareReference(name)
share.Properties.Quota = sizeGiB
if err = share.Create(nil); err != nil {
return fmt.Errorf("failed to create file share, err: %v", err)
}
share.Properties.Quota = sizeGiB
if err = share.SetProperties(nil); err != nil {
if err := share.Delete(nil); err != nil {
glog.Errorf("Error deleting share: %v", err)
}
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
}
return nil
}
@ -93,7 +82,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string,
}
share := fileClient.GetShareReference(name)
if share.Properties.Quota >= sizeGiB {
glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
share.Properties.Quota, sizeGiB, accountName, name)
return nil
}
@ -101,7 +90,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string,
if err = share.SetProperties(nil); err != nil {
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
}
glog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
klog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
return nil
}

View File

@ -18,11 +18,17 @@ package azure
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
const metadataURL = "http://169.254.169.254/metadata/"
const (
metadataCacheTTL = time.Minute
metadataCacheKey = "InstanceMetadata"
metadataURL = "http://169.254.169.254/metadata/instance"
)
// NetworkMetadata contains metadata about an instance's network
type NetworkMetadata struct {
@ -54,60 +60,100 @@ type Subnet struct {
Prefix string `json:"prefix"`
}
// InstanceMetadata knows how to query the Azure instance metadata server.
// ComputeMetadata represents compute information
type ComputeMetadata struct {
SKU string `json:"sku,omitempty"`
Name string `json:"name,omitempty"`
Zone string `json:"zone,omitempty"`
VMSize string `json:"vmSize,omitempty"`
OSType string `json:"osType,omitempty"`
Location string `json:"location,omitempty"`
FaultDomain string `json:"platformFaultDomain,omitempty"`
UpdateDomain string `json:"platformUpdateDomain,omitempty"`
ResourceGroup string `json:"resourceGroupName,omitempty"`
VMScaleSetName string `json:"vmScaleSetName,omitempty"`
}
// InstanceMetadata represents instance information.
type InstanceMetadata struct {
baseURL string
Compute *ComputeMetadata `json:"compute,omitempty"`
Network *NetworkMetadata `json:"network,omitempty"`
}
// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object.
func NewInstanceMetadata() *InstanceMetadata {
return &InstanceMetadata{
baseURL: metadataURL,
// InstanceMetadataService knows how to query the Azure instance metadata server.
type InstanceMetadataService struct {
metadataURL string
imsCache *timedCache
}
// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object.
func NewInstanceMetadataService(metadataURL string) (*InstanceMetadataService, error) {
ims := &InstanceMetadataService{
metadataURL: metadataURL,
}
}
// makeMetadataURL makes a complete metadata URL from the given path.
func (i *InstanceMetadata) makeMetadataURL(path string) string {
return i.baseURL + path
}
// Object queries the metadata server and populates the passed in object
func (i *InstanceMetadata) Object(path string, obj interface{}) error {
data, err := i.queryMetadataBytes(path, "json")
imsCache, err := newTimedcache(metadataCacheTTL, ims.getInstanceMetadata)
if err != nil {
return err
return nil, err
}
return json.Unmarshal(data, obj)
ims.imsCache = imsCache
return ims, nil
}
// Text queries the metadata server and returns the corresponding text
func (i *InstanceMetadata) Text(path string) (string, error) {
data, err := i.queryMetadataBytes(path, "text")
if err != nil {
return "", err
}
return string(data), err
}
func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, error) {
client := &http.Client{}
req, err := http.NewRequest("GET", i.makeMetadataURL(path), nil)
func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}, error) {
req, err := http.NewRequest("GET", ims.metadataURL, nil)
if err != nil {
return nil, err
}
req.Header.Add("Metadata", "True")
req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
q := req.URL.Query()
q.Add("format", format)
q.Add("api-version", "2017-04-02")
q.Add("format", "json")
q.Add("api-version", "2017-12-01")
req.URL.RawQuery = q.Encode()
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
if resp.StatusCode != 200 {
return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
obj := InstanceMetadata{}
err = json.Unmarshal(data, &obj)
if err != nil {
return nil, err
}
return &obj, nil
}
// GetMetadata gets instance metadata from cache.
func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) {
cache, err := ims.imsCache.Get(metadataCacheKey)
if err != nil {
return nil, err
}
// Cache shouldn't be nil, but add a check in case something goes wrong.
if cache == nil {
return nil, fmt.Errorf("failed to get instance metadata")
}
if metadata, ok := cache.(*InstanceMetadata); ok {
return metadata, nil
}
return nil, fmt.Errorf("failure of getting instance metadata")
}
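// exampleGetMetadata is a minimal usage sketch (illustrative only, not part of
// the original file) of the accessor above, using this file's metadataURL
// constant as the IMDS endpoint.
func exampleGetMetadata() (*InstanceMetadata, error) {
ims, err := NewInstanceMetadataService(metadataURL)
if err != nil {
return nil, err
}
// The result is served from the one-minute cache once it has been warmed.
return ims.GetMetadata()
}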

View File

@ -18,22 +18,39 @@ package azure
import (
"context"
"fmt"
"os"
"strings"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
)
const (
vmPowerStatePrefix = "PowerState/"
vmPowerStateStopped = "stopped"
vmPowerStateDeallocated = "deallocated"
)
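// For example, an instance view status code "PowerState/deallocated" strips
// the prefix above to "deallocated", which (like "stopped") is treated as
// shut down by InstanceShutdownByProviderID below.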
// NodeAddresses returns the addresses of the specified instance.
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
// Returns nil for unmanaged nodes because the azure cloud provider cannot fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(name))
if err != nil {
return nil, err
}
if unmanaged {
klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
return nil, nil
}
addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
if err != nil {
glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
return nil, err
}
@ -51,7 +68,16 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
}
if az.UseInstanceMetadata {
isLocalInstance, err := az.isCurrentInstance(name)
metadata, err := az.metadata.GetMetadata()
if err != nil {
return nil, err
}
if metadata.Compute == nil || metadata.Network == nil {
return nil, fmt.Errorf("failed to get instance metadata")
}
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
if err != nil {
return nil, err
}
@ -61,21 +87,38 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
return addressGetter(name)
}
ipAddress := IPAddress{}
err = az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
if err != nil {
return nil, err
if len(metadata.Network.Interface) == 0 {
return nil, fmt.Errorf("no interface is found for the instance")
}
// Use ip address got from instance metadata.
ipAddress := metadata.Network.Interface[0]
addresses := []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP},
{Type: v1.NodeHostName, Address: string(name)},
}
if len(ipAddress.PublicIP) > 0 {
addr := v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: ipAddress.PublicIP,
for _, address := range ipAddress.IPV4.IPAddress {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeInternalIP,
Address: address.PrivateIP,
})
if len(address.PublicIP) > 0 {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: address.PublicIP,
})
}
}
for _, address := range ipAddress.IPV6.IPAddress {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeInternalIP,
Address: address.PrivateIP,
})
if len(address.PublicIP) > 0 {
addresses = append(addresses, v1.NodeAddress{
Type: v1.NodeExternalIP,
Address: address.PublicIP,
})
}
addresses = append(addresses, addr)
}
return addresses, nil
}
@ -87,6 +130,12 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
// Returns nil for unmanaged nodes because the azure cloud provider cannot fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID)
return nil, nil
}
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return nil, err
@ -98,6 +147,12 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
// Returns true for unmanaged nodes because the azure cloud provider always assumes they exist.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID)
return true, nil
}
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return false, err
@ -116,35 +171,60 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
return false, cloudprovider.NotImplemented
}
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
nodeName := mapNodeNameToVMName(name)
metadataName, err := az.metadata.Text("instance/compute/name")
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return false, err
}
powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
if err != nil {
return false, err
}
klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil
}
func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
var err error
nodeName := mapNodeNameToVMName(name)
if az.VMType == vmTypeVMSS {
// VMSS vmName is not same with hostname, use hostname instead.
metadataName, err = os.Hostname()
metadataVMName, err = os.Hostname()
if err != nil {
return false, err
}
}
metadataName = strings.ToLower(metadataName)
return (metadataName == nodeName), err
metadataVMName = strings.ToLower(metadataVMName)
return (metadataVMName == nodeName), err
}
// InstanceID returns the cloud provider ID of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
nodeName := mapNodeNameToVMName(name)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return "", err
}
if unmanaged {
// InstanceID is the same as nodeName for unmanaged nodes.
klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name)
return nodeName, nil
}
if az.UseInstanceMetadata {
isLocalInstance, err := az.isCurrentInstance(name)
metadata, err := az.metadata.GetMetadata()
if err != nil {
return "", err
}
if metadata.Compute == nil {
return "", fmt.Errorf("failure of getting instance metadata")
}
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
if err != nil {
return "", err
}
@ -154,26 +234,25 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
return az.vmSet.GetInstanceIDByNodeName(nodeName)
}
// Get resource group name.
resourceGroup := metadata.Compute.ResourceGroup
// Compose instanceID based on nodeName for standard instance.
if az.VMType == vmTypeStandard {
return az.getStandardMachineID(nodeName), nil
return az.getStandardMachineID(resourceGroup, nodeName), nil
}
// Get scale set name and instanceID from vmName for vmss.
metadataName, err := az.metadata.Text("instance/compute/name")
if err != nil {
return "", err
}
ssName, instanceID, err := extractVmssVMName(metadataName)
ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name)
if err != nil {
if err == ErrorNotVmssInstance {
// Compose machineID for standard Node.
return az.getStandardMachineID(nodeName), nil
return az.getStandardMachineID(resourceGroup, nodeName), nil
}
return "", err
}
// Compose instanceID based on ssName and instanceID for vmss instance.
return az.getVmssMachineID(ssName, instanceID), nil
return az.getVmssMachineID(resourceGroup, ssName, instanceID), nil
}
return az.vmSet.GetInstanceIDByNodeName(nodeName)
@ -183,6 +262,12 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID)
return "", nil
}
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return "", err
@ -196,15 +281,33 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(name))
if err != nil {
return "", err
}
if unmanaged {
klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name)
return "", nil
}
if az.UseInstanceMetadata {
isLocalInstance, err := az.isCurrentInstance(name)
metadata, err := az.metadata.GetMetadata()
if err != nil {
return "", err
}
if metadata.Compute == nil {
return "", fmt.Errorf("failure of getting instance metadata")
}
isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
if err != nil {
return "", err
}
if isLocalInstance {
machineType, err := az.metadata.Text("instance/compute/vmSize")
if err == nil {
return machineType, nil
if metadata.Compute.VMSize != "" {
return metadata.Compute.VMSize, nil
}
}
}

View File

@ -0,0 +1,218 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"fmt"
"net"
"net/http"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/apimachinery/pkg/types"
)
// setTestVirtualMachines sets test virtual machines with the given power states.
func setTestVirtualMachines(c *Cloud, vmList map[string]string) {
virtualMachineClient := c.VirtualMachinesClient.(*fakeAzureVirtualMachinesClient)
store := map[string]map[string]compute.VirtualMachine{
"rg": make(map[string]compute.VirtualMachine),
}
for nodeName, powerState := range vmList {
instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName)
vm := compute.VirtualMachine{
Name: &nodeName,
ID: &instanceID,
Location: &c.Location,
}
if powerState != "" {
status := []compute.InstanceViewStatus{
{
Code: to.StringPtr(powerState),
},
{
Code: to.StringPtr("ProvisioningState/succeeded"),
},
}
vm.VirtualMachineProperties = &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{
Statuses: &status,
},
}
}
store["rg"][nodeName] = vm
}
virtualMachineClient.setFakeStore(store)
}
func TestInstanceID(t *testing.T) {
cloud := getTestCloud()
testcases := []struct {
name string
vmList []string
nodeName string
metadataName string
expected string
expectError bool
}{
{
name: "InstanceID should get instanceID if node's name is equal to metadataName",
vmList: []string{"vm1"},
nodeName: "vm1",
metadataName: "vm1",
expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
},
{
name: "InstanceID should get instanceID from Azure API if node is not local instance",
vmList: []string{"vm2"},
nodeName: "vm2",
metadataName: "vm1",
expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
},
{
name: "InstanceID should report error if VM doesn't exist",
vmList: []string{"vm1"},
nodeName: "vm3",
expectError: true,
},
}
for _, test := range testcases {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, `{"compute":{"name":"%s"}}`, test.metadataName)
}))
go func() {
http.Serve(listener, mux)
}()
defer listener.Close()
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
vmListWithPowerState := make(map[string]string)
for _, vm := range test.vmList {
vmListWithPowerState[vm] = ""
}
setTestVirtualMachines(cloud, vmListWithPowerState)
instanceID, err := cloud.InstanceID(context.Background(), types.NodeName(test.nodeName))
if test.expectError {
if err == nil {
t.Errorf("Test [%s] unexpected nil err", test.name)
}
} else {
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
}
if instanceID != test.expected {
t.Errorf("Test [%s] unexpected instanceID: %s, expected %q", test.name, instanceID, test.expected)
}
}
}
func TestInstanceShutdownByProviderID(t *testing.T) {
testcases := []struct {
name string
vmList map[string]string
nodeName string
expected bool
expectError bool
}{
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Running status",
vmList: map[string]string{"vm1": "PowerState/Running"},
nodeName: "vm1",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Deallocated status",
vmList: map[string]string{"vm2": "PowerState/Deallocated"},
nodeName: "vm2",
expected: true,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Deallocating status",
vmList: map[string]string{"vm3": "PowerState/Deallocating"},
nodeName: "vm3",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Starting status",
vmList: map[string]string{"vm4": "PowerState/Starting"},
nodeName: "vm4",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Stopped status",
vmList: map[string]string{"vm5": "PowerState/Stopped"},
nodeName: "vm5",
expected: true,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopping status",
vmList: map[string]string{"vm6": "PowerState/Stopping"},
nodeName: "vm6",
expected: false,
},
{
name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Unknown status",
vmList: map[string]string{"vm7": "PowerState/Unknown"},
nodeName: "vm7",
expected: false,
},
{
name: "InstanceShutdownByProviderID should report error if VM doesn't exist",
vmList: map[string]string{"vm1": "PowerState/running"},
nodeName: "vm8",
expectError: true,
},
}
for _, test := range testcases {
cloud := getTestCloud()
setTestVirtualMachines(cloud, test.vmList)
providerID := "azure://" + cloud.getStandardMachineID("rg", test.nodeName)
hasShutdown, err := cloud.InstanceShutdownByProviderID(context.Background(), providerID)
if test.expectError {
if err == nil {
t.Errorf("Test [%s] unexpected nil err", test.name)
}
} else {
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
}
if hasShutdown != test.expected {
t.Errorf("Test [%s] unexpected hasShutdown: %v, expected %v", test.name, hasShutdown, test.expected)
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,77 +0,0 @@
# Azure LoadBalancer
Azure defines load balancers differently from GCE and AWS: an Azure LB can have multiple frontend IP references, while GCE and AWS allow only one per LB (if you want more, you need another LB). Because of this, the Public IP is not part of the LB in Azure, and neither is the NSG. They still cannot be deleted in parallel: a Public IP can only be deleted after the LB's frontend IP reference to it has been removed.
Azure resources such as the LB, Public IP, and NSG are peers at the same tier. We need to make sure their individual ensure loops have no dependencies on each other; in other words, each is eventually reconciled regardless of the other resources' state and depends only on the service state.
Despite the ideal philosophy above, we have to face reality: the NSG depends on the LB's frontend IP to adjust its rules, so when we reconcile the NSG, the LB must already contain the corresponding frontend IP config.
Also, on Azure we cannot afford more than one service_controller worker: different services can operate on the same LB, so concurrent execution could conflict or produce unexpected results. AWS and GCE apparently don't have this problem; they use one LB per service, so no such conflict arises.
There are two load balancers per availability set: internal and external. There is a limit on the number of services that can be associated with a single load balancer.
By default the primary load balancer is selected. Services can be annotated to allow auto selection among the available load balancers, or to name specific availability sets that host the load balancers. Note that with auto selection or a specific availability set selection, if the availability set is lost due to downtime or cluster scale-down, the services are currently not automatically reassigned to an available load balancer.
Service annotation for auto and specific load balancer mode (see the sketch below):
- service.beta.kubernetes.io/azure-load-balancer-mode (__auto__|as1,as2...)
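For illustration, a minimal sketch (hypothetical service name; assumes the standard `k8s.io/api` types) of a Service annotated for auto load balancer selection:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "__auto__" lets the provider pick among the available load balancers;
	// a comma-separated list such as "as1,as2" restricts the candidates.
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "frontend", // hypothetical name
			Namespace: "default",
			Annotations: map[string]string{
				"service.beta.kubernetes.io/azure-load-balancer-mode": "__auto__",
			},
		},
		Spec: v1.ServiceSpec{
			Type:  v1.ServiceTypeLoadBalancer,
			Ports: []v1.ServicePort{{Port: 80}},
		},
	}
	fmt.Println(svc.Annotations)
}
```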
## Introduced Functions
- reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error)
- Go through lb's properties, update based on wantLb
- If any change on the lb, no matter if the lb exists or not
- Call az cloud to CreateOrUpdate on this lb, or Delete if nothing left
- return lb, err
- reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error)
- Go through the NSG's properties, update based on wantLb
- Use destinationIPAddress as target address if possible
- Consolidate NSG rules if possible
- If any change on the NSG (the NSG should always exist)
- Call az cloud to CreateOrUpdate on this NSG
- return sg, err
- reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error)
- List all the public ip in the resource group
- Make sure we only touch Public IP resources that have tags[service] = "namespace/serviceName"
- skip for wantLb && !isInternal && pipName == desiredPipName
- delete other public ip resources if any
- if !isInternal && wantLb
- ensure Public IP with desiredPipName exists
- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
- gets the loadbalancer for the service if it already exists
- If wantLb is TRUE, it selects a new load balancer; the selection helps distribute the services across load balancers
- In case the selected load balancer does not exist, it returns a network.LoadBalancer struct with added metadata (such as name, location) and existsLB set to FALSE
- By default - cluster default LB is returned
## Define interface behaviors
### GetLoadBalancer
- Get LoadBalancer status, return status, error
- return the load balancer status for this service
- it will not create or update or delete any resource
### EnsureLoadBalancer
- Reconcile LB for the flipped service
- Call reconcileLoadBalancer(clusterName, flippedService, nil, false/* wantLb */)
- Reconcile Public IP
- Call reconcilePublicIP(cluster, service, true)
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
- Call reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
- Reconcile NSG rules; this needs to be called after reconcileLB
- Call reconcileSecurityGroup(clusterName, service, lbStatus, true /* wantLb */)
### UpdateLoadBalancer
- No different from EnsureLoadBalancer
### EnsureLoadBalancerDeleted
- Reconcile the NSG first, before reconciling the LB, because the SG needs the LB to be there
- Call reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */)
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
- Call reconcileLoadBalancer(clusterName, service, nodes, false)
- Reconcile the Public IP; it needs the related LB reconciled first
- Call reconcilePublicIP(cluster, service, false)
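To make the ordering concrete, here is a minimal, self-contained sketch (stand-in closures, not the real methods) of the delete path above:

```go
package main

import "fmt"

func main() {
	// Stand-ins for reconcileSecurityGroup, reconcileLoadBalancer and
	// reconcilePublicIP; on the delete path each receives wantLb=false.
	steps := []struct {
		name string
		run  func(wantLb bool) error
	}{
		// NSG first: its rules reference the LB's frontend IP.
		{"reconcileSecurityGroup", func(wantLb bool) error { fmt.Println("NSG, wantLb:", wantLb); return nil }},
		// Then the LB and the resources it owns (frontend IP configs, rules, probes).
		{"reconcileLoadBalancer", func(wantLb bool) error { fmt.Println("LB, wantLb:", wantLb); return nil }},
		// Public IP last: deletable only once the LB frontend reference is gone.
		{"reconcilePublicIP", func(wantLb bool) error { fmt.Println("PIP, wantLb:", wantLb); return nil }},
	}
	for _, s := range steps {
		if err := s.run(false); err != nil {
			fmt.Println(s.name, "failed:", err)
			return
		}
	}
}
```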

View File

@ -18,11 +18,13 @@ package azure
import (
"fmt"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
)
func TestFindProbe(t *testing.T) {
@ -210,3 +212,34 @@ func TestFindRule(t *testing.T) {
assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}
func TestGetIdleTimeout(t *testing.T) {
for _, c := range []struct {
desc string
annotations map[string]string
i *int32
err bool
}{
{desc: "no annotation"},
{desc: "annotation empty value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: ""}, err: true},
{desc: "annotation not a number", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "cookies"}, err: true},
{desc: "annotation negative value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "-6"}, err: true},
{desc: "annotation zero value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "0"}, err: true},
{desc: "annotation too low value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "3"}, err: true},
{desc: "annotation too high value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "31"}, err: true},
{desc: "annotation good value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "24"}, i: to.Int32Ptr(24)},
} {
t.Run(c.desc, func(t *testing.T) {
s := &v1.Service{}
s.Annotations = c.annotations
i, err := getIdleTimeout(s)
if !reflect.DeepEqual(c.i, i) {
t.Fatalf("got unexpected value: %d", to.Int32(i))
}
if (err != nil) != c.err {
t.Fatalf("expected error=%v, got %v", c.err, err)
}
})
}
}

View File

@ -17,40 +17,78 @@ limitations under the License.
package azure
import (
"context"
"fmt"
"path"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kwait "k8s.io/apimachinery/pkg/util/wait"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
// default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd
defaultDiskIOPSReadWrite = 500
defaultDiskMBpsReadWrite = 100
)
//ManagedDiskController : managed disk controller struct
type ManagedDiskController struct {
common *controllerCommon
}
// ManagedDiskOptions specifies the options of managed disks.
type ManagedDiskOptions struct {
// The name of the disk.
DiskName string
// The size in GB.
SizeGB int
// The name of PVC.
PVCName string
// The name of resource group.
ResourceGroup string
// The AvailabilityZone to create the disk.
AvailabilityZone string
// The tags of the disk.
Tags map[string]string
// The SKU of storage account.
StorageAccountType compute.DiskStorageAccountTypes
// IOPS Caps for UltraSSD disk
DiskIOPSReadWrite string
// Throughput Cap (MBps) for UltraSSD disk
DiskMBpsReadWrite string
}
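// exampleCreateManagedDisk is a minimal usage sketch (illustrative only, not
// part of the original file): it fills in ManagedDiskOptions with placeholder
// values and passes them to CreateManagedDisk below.
func exampleCreateManagedDisk(c *ManagedDiskController) (string, error) {
options := &ManagedDiskOptions{
DiskName: "pvc-disk-example", // placeholder name
SizeGB: 100,
StorageAccountType: compute.PremiumLRS,
// ResourceGroup defaults to the controller's resource group when empty;
// DiskIOPSReadWrite/DiskMBpsReadWrite apply only to UltraSSD_LRS disks.
}
return c.CreateManagedDisk(options)
}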
func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
return &ManagedDiskController{common: common}, nil
}
//CreateManagedDisk : create managed disk
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string,
sizeGB int, tags map[string]string) (string, error) {
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) {
var err error
klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
var createZones *[]string
if len(options.AvailabilityZone) > 0 {
zoneList := []string{c.common.cloud.GetZoneID(options.AvailabilityZone)}
createZones = &zoneList
}
// insert original tags to newTags
newTags := make(map[string]*string)
azureDDTag := "kubernetes-azure-dd"
newTags["created-by"] = &azureDDTag
// insert original tags to newTags
if tags != nil {
for k, v := range tags {
if options.Tags != nil {
for k, v := range options.Tags {
// Azure won't allow / (forward slash) in tags
newKey := strings.Replace(k, "/", "-", -1)
newValue := strings.Replace(v, "/", "-", -1)
@ -58,25 +96,59 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
}
}
diskSizeGB := int32(sizeGB)
diskSizeGB := int32(options.SizeGB)
diskSku := compute.DiskStorageAccountTypes(options.StorageAccountType)
diskProperties := compute.DiskProperties{
DiskSizeGB: &diskSizeGB,
CreationData: &compute.CreationData{CreateOption: compute.Empty},
}
if diskSku == compute.UltraSSDLRS {
diskIOPSReadWrite := int64(defaultDiskIOPSReadWrite)
if options.DiskIOPSReadWrite != "" {
v, err := strconv.Atoi(options.DiskIOPSReadWrite)
if err != nil {
return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %v", err)
}
diskIOPSReadWrite = int64(v)
}
diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite)
diskMBpsReadWrite := int32(defaultDiskMBpsReadWrite)
if options.DiskMBpsReadWrite != "" {
v, err := strconv.Atoi(options.DiskMBpsReadWrite)
if err != nil {
return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %v", err)
}
diskMBpsReadWrite = int32(v)
}
diskProperties.DiskMBpsReadWrite = to.Int32Ptr(diskMBpsReadWrite)
} else {
if options.DiskIOPSReadWrite != "" {
return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type")
}
if options.DiskMBpsReadWrite != "" {
return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type")
}
}
model := compute.Disk{
Location: &c.common.location,
Tags: newTags,
Zones: createZones,
Sku: &compute.DiskSku{
Name: compute.StorageAccountTypes(storageAccountType),
Name: diskSku,
},
DiskProperties: &compute.DiskProperties{
DiskSizeGB: &diskSizeGB,
CreationData: &compute.CreationData{CreateOption: compute.Empty},
}}
DiskProperties: &diskProperties,
}
if resourceGroup == "" {
resourceGroup = c.common.resourceGroup
if options.ResourceGroup == "" {
options.ResourceGroup = c.common.resourceGroup
}
ctx, cancel := getContextWithCancel()
defer cancel()
_, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, model)
_, err = c.common.cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model)
if err != nil {
return "", err
}
@ -84,7 +156,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
diskID := ""
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
provisionState, id, err := c.getDisk(resourceGroup, diskName)
provisionState, id, err := c.getDisk(options.ResourceGroup, options.DiskName)
diskID = id
// We are waiting for provisioningState==Succeeded
// We don't want to hand-off managed disks to k8s while they are
@ -99,9 +171,9 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
})
if err != nil {
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB)
} else {
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
}
return diskID, nil
@ -125,7 +197,7 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
// We don't need poll here, k8s will immediately stop referencing the disk
// the disk will be eventually deleted - cleanly - by ARM
glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
return nil
}
@ -172,7 +244,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024))
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
// If the disk is already greater than or equal to the requested size, we return
if *result.DiskProperties.DiskSizeGB >= requestGiB {
return newSizeQuant, nil
@ -186,7 +258,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
return oldSize, err
}
glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
return newSizeQuant, nil
}
@ -201,3 +273,58 @@ func getResourceGroupFromDiskURI(diskURI string) (string, error) {
}
return fields[4], nil
}
// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore if not AzureDisk.
if pv.Spec.AzureDisk == nil {
return nil, nil
}
// Ignore any volumes that are being provisioned
if pv.Spec.AzureDisk.DiskName == volume.ProvisionedVolumeName {
return nil, nil
}
return c.GetAzureDiskLabels(pv.Spec.AzureDisk.DataDiskURI)
}
// GetAzureDiskLabels gets availability zone labels for Azuredisk.
func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
// Get disk's resource group.
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err)
return nil, err
}
// Get information of the disk.
ctx, cancel := getContextWithCancel()
defer cancel()
disk, err := c.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err)
return nil, err
}
// Check whether availability zone is specified.
if disk.Zones == nil || len(*disk.Zones) == 0 {
klog.V(4).Infof("Azure disk %q is not zoned", diskName)
return nil, nil
}
zones := *disk.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %v", zones, diskName, err)
}
zone := c.makeZone(zoneID)
klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
labels := map[string]string{
kubeletapis.LabelZoneRegion: c.Location,
kubeletapis.LabelZoneFailureDomain: zone,
}
return labels, nil
}

View File

@ -20,19 +20,41 @@ import (
"context"
"fmt"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
)
// ListRoutes lists all managed routes that belong to the specified clusterName
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
routeTable, existsRouteTable, err := az.getRouteTable()
return processRoutes(routeTable, existsRouteTable, err)
routes, err := processRoutes(routeTable, existsRouteTable, err)
if err != nil {
return nil, err
}
// Compose routes for unmanaged nodes so that the node controller won't retry creating routes for them.
unmanagedNodes, err := az.GetUnmanagedNodes()
if err != nil {
return nil, err
}
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
for _, nodeName := range unmanagedNodes.List() {
if cidr, ok := az.routeCIDRs[nodeName]; ok {
routes = append(routes, &cloudprovider.Route{
Name: nodeName,
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: cidr,
})
}
}
return routes, nil
}
// Injectable for testing
@ -50,7 +72,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
for i, route := range *routeTable.Routes {
instance := mapRouteNameToNodeName(*route.Name)
cidr := *route.AddressPrefix
glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
kubeRoutes[i] = &cloudprovider.Route{
Name: *route.Name,
@ -60,13 +82,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
}
}
glog.V(10).Info("ListRoutes: FINISH")
klog.V(10).Info("ListRoutes: FINISH")
return kubeRoutes, nil
}
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return err
} else if existsRouteTable {
return nil
@ -81,17 +103,17 @@ func (az *Cloud) createRouteTable() error {
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
}
glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
klog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
klog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
klog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
}
}
if err != nil {
@ -107,7 +129,21 @@ func (az *Cloud) createRouteTable() error {
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
// Returns early for unmanaged nodes because the azure cloud provider cannot fetch information for them.
nodeName := string(kubeRoute.TargetNode)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return err
}
if unmanaged {
klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR
return nil
}
klog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
return err
}
@ -126,51 +162,65 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
},
}
glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
retryErr := az.CreateOrUpdateRouteWithRetry(route)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
}
}
if err != nil {
return err
}
glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil
}
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
// Returns early for unmanaged nodes because the azure cloud provider cannot fetch information for them.
nodeName := string(kubeRoute.TargetNode)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return err
}
if unmanaged {
klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
delete(az.routeCIDRs, nodeName)
return nil
}
klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
ctx, cancel := getContextWithCancel()
defer cancel()
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)
klog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
retryErr := az.DeleteRouteWithRetry(routeName)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
}
}
if err != nil {
return err
}
glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return nil
}

View File

@ -22,7 +22,8 @@ import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/apimachinery/pkg/util/sets"
cloudprovider "k8s.io/cloud-provider"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
@ -38,6 +39,8 @@ func TestDeleteRoute(t *testing.T) {
RouteTableName: "bar",
Location: "location",
},
unmanagedNodes: sets.NewString(),
nodeInformerSynced: func() bool { return true },
}
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
routeName := mapNodeNameToRouteName(route.TargetNode)
@ -62,6 +65,28 @@ func TestDeleteRoute(t *testing.T) {
ob, found := mp[routeName]
if found {
t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
t.FailNow()
}
// test delete route for unmanaged nodes.
nodeName := "node1"
nodeCIDR := "4.3.2.1/24"
cloud.unmanagedNodes.Insert(nodeName)
cloud.routeCIDRs = map[string]string{
nodeName: nodeCIDR,
}
route1 := cloudprovider.Route{
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: nodeCIDR,
}
err = cloud.DeleteRoute(context.TODO(), "cluster", &route1)
if err != nil {
t.Errorf("unexpected error deleting route: %v", err)
t.FailNow()
}
cidr, found := cloud.routeCIDRs[nodeName]
if found {
t.Errorf("unexpected CIDR item (%q) for %s", cidr, nodeName)
}
}
@ -79,6 +104,8 @@ func TestCreateRoute(t *testing.T) {
RouteTableName: "bar",
Location: "location",
},
unmanagedNodes: sets.NewString(),
nodeInformerSynced: func() bool { return true },
}
cache, _ := cloud.newRouteTableCache()
cloud.rtCache = cache
@ -122,6 +149,29 @@ func TestCreateRoute(t *testing.T) {
if *routeInfo.NextHopIPAddress != nodeIP {
t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
}
// test create route for unmanaged nodes.
nodeName := "node1"
nodeCIDR := "4.3.2.1/24"
cloud.unmanagedNodes.Insert(nodeName)
cloud.routeCIDRs = map[string]string{}
route1 := cloudprovider.Route{
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: nodeCIDR,
}
err = cloud.CreateRoute(context.TODO(), "cluster", "unused", &route1)
if err != nil {
t.Errorf("unexpected error creating route: %v", err)
t.FailNow()
}
cidr, found := cloud.routeCIDRs[nodeName]
if !found {
t.Errorf("unexpected missing item for %s", nodeName)
t.FailNow()
}
if cidr != nodeCIDR {
t.Errorf("unexpected cidr %s, saw %s", nodeCIDR, cidr)
}
}
func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {

View File

@ -17,6 +17,7 @@ limitations under the License.
package azure
import (
"context"
"errors"
"fmt"
"hash/crc32"
@ -26,16 +27,16 @@ import (
"strings"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/klog"
)
const (
@ -61,22 +62,24 @@ const (
var errNotInVMSet = errors.New("vm is not in the vmset")
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
var publicIPResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/publicIPAddresses/(?:.*)`)
// getStandardMachineID returns the full identifier of a virtual machine.
func (az *Cloud) getStandardMachineID(machineName string) string {
func (az *Cloud) getStandardMachineID(resourceGroup, machineName string) string {
return fmt.Sprintf(
machineIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
resourceGroup,
machineName)
}
// returns the full identifier of an availabilitySet
func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
return fmt.Sprintf(
availabilitySetIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
resourceGroup,
availabilitySetName)
}
@ -123,7 +126,7 @@ func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (
// Thus Azure does not allow mixed-type (public and internal) load balancers.
// So we use a separate name for the internal load balancer.
// This would be the name for Azure LoadBalancer resource.
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
lbNamePrefix := vmSetName
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
lbNamePrefix = clusterName
@ -220,20 +223,22 @@ func getBackendPoolName(clusterName string) string {
return clusterName
}
func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
prefix := az.getRulePrefix(service)
if subnetName == nil {
return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port)
return fmt.Sprintf("%s-%s-%d", prefix, port.Protocol, port.Port)
}
return fmt.Sprintf("%s-%s-%s-%d", getRulePrefix(service), *subnetName, port.Protocol, port.Port)
return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, port.Protocol, port.Port)
}
func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
if useSharedSecurityRule(service) {
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
}
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix)
rulePrefix := az.getRulePrefix(service)
return fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix)
}
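For reference, the format strings above yield names like the following. This is a standalone illustration only: the prefix value is an assumption (getRulePrefix returns the service's load-balancer name, which this sketch hard-codes as a placeholder):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed prefix: getRulePrefix returns the service's load-balancer
	// name; "aservice123" is a placeholder, not a real value.
	prefix := "aservice123"

	// Load-balancer rule without a subnet: <prefix>-<protocol>-<port>
	fmt.Printf("%s-%s-%d\n", prefix, "TCP", 80) // aservice123-TCP-80

	// Shared security rule: "/" in the source prefix becomes "_"
	safePrefix := strings.Replace("10.0.0.0/8", "/", "_", -1)
	fmt.Printf("shared-%s-%d-%s\n", "TCP", 80, safePrefix) // shared-TCP-80-10.0.0.0_8
}
```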
// This returns a human-readable version of the Service used to tag some resources.
@ -243,26 +248,26 @@ func getServiceName(service *v1.Service) string {
}
// This returns a prefix for loadbalancer/security rules.
func getRulePrefix(service *v1.Service) string {
return cloudprovider.GetLoadBalancerName(service)
func (az *Cloud) getRulePrefix(service *v1.Service) string {
return az.GetLoadBalancerName(context.TODO(), "", service)
}
func getPublicIPName(clusterName string, service *v1.Service) string {
return fmt.Sprintf("%s-%s", clusterName, cloudprovider.GetLoadBalancerName(service))
func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) string {
return fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service))
}
func serviceOwnsRule(service *v1.Service, rule string) bool {
prefix := getRulePrefix(service)
func (az *Cloud) serviceOwnsRule(service *v1.Service, rule string) bool {
prefix := az.getRulePrefix(service)
return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
}
func serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
baseName := cloudprovider.GetLoadBalancerName(service)
func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
return strings.HasPrefix(*fip.Name, baseName)
}
func getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
baseName := cloudprovider.GetLoadBalancerName(service)
func (az *Cloud) getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
if subnetName != nil {
return fmt.Sprintf("%s-%s", baseName, *subnetName)
}
@ -303,51 +308,6 @@ func MakeCRC32(str string) string {
return strconv.FormatUint(uint64(hash), 10)
}
//ExtractVMData : extract dataDisks, storageProfile from a map struct
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
storageProfile map[string]interface{},
hardwareProfile map[string]interface{}, err error) {
props, ok := vmData["properties"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
}
storageProfile, ok = props["storageProfile"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
}
hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
}
dataDisks, ok = storageProfile["dataDisks"].([]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
}
return dataDisks, storageProfile, hardwareProfile, nil
}
//ExtractDiskData : extract provisioningState, diskState from a map struct
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
fragment, ok := diskData.(map[string]interface{})
if !ok {
return "", "", fmt.Errorf("convert diskData to map error")
}
properties, ok := fragment["properties"].(map[string]interface{})
if !ok {
return "", "", fmt.Errorf("convert diskData(properties) to map error")
}
provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there
if ref, ok := properties["diskState"]; ok {
diskState = ref.(string)
}
return provisioningState, diskState, nil
}
// availabilitySet implements VMSet interface for Azure availability sets.
type availabilitySet struct {
*Cloud
@ -373,10 +333,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
}
if err != nil {
if as.CloudProviderBackoff {
glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
if err != nil {
glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
return "", err
}
} else {
@ -386,6 +346,26 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
return *machine.ID, nil
}
// GetPowerStatusByNodeName returns the power state of the specified node.
func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
vm, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
return powerState, err
}
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
statuses := *vm.InstanceView.Statuses
for _, status := range statuses {
state := to.String(status.Code)
if strings.HasPrefix(state, vmPowerStatePrefix) {
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
}
}
}
return "", fmt.Errorf("failed to get power status for node %q", name)
}
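A small sketch of the status-code scan above, assuming vmPowerStatePrefix is "PowerState/" (its definition lives elsewhere in this package and is not shown in this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed value; vmPowerStatePrefix is declared elsewhere in this package.
const vmPowerStatePrefix = "PowerState/"

// powerStateFromCodes mirrors the instance-view scan above: find the first
// status code carrying the power-state prefix and strip it.
func powerStateFromCodes(codes []string) (string, error) {
	for _, code := range codes {
		if strings.HasPrefix(code, vmPowerStatePrefix) {
			return strings.TrimPrefix(code, vmPowerStatePrefix), nil
		}
	}
	return "", fmt.Errorf("no PowerState status found")
}

func main() {
	state, _ := powerStateFromCodes([]string{"ProvisioningState/succeeded", "PowerState/running"})
	fmt.Println(state) // running
}
```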
// GetNodeNameByProviderID gets the node name by provider ID.
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
// NodeName is part of providerID for standard instances.
@ -401,21 +381,36 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
machine, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
return "", err
}
return string(machine.HardwareProfile.VMSize), nil
}
// GetZoneByNodeName gets zone from instance view.
// GetZoneByNodeName gets the availability zone for the specified node. If the node is not running
// in an availability zone, it returns the fault domain instead.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
vm, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
return cloudprovider.Zone{}, err
}
failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
var failureDomain string
if vm.Zones != nil && len(*vm.Zones) > 0 {
// Get availability zone for the node.
zones := *vm.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
}
failureDomain = as.makeZone(zoneID)
} else {
// Availability zone is not used for the node, falling back to fault domain.
failureDomain = strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
}
zone := cloudprovider.Zone{
FailureDomain: failureDomain,
Region: *(vm.Location),
@ -438,7 +433,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
ipConfig, err := getPrimaryIPConfig(nic)
if err != nil {
glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
klog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
return "", "", err
}
@ -465,9 +460,9 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
vms, err := as.VirtualMachineClientListWithRetry()
vms, err := as.VirtualMachineClientListWithRetry(as.ResourceGroup)
if err != nil {
glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
klog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
return nil, err
}
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
@ -486,7 +481,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
}
asID, ok := vmNameToAvailabilitySetID[nodeName]
if !ok {
glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
}
if availabilitySetIDs.Has(asID) {
@ -495,7 +490,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
}
asName, err := getLastSegment(asID)
if err != nil {
glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
return nil, err
}
// AvailabilitySet IDs are currently upper-cased in a non-deterministic way
@ -521,11 +516,11 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
}
availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
if err != nil {
glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
return nil, err
}
if len(*availabilitySetNames) == 0 {
glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
}
// sort the list to have deterministic selection
@ -545,7 +540,7 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
}
}
if !found {
glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
klog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
}
}
@ -560,13 +555,33 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interfa
return as.getPrimaryInterfaceWithVMSet(nodeName, "")
}
// extractResourceGroupByNicID extracts the resource group name by nicID.
func extractResourceGroupByNicID(nicID string) (string, error) {
matches := nicResourceGroupRE.FindStringSubmatch(nicID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
}
return matches[1], nil
}
// extractResourceGroupByPipID extracts the resource group name by publicIP ID.
func extractResourceGroupByPipID(pipID string) (string, error) {
matches := publicIPResourceGroupRE.FindStringSubmatch(pipID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from pipID %q", pipID)
}
return matches[1], nil
}
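A quick self-contained check of the extraction regexes introduced above; the ARM IDs below are made-up examples:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	nicResourceGroupRE      = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
	publicIPResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/publicIPAddresses/(?:.*)`)
)

func main() {
	nicID := "/subscriptions/sub1/resourceGroups/rg-nodes/providers/Microsoft.Network/networkInterfaces/node-nic-0"
	pipID := "/subscriptions/sub1/resourceGroups/rg-ips/providers/Microsoft.Network/publicIPAddresses/svc-pip"
	fmt.Println(nicResourceGroupRE.FindStringSubmatch(nicID)[1])      // rg-nodes
	fmt.Println(publicIPResourceGroupRE.FindStringSubmatch(pipID)[1]) // rg-ips
}
```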
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
var machine compute.VirtualMachine
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
if err != nil {
glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
return network.Interface{}, err
}
@ -578,6 +593,10 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
if err != nil {
return network.Interface{}, err
}
nodeResourceGroup, err := as.GetNodeResourceGroup(nodeName)
if err != nil {
return network.Interface{}, err
}
// Check availability set name. Note that vmSetName is empty string when getting
// the Node's IP address. While vmSetName is not empty, it should be checked with
@ -587,17 +606,22 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
// don't check vmSet for it.
if vmSetName != "" && !as.useStandardLoadBalancer() {
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
glog.V(3).Infof(
klog.V(3).Infof(
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
return network.Interface{}, errNotInVMSet
}
}
nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
if err != nil {
return network.Interface{}, err
}
ctx, cancel := getContextWithCancel()
defer cancel()
nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
nic, err := as.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
if err != nil {
return network.Interface{}, err
}
@ -607,21 +631,22 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
vmName := mapNodeNameToVMName(nodeName)
serviceName := getServiceName(service)
nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
if err != nil {
if err == errNotInVMSet {
glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
klog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
return nil
}
glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
klog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
return err
}
if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState {
glog.V(3).Infof("ensureHostInPool skips node %s because its primdary nic %s is in Failed state", nodeName, nic.Name)
klog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
return nil
}
@ -654,7 +679,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
if len(matches) == 2 {
lbName := matches[1]
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
return nil
}
}
@ -669,17 +694,17 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
nicName := *nic.Name
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic)
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
klog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
retryErr := as.CreateOrUpdateInterfaceWithRetry(service, nic)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
klog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
}
}
if err != nil {
@ -691,19 +716,24 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
hostUpdates := make([]func() error, 0, len(nodes))
for _, node := range nodes {
localNodeName := node.Name
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
continue
}
if as.ShouldNodeExcludedFromLoadBalancer(node) {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
f := func() error {
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
err := as.ensureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
if err != nil {
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
}
return nil
}
@ -719,7 +749,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
// Do nothing for availability set.
return nil
}

View File

@ -171,7 +171,7 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
}
}
func TestGetLoadBalancerName(t *testing.T) {
func TestGetAzureLoadBalancerName(t *testing.T) {
az := getTestCloud()
az.PrimaryAvailabilitySetName = "primary"
@ -247,7 +247,7 @@ func TestGetLoadBalancerName(t *testing.T) {
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
loadbalancerName := az.getLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
loadbalancerName := az.getAzureLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
assert.Equal(t, c.expected, loadbalancerName, c.description)
}
}

View File

@ -19,20 +19,26 @@ package azure
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"k8s.io/klog"
)
const (
defaultStorageAccountType = string(storage.StandardLRS)
defaultStorageAccountKind = storage.StorageV2
fileShareAccountNamePrefix = "f"
sharedDiskAccountNamePrefix = "ds"
dedicatedDiskAccountNamePrefix = "dd"
)
// CreateFileShare creates a file share, using a matching storage account
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
// CreateFileShare creates a file share, using a matching storage account type, account kind, etc.
// A new storage account will be created if the specified account is not found.
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, accountKind, resourceGroup, location string, requestGiB int) (string, string, error) {
if resourceGroup == "" {
resourceGroup = az.resourceGroup
}
account, key, err := az.ensureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, fileShareAccountNamePrefix)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
@ -40,7 +46,7 @@ func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location s
if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
}
glog.V(4).Infof("created share %s in account %s", shareName, account)
klog.V(4).Infof("created share %s in account %s", shareName, account)
return account, key, nil
}
@ -49,7 +55,7 @@ func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) erro
if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil {
return err
}
glog.V(4).Infof("share %s deleted", shareName)
klog.V(4).Infof("share %s deleted", shareName)
return nil
}

View File

@ -19,7 +19,7 @@ package azure
import (
"testing"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)
func TestCreateFileShare(t *testing.T) {
@ -30,6 +30,7 @@ func TestCreateFileShare(t *testing.T) {
name := "baz"
sku := "sku"
kind := "StorageV2"
location := "centralus"
value := "foo key"
bogus := "bogus"
@ -38,6 +39,7 @@ func TestCreateFileShare(t *testing.T) {
name string
acct string
acctType string
acctKind string
loc string
gb int
accounts storage.AccountListResult
@ -52,6 +54,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "bar",
acctType: "type",
acctKind: "StorageV2",
loc: "eastus",
gb: 10,
expectErr: true,
@ -60,6 +63,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: "type",
acctKind: "StorageV2",
loc: "eastus",
gb: 10,
expectErr: true,
@ -68,11 +72,12 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
Value: &[]storage.Account{
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Kind: storage.Kind(kind), Location: &location},
},
},
keys: storage.AccountListKeysResult{
@ -87,6 +92,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
@ -100,6 +106,7 @@ func TestCreateFileShare(t *testing.T) {
name: "foo",
acct: "",
acctType: sku,
acctKind: kind,
loc: location,
gb: 10,
accounts: storage.AccountListResult{
@ -116,7 +123,7 @@ func TestCreateFileShare(t *testing.T) {
fake.Keys = test.keys
fake.Err = test.err
account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.loc, test.gb)
account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.acctKind, "rg", test.loc, test.gb)
if test.expectErr && err == nil {
t.Errorf("unexpected non-error")
continue

View File

@ -20,9 +20,9 @@ import (
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"k8s.io/klog"
)
type accountWithLocation struct {
@ -30,15 +30,15 @@ type accountWithLocation struct {
}
// getStorageAccounts gets the name, type, and location of all storage accounts in a resource group that match matchingAccountType, matchingAccountKind, and matchingLocation.
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingAccountKind, resourceGroup, matchingLocation string) ([]accountWithLocation, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, az.ResourceGroup)
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, resourceGroup)
if err != nil {
return nil, err
}
if result.Value == nil {
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", az.ResourceGroup)
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", resourceGroup)
}
accounts := []accountWithLocation{}
@ -49,6 +49,10 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
continue
}
if matchingAccountKind != "" && !strings.EqualFold(matchingAccountKind, string(acct.Kind)) {
continue
}
location := *acct.Location
if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) {
continue
@ -61,11 +65,11 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
}
// getStorageAccesskey gets the storage account access key
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
func (az *Cloud) getStorageAccesskey(account, resourceGroup string) (string, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := az.StorageAccountClient.ListKeys(ctx, az.ResourceGroup, account)
result, err := az.StorageAccountClient.ListKeys(ctx, resourceGroup, account)
if err != nil {
return "", err
}
@ -86,17 +90,17 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
}
// ensureStorageAccount searches for a matching storage account, creates one (named with genAccountNamePrefix) if none is found, and returns the accountName and accountKey.
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
func (az *Cloud) ensureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, genAccountNamePrefix string) (string, string, error) {
if len(accountName) == 0 {
// find a storage account that matches accountType and accountKind
accounts, err := az.getStorageAccounts(accountType, location)
accounts, err := az.getStorageAccounts(accountType, accountKind, resourceGroup, location)
if err != nil {
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
}
if len(accounts) > 0 {
accountName = accounts[0].Name
glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
}
if len(accountName) == 0 {
@ -109,19 +113,23 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
accountType = defaultStorageAccountType
}
glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
accountName, az.ResourceGroup, location, accountType)
// use StorageV2 by default per https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
kind := defaultStorageAccountKind
if accountKind != "" {
kind = storage.Kind(accountKind)
}
klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s",
accountName, resourceGroup, location, accountType, kind)
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
Kind: storage.StorageV2,
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
Kind: kind,
AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)},
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
Location: &location}
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
Location: &location}
ctx, cancel := getContextWithCancel()
defer cancel()
_, err := az.StorageAccountClient.Create(ctx, az.ResourceGroup, accountName, cp)
_, err := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp)
if err != nil {
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
}
@ -129,7 +137,7 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
}
// find the access key with this account
accountKey, err := az.getStorageAccesskey(accountName)
accountKey, err := az.getStorageAccesskey(accountName, resourceGroup)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}

View File

@ -20,7 +20,7 @@ import (
"fmt"
"testing"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)
func TestGetStorageAccessKeys(t *testing.T) {
@ -64,7 +64,7 @@ func TestGetStorageAccessKeys(t *testing.T) {
expectedKey := test.expectedKey
fake.Keys = test.results
fake.Err = test.err
key, err := cloud.getStorageAccesskey("acct")
key, err := cloud.getStorageAccesskey("acct", "rg")
if test.expectErr && err == nil {
t.Errorf("Unexpected non-error")
continue

View File

@ -19,23 +19,22 @@ package azure
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
@ -859,13 +858,13 @@ func TestReconcilePublicIPWithNewService(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
validatePublicIP(t, pip, &svc, true)
pip2, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb */)
pip2, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb */)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@ -880,7 +879,7 @@ func TestReconcilePublicIPRemoveService(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", v1.ProtocolTCP, 80, 443)
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@ -888,7 +887,7 @@ func TestReconcilePublicIPRemoveService(t *testing.T) {
validatePublicIP(t, pip, &svc, true)
// Remove the service
pip, err = az.reconcilePublicIP(testClusterName, &svc, false /* wantLb */)
pip, err = az.reconcilePublicIP(testClusterName, &svc, nil, false /* wantLb */)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@ -900,7 +899,7 @@ func TestReconcilePublicIPWithInternalService(t *testing.T) {
az := getTestCloud()
svc := getInternalTestService("servicea", 80, 443)
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@ -912,7 +911,7 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
az := getTestCloud()
svc := getInternalTestService("servicea", 80, 443)
pip, err := az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
pip, err := az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@ -920,14 +919,14 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
// Update to external service
svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80)
pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, true /* wantLb*/)
pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, nil, true /* wantLb*/)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
validatePublicIP(t, pip, &svcUpdated, true)
// Update to internal service again
pip, err = az.reconcilePublicIP(testClusterName, &svc, true /* wantLb*/)
pip, err = az.reconcilePublicIP(testClusterName, &svc, nil, true /* wantLb*/)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@ -950,8 +949,13 @@ func getTestCloud() (az *Cloud) {
RouteTableName: "rt",
PrimaryAvailabilitySetName: "as",
MaximumLoadBalancerRuleCount: 250,
VMType: vmTypeStandard,
VMType: vmTypeStandard,
},
nodeZones: map[string]sets.String{},
nodeInformerSynced: func() bool { return true },
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{},
}
az.DisksClient = newFakeDisksClient()
az.InterfacesClient = newFakeAzureInterfacesClient()
@ -1061,7 +1065,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
az.InterfacesClient.CreateOrUpdate(ctx, az.Config.ResourceGroup, nicName, newNIC)
// create vm
asID := az.getAvailabilitySetID(asName)
asID := az.getAvailabilitySetID(az.Config.ResourceGroup, asName)
newVM := compute.VirtualMachine{
Name: &vmName,
Location: &az.Config.Location,
@ -1159,7 +1163,7 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
for _, port := range service.Spec.Ports {
sources := getServiceSourceRanges(&service)
for _, src := range sources {
ruleName := getSecurityRuleName(&service, port, src)
ruleName := az.getSecurityRuleName(&service, port, src)
rules = append(rules, network.SecurityRule{
Name: to.StringPtr(ruleName),
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
@ -1190,6 +1194,7 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
}
func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, services ...v1.Service) {
az := getTestCloud()
expectedRuleCount := 0
expectedFrontendIPCount := 0
expectedProbeCount := 0
@ -1198,14 +1203,14 @@ func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, serv
if len(svc.Spec.Ports) > 0 {
expectedFrontendIPCount++
expectedFrontendIP := ExpectedFrontendIPInfo{
Name: getFrontendIPConfigName(&svc, subnet(&svc)),
Name: az.getFrontendIPConfigName(&svc, subnet(&svc)),
Subnet: subnet(&svc),
}
expectedFrontendIPs = append(expectedFrontendIPs, expectedFrontendIP)
}
for _, wantedRule := range svc.Spec.Ports {
expectedRuleCount++
wantedRuleName := getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
foundRule := false
for _, actualRule := range *loadBalancer.LoadBalancingRules {
if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
@ -1396,12 +1401,13 @@ func securityRuleMatches(serviceSourceRange string, servicePort v1.ServicePort,
}
func validateSecurityGroup(t *testing.T, securityGroup *network.SecurityGroup, services ...v1.Service) {
az := getTestCloud()
seenRules := make(map[string]string)
for _, svc := range services {
for _, wantedRule := range svc.Spec.Ports {
sources := getServiceSourceRanges(&svc)
for _, source := range sources {
wantedRuleName := getSecurityRuleName(&svc, wantedRule, source)
wantedRuleName := az.getSecurityRuleName(&svc, wantedRule, source)
seenRules[wantedRuleName] = wantedRuleName
foundRule := false
for _, actualRule := range *securityGroup.SecurityRules {
@ -1667,60 +1673,67 @@ func validateEmptyConfig(t *testing.T, config string) {
t.Errorf("got incorrect value for CloudProviderRateLimit")
}
}
func TestGetZone(t *testing.T) {
data := `{"ID":"_azdev","UD":"0","FD":"99"}`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, data)
}))
defer ts.Close()
cloud := &Cloud{}
cloud.Location = "eastus"
zone, err := cloud.getZoneFromURL(ts.URL)
if err != nil {
t.Errorf("Unexpected error: %v", err)
cloud := &Cloud{
Config: Config{
Location: "eastus",
},
}
if zone.FailureDomain != "99" {
t.Errorf("Unexpected value: %s, expected '99'", zone.FailureDomain)
}
if zone.Region != cloud.Location {
t.Errorf("Expected: %s, saw: %s", cloud.Location, zone.Region)
}
}
func TestFetchFaultDomain(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, `{"ID":"_azdev","UD":"0","FD":"99"}`)
}))
defer ts.Close()
faultDomain, err := fetchFaultDomain(ts.URL)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if faultDomain == nil {
t.Errorf("Unexpected nil fault domain")
}
if *faultDomain != "99" {
t.Errorf("Expected '99', saw '%s'", *faultDomain)
}
}
func TestDecodeInstanceInfo(t *testing.T) {
response := `{"ID":"_azdev","UD":"0","FD":"99"}`
faultDomain, err := readFaultDomain(strings.NewReader(response))
if err != nil {
t.Errorf("Unexpected error in ReadFaultDomain: %v", err)
testcases := []struct {
name string
zone string
faultDomain string
expected string
}{
{
name: "GetZone should get real zone if only node's zone is set",
zone: "1",
expected: "eastus-1",
},
{
name: "GetZone should get real zone if both node's zone and FD are set",
zone: "1",
faultDomain: "99",
expected: "eastus-1",
},
{
name: "GetZone should get faultDomain if node's zone isn't set",
faultDomain: "99",
expected: "99",
},
}
if faultDomain == nil {
t.Error("Fault domain was unexpectedly nil")
}
for _, test := range testcases {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
if *faultDomain != "99" {
t.Error("got incorrect fault domain")
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, `{"compute":{"zone":"%s", "platformFaultDomain":"%s"}}`, test.zone, test.faultDomain)
}))
go func() {
http.Serve(listener, mux)
}()
defer listener.Close()
cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
zone, err := cloud.GetZone(context.Background())
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
if zone.FailureDomain != test.expected {
t.Errorf("Test [%s] unexpected zone: %s, expected %q", test.name, zone.FailureDomain, test.expected)
}
if zone.Region != cloud.Location {
t.Errorf("Test [%s] unexpected region: %s, expected: %s", test.name, zone.Region, cloud.Location)
}
}
}
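The handler above mimics the instance-metadata compute document that GetZone consumes; a minimal decode of that shape (field names inferred from the test fixture only, not from the provider's metadata types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Field names are inferred from the test fixture above only; the
// provider's real metadata types are not shown in this diff.
type computeMetadata struct {
	Zone                string `json:"zone"`
	PlatformFaultDomain string `json:"platformFaultDomain"`
}

type instanceMetadata struct {
	Compute computeMetadata `json:"compute"`
}

func main() {
	raw := `{"compute":{"zone":"1", "platformFaultDomain":"99"}}`
	var md instanceMetadata
	if err := json.Unmarshal([]byte(raw), &md); err != nil {
		panic(err)
	}
	fmt.Println(md.Compute.Zone, md.Compute.PlatformFaultDomain) // 1 99
}
```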
@ -1776,73 +1789,6 @@ func TestGetNodeNameByProviderID(t *testing.T) {
}
}
func TestMetadataURLGeneration(t *testing.T) {
metadata := NewInstanceMetadata()
fullPath := metadata.makeMetadataURL("some/path")
if fullPath != "http://169.254.169.254/metadata/some/path" {
t.Errorf("Expected http://169.254.169.254/metadata/some/path saw %s", fullPath)
}
}
func TestMetadataParsing(t *testing.T) {
data := `
{
"interface": [
{
"ipv4": {
"ipAddress": [
{
"privateIpAddress": "10.0.1.4",
"publicIpAddress": "X.X.X.X"
}
],
"subnet": [
{
"address": "10.0.1.0",
"prefix": "24"
}
]
},
"ipv6": {
"ipAddress": [
]
},
"macAddress": "002248020E1E"
}
]
}
`
network := NetworkMetadata{}
if err := json.Unmarshal([]byte(data), &network); err != nil {
t.Errorf("Unexpected error: %v", err)
}
ip := network.Interface[0].IPV4.IPAddress[0].PrivateIP
if ip != "10.0.1.4" {
t.Errorf("Unexpected value: %s, expected 10.0.1.4", ip)
}
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, data)
}))
defer server.Close()
metadata := &InstanceMetadata{
baseURL: server.URL,
}
networkJSON := NetworkMetadata{}
if err := metadata.Object("/some/path", &networkJSON); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !reflect.DeepEqual(network, networkJSON) {
t.Errorf("Unexpected inequality:\n%#v\nvs\n%#v", network, networkJSON)
}
}
func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) {
if svc.Annotations[ServiceAnnotationLoadBalancerInternal] != "true" {
t.Error("Subnet added to non-internal service")
@ -1929,8 +1875,8 @@ func TestIfServiceSpecifiesSharedRuleAndRuleExistsThenTheServicesPortAndAddressA
SourceAddressPrefix: to.StringPtr("Internet"),
DestinationPortRange: to.StringPtr("80"),
DestinationAddressPrefix: to.StringPtr("192.168.33.44"),
Access: network.SecurityRuleAccessAllow,
Direction: network.SecurityRuleDirectionInbound,
Access: network.SecurityRuleAccessAllow,
Direction: network.SecurityRuleDirectionInbound,
},
},
}
@ -2537,8 +2483,8 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
expectedRuleName13 := "shared-TCP-4444-Internet"
expectedRuleName2 := "shared-TCP-8888-Internet"
expectedRuleName4 := getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
expectedRuleName5 := getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")
expectedRuleName4 := az.getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
expectedRuleName5 := az.getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")
sg := getTestSecurityGroup(az)
@ -2735,3 +2681,100 @@ func TestGetResourceGroupFromDiskURI(t *testing.T) {
}
}
}
func TestGetResourceGroups(t *testing.T) {
tests := []struct {
name string
nodeResourceGroups map[string]string
expected sets.String
informerSynced bool
expectError bool
}{
{
name: "cloud provider configured RG should be returned by default",
nodeResourceGroups: map[string]string{},
informerSynced: true,
expected: sets.NewString("rg"),
},
{
name: "cloud provider configured RG and node RGs should be returned",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: true,
expected: sets.NewString("rg", "rg1", "rg2"),
},
{
name: "error should be returned if informer hasn't synced yet",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: false,
expectError: true,
},
}
az := getTestCloud()
for _, test := range tests {
az.nodeResourceGroups = test.nodeResourceGroups
if test.informerSynced {
az.nodeInformerSynced = func() bool { return true }
} else {
az.nodeInformerSynced = func() bool { return false }
}
actual, err := az.GetResourceGroups()
if test.expectError {
assert.NotNil(t, err, test.name)
continue
}
assert.Nil(t, err, test.name)
assert.Equal(t, test.expected, actual, test.name)
}
}
func TestGetNodeResourceGroup(t *testing.T) {
tests := []struct {
name string
nodeResourceGroups map[string]string
node string
expected string
informerSynced bool
expectError bool
}{
{
name: "cloud provider configured RG should be returned by default",
nodeResourceGroups: map[string]string{},
informerSynced: true,
node: "node1",
expected: "rg",
},
{
name: "node RGs should be returned",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: true,
node: "node1",
expected: "rg1",
},
{
name: "error should be returned if informer hasn't synced yet",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: false,
expectError: true,
},
}
az := getTestCloud()
for _, test := range tests {
az.nodeResourceGroups = test.nodeResourceGroups
if test.informerSynced {
az.nodeInformerSynced = func() bool { return true }
} else {
az.nodeInformerSynced = func() bool { return false }
}
actual, err := az.GetNodeResourceGroup(test.node)
if test.expectError {
assert.NotNil(t, err, test.name)
continue
}
assert.Nil(t, err, test.name)
assert.Equal(t, test.expected, actual, test.name)
}
}
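These two tests pin down the lookup contract without showing its implementation; a hedged sketch of that contract, using stand-in types rather than the provider's Cloud struct:

```go
package main

import "fmt"

// Stand-ins for the fields these tests exercise (assumptions, not the
// provider's full Cloud type).
type cloud struct {
	resourceGroup      string
	nodeResourceGroups map[string]string
	nodeInformerSynced func() bool
}

// Sketch of the contract the tests pin down: error before the node
// informer syncs, per-node resource group when recorded, otherwise the
// configured default.
func (az *cloud) getNodeResourceGroup(nodeName string) (string, error) {
	if az.nodeInformerSynced == nil || !az.nodeInformerSynced() {
		return "", fmt.Errorf("node informer is not synced yet")
	}
	if rg, ok := az.nodeResourceGroups[nodeName]; ok {
		return rg, nil
	}
	return az.resourceGroup, nil
}

func main() {
	az := &cloud{
		resourceGroup:      "rg",
		nodeResourceGroups: map[string]string{"node1": "rg1"},
		nodeInformerSynced: func() bool { return true },
	}
	fmt.Println(az.getNodeResourceGroup("node1")) // rg1 <nil>
	fmt.Println(az.getNodeResourceGroup("node9")) // rg <nil>
}
```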

View File

@ -17,12 +17,12 @@ limitations under the License.
package azure
import (
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)
// VMSet defines functions all vmsets (including scale set and availability
@ -54,9 +54,9 @@ type VMSet interface {
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
@ -64,4 +64,7 @@ type VMSet interface {
DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
// GetDataDisks gets a list of data disks attached to the node.
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
// GetPowerStatusByNodeName returns the power state of the specified node.
GetPowerStatusByNodeName(name string) (string, error)
}

View File

@ -24,24 +24,26 @@ import (
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)
var (
// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
ErrorNotVmssInstance = errors.New("not a vmss instance")
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
vmssNicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines/(?:.*)/networkInterfaces/(?:.*)`)
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
)
// scaleSet implements VMSet interface for Azure scale set.
@ -106,31 +108,58 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co
return "", "", vm, cloudprovider.InstanceNotFound
}
glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
if err != nil {
return "", "", vm, err
}
klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID))
cachedVM, err := ss.vmssVMCache.Get(key)
if err != nil {
return ssName, instanceID, vm, err
}
if cachedVM == nil {
glog.Errorf("Can't find node (%q) in any scale sets", nodeName)
klog.Errorf("Can't find node (%q) in any scale sets", nodeName)
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
}
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
}
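buildVmssCacheKey itself is not part of this diff; a plausible minimal version (an assumption, not the provider's code) just namespaces the VMSS VM name by resource group so that same-named VMs in different groups get distinct cache entries:

```go
package main

import (
	"fmt"
	"strings"
)

// A plausible buildVmssCacheKey (an assumption — the real helper is not in
// this diff): namespace the VMSS VM name by resource group so same-named
// VMs in different resource groups get distinct cache entries. The "#"
// separator here is illustrative only.
func buildVmssCacheKey(resourceGroup, vmName string) string {
	return strings.ToLower(resourceGroup) + "#" + vmName
}

func main() {
	fmt.Println(buildVmssCacheKey("RG1", "vmss_0")) // rg1#vmss_0
}
```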
// GetPowerStatusByNodeName returns the power state of the specified node.
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
_, _, vm, err := ss.getVmssVM(name)
if err != nil {
return powerState, err
}
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
statuses := *vm.InstanceView.Statuses
for _, status := range statuses {
state := to.String(status.Code)
if strings.HasPrefix(state, vmPowerStatePrefix) {
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
}
}
}
return "", fmt.Errorf("failed to get power status for node %q", name)
}
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
cachedVM, err := ss.vmssVMCache.Get(vmName)
key := buildVmssCacheKey(resourceGroup, vmName)
cachedVM, err := ss.vmssVMCache.Get(key)
if err != nil {
return vm, err
}
if cachedVM == nil {
glog.Errorf("cound't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID)
klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID)
return vm, cloudprovider.InstanceNotFound
}
@ -143,7 +172,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm c
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
if err != nil {
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err
}
if managedByAS {
@ -164,17 +193,22 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
// NodeName is not part of providerID for vmss instances.
scaleSetName, err := extractScaleSetNameByProviderID(providerID)
if err != nil {
glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
klog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}
resourceGroup, err := extractResourceGroupByProviderID(providerID)
if err != nil {
return "", fmt.Errorf("error of extracting resource group for node %q", providerID)
}
instanceID, err := getLastSegment(providerID)
if err != nil {
glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
klog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}
vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
if err != nil {
return "", err
}
@ -191,7 +225,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
if err != nil {
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err
}
if managedByAS {
@ -211,11 +245,12 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
return "", nil
}
// GetZoneByNodeName gets cloudprovider.Zone by node name.
// GetZoneByNodeName gets the availability zone for the specified node. If the node is not running
// in an availability zone, it returns the fault domain instead.
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
if err != nil {
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return cloudprovider.Zone{}, err
}
if managedByAS {
@ -228,14 +263,25 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
return cloudprovider.Zone{}, err
}
if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
return cloudprovider.Zone{
FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
Region: *vm.Location,
}, nil
var failureDomain string
if vm.Zones != nil && len(*vm.Zones) > 0 {
// Get availability zone for the node.
zones := *vm.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
}
failureDomain = ss.makeZone(zoneID)
} else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
// Availability zone is not used for the node, falling back to fault domain.
failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
}
return cloudprovider.Zone{}, nil
return cloudprovider.Zone{
FailureDomain: failureDomain,
Region: *vm.Location,
}, nil
}
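Both zone paths above rely on makeZone, which is not shown in this diff; judging by the zone test earlier (which expects "eastus-1"), a plausible reconstruction is a region-qualified "<location>-<zoneID>" string:

```go
package main

import (
	"fmt"
	"strings"
)

// Minimal stand-in for the provider's Cloud type.
type cloud struct{ location string }

// A hedged reconstruction of makeZone: region-qualify the numeric zone ID,
// matching the "eastus-1" expectation in the zone test earlier.
func (az *cloud) makeZone(zoneID int) string {
	return fmt.Sprintf("%s-%d", strings.ToLower(az.location), zoneID)
}

func main() {
	az := &cloud{location: "eastus"}
	fmt.Println(az.makeZone(1)) // eastus-1
}
```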
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
@ -245,23 +291,43 @@ func (ss *scaleSet) GetPrimaryVMSetName() string {
}
// GetIPByNodeName gets machine private IP and public IP by node name.
// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet,
// fix this after it is supported.
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
nic, err := ss.GetPrimaryInterface(nodeName)
if err != nil {
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
return "", "", err
}
ipConfig, err := getPrimaryIPConfig(nic)
if err != nil {
glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
klog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
return "", "", err
}
targetIP := *ipConfig.PrivateIPAddress
return targetIP, "", nil
internalIP := *ipConfig.PrivateIPAddress
publicIP := ""
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
pipID := *ipConfig.PublicIPAddress.ID
pipName, err := getLastSegment(pipID)
if err != nil {
return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", nodeName, pipID)
}
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
if err != nil {
return "", "", err
}
pip, existsPip, err := ss.getPublicIPAddress(resourceGroup, pipName)
if err != nil {
return "", "", err
}
if existsPip {
publicIP = *pip.IPAddress
}
}
return internalIP, publicIP, nil
}
// This returns the full identifier of the primary NIC for the given VM.
@ -296,7 +362,7 @@ func getScaleSetVMInstanceID(machineName string) (string, error) {
return fmt.Sprintf("%d", instanceID), nil
}
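
For context, VMSS computer names carry the instance ID as a base-36 suffix (the test fixtures below use names like "vmssee6c2000001", whose last six characters decode to 1). A hedged standalone sketch of that decoding, with an illustrative helper name:

package main

import (
	"fmt"
	"strconv"
)

// parseVmssSuffix decodes the trailing six base-36 characters of a VMSS
// computer name into its numeric instance ID (illustrative sketch).
func parseVmssSuffix(machineName string) (string, error) {
	if len(machineName) < 6 {
		return "", fmt.Errorf("not a vmss instance name: %q", machineName)
	}
	id, err := strconv.ParseUint(machineName[len(machineName)-6:], 36, 64)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%d", id), nil
}

func main() {
	id, _ := parseVmssSuffix("vmssee6c2000001")
	fmt.Println(id) // 1
}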
// extractScaleSetNameByProviderID extracts the scaleset name by node's ProviderID.
// extractScaleSetNameByProviderID extracts the scaleset name by vmss node's ProviderID.
func extractScaleSetNameByProviderID(providerID string) (string, error) {
matches := scaleSetNameRE.FindStringSubmatch(providerID)
if len(matches) != 2 {
@ -306,15 +372,25 @@ func extractScaleSetNameByProviderID(providerID string) (string, error) {
return matches[1], nil
}
// extractResourceGroupByProviderID extracts the resource group name by vmss node's ProviderID.
func extractResourceGroupByProviderID(providerID string) (string, error) {
matches := resourceGroupRE.FindStringSubmatch(providerID)
if len(matches) != 2 {
return "", ErrorNotVmssInstance
}
return matches[1], nil
}
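
Both extractors above rely on package-level regexps (scaleSetNameRE, resourceGroupRE) that are defined outside this hunk. A self-contained sketch with assumed patterns, run against a typical vmss providerID:

package main

import (
	"fmt"
	"regexp"
)

// Assumed shapes for illustration; the real patterns live elsewhere in the package.
var (
	scaleSetNameRE  = regexp.MustCompile(`.*/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
	resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
)

func main() {
	providerID := "azure:///subscriptions/sub/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachineScaleSets/vmss/virtualMachines/0"
	fmt.Println(scaleSetNameRE.FindStringSubmatch(providerID)[1])  // vmss
	fmt.Println(resourceGroupRE.FindStringSubmatch(providerID)[1]) // rg1
}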
// listScaleSets lists all scale sets.
func (ss *scaleSet) listScaleSets() ([]string, error) {
func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
var err error
ctx, cancel := getContextWithCancel()
defer cancel()
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, ss.ResourceGroup)
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
if err != nil {
glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
return nil, err
}
@ -327,14 +403,14 @@ func (ss *scaleSet) listScaleSets() ([]string, error) {
}
// listScaleSetVMs lists VMs belonging to the specified scale set.
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) {
func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compute.VirtualMachineScaleSetVM, error) {
var err error
ctx, cancel := getContextWithCancel()
defer cancel()
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView))
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView))
if err != nil {
glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
return nil, err
}
@ -350,6 +426,10 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
continue
}
nodeName := nodes[nx].Name
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
if err != nil {
@ -357,7 +437,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
}
if ssName == "" {
glog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
continue
}
@ -381,11 +461,11 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
if err != nil {
glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
return nil, err
}
if len(*scaleSetNames) == 0 {
glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
}
@ -407,7 +487,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
}
}
if !found {
glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx])
}
}
@ -417,11 +497,21 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
return vmSetNames, nil
}
// extractResourceGroupByVMSSNicID extracts the resource group name by vmss nicID.
func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
matches := vmssNicResourceGroupRE.FindStringSubmatch(nicID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
}
return matches[1], nil
}
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
if err != nil {
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return network.Interface{}, err
}
if managedByAS {
@ -431,27 +521,36 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
if err != nil {
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
// VM is availability set, but not cached yet in availabilitySetNodesCache.
if err == ErrorNotVmssInstance {
return ss.availabilitySet.GetPrimaryInterface(nodeName)
}
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
return network.Interface{}, err
}
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
if err != nil {
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
return network.Interface{}, err
}
nicName, err := getLastSegment(primaryInterfaceID)
if err != nil {
glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
return network.Interface{}, err
}
resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID)
if err != nil {
return network.Interface{}, err
}
ctx, cancel := getContextWithCancel()
defer cancel()
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, ss.ResourceGroup, ssName, instanceID, nicName, "")
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "")
if err != nil {
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err)
return network.Interface{}, err
}
@ -465,17 +564,18 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
}
// getScaleSetWithRetry gets scale set with exponential backoff retry
func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (compute.VirtualMachineScaleSet, bool, error) {
var result compute.VirtualMachineScaleSet
var exists bool
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
cached, retryErr := ss.vmssCache.Get(name)
if retryErr != nil {
glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error())
klog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
return false, nil
}
glog.V(4).Info("backoff: success for scale set %q", name)
klog.V(4).Infof("backoff: success for scale set %q", name)
if cached != nil {
exists = true
@ -522,24 +622,24 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine
}
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err)
})
}
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs)
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
return processHTTPRetryResponse(resp, err)
klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err)
})
}
@ -550,13 +650,18 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
for _, curNode := range nodes {
if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) {
glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
klog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(curNode) {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name)
continue
}
curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID)
if err != nil {
glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
klog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
standardNodes = append(standardNodes, curNode)
continue
}
@ -567,7 +672,7 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
instanceID, err := getLastSegment(curNode.Spec.ProviderID)
if err != nil {
glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
klog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
return nil, nil, err
}
@ -579,16 +684,17 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are
// participating in the vmSet's LoadBalancer Backend Pool.
func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
klog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
serviceName := getServiceName(service)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName)
if err != nil {
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
klog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
return err
}
if !exists {
errorMessage := fmt.Errorf("Scale set %q not found", vmSetName)
glog.Errorf("%v", errorMessage)
klog.Errorf("%v", errorMessage)
return errorMessage
}
@ -629,7 +735,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
if len(matches) == 2 {
lbName := matches[1]
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
klog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
return nil
}
}
@ -644,15 +750,15 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
ctx, cancel := getContextWithCancel()
defer cancel()
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
}
}
if err != nil {
@ -667,13 +773,13 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
ctx, cancel := getContextWithCancel()
defer cancel()
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
retryErr := ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
}
}
if err != nil {
@ -685,10 +791,11 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
serviceName := getServiceName(service)
scalesets, standardNodes, err := ss.getNodesScaleSets(nodes)
if err != nil {
glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
klog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
return err
}
@ -700,22 +807,22 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
if instanceIDs.Len() == 0 {
// This may happen when scaling a vmss capacity to 0.
glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
klog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
// InstanceIDs is required to update vmss, use * instead here since there are no nodes actually.
instanceIDs.Insert("*")
}
err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal)
err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal)
if err != nil {
glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
klog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
return err
}
}
if ss.useStandardLoadBalancer() && len(standardNodes) > 0 {
err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal)
err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal)
if err != nil {
glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
klog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
return err
}
}
@ -724,15 +831,15 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
}
// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset.
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error {
klog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName)
if err != nil {
glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
klog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
return err
}
if !exists {
glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
klog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
return nil
}
@ -759,7 +866,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
for i := len(existingBackendPools) - 1; i >= 0; i-- {
curPool := existingBackendPools[i]
if strings.EqualFold(poolID, *curPool.ID) {
glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
klog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
foundPool = true
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
}
@ -771,17 +878,17 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
// Update scale set with backoff.
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
ctx, cancel := getContextWithCancel()
defer cancel()
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
}
}
if err != nil {
@ -796,13 +903,13 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
instanceCtx, instanceCancel := getContextWithCancel()
defer instanceCancel()
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs)
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs)
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
}
}
if err != nil {
@ -814,14 +921,14 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
if len(newBackendPools) == 0 {
updateCtx, updateCancel := getContextWithCancel()
defer updateCancel()
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
if retryErr != nil {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
}
}
}
@ -830,7 +937,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
if backendAddressPools == nil {
return nil
}
@ -845,7 +952,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
if err != nil {
glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
klog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID)
continue
}
@ -861,9 +968,9 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
continue
}
err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName)
err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName)
if err != nil {
glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
klog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
return err
}
}
@ -872,11 +979,11 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
}
// getVmssMachineID returns the full identifier of a vmss virtual machine.
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
func (az *Cloud) getVmssMachineID(resourceGroup, scaleSetName, instanceID string) string {
return fmt.Sprintf(
vmssMachineIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
resourceGroup,
scaleSetName,
instanceID)
}
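
vmssMachineIDTemplate itself is not shown in this hunk; assuming the conventional ARM resource layout, the constructed machine ID looks like this (sketch with illustrative values):

package main

import "fmt"

// Assumed template shape for illustration only.
const vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"

func main() {
	fmt.Printf(vmssMachineIDTemplate+"\n", "sub", "rg1", "vmss", "0")
	// /subscriptions/sub/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachineScaleSets/vmss/virtualMachines/0
}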

View File

@ -21,21 +21,22 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
vmssNameSeparator = "_"
vmssNameSeparator = "_"
vmssCacheSeparator = "#"
nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
vmssCacheTTL = time.Minute
vmssVMCacheTTL = time.Minute
availabilitySetNodesCacheTTL = 15 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
availabilitySetNodesCacheTTL = 5 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute
)
// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
@ -49,19 +50,19 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
func extractVmssVMName(name string) (string, string, error) {
split := strings.SplitAfter(name, vmssNameSeparator)
if len(split) < 2 {
glog.Errorf("Failed to extract vmssVMName %q", name)
klog.V(3).Infof("Failed to extract vmssVMName %q", name)
return "", "", ErrorNotVmssInstance
}
ssName := strings.Join(split[0:len(split)-1], "")
// removing the trailing `vmssNameSeparator` since we used SplitAfter
ssName = ssName[:len(ssName)-1]
instanceID := split[len(split)-1]
return ssName, instanceID, nil
}
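
An equivalent standalone sketch of the split above: everything before the last separator is the scale-set name and the remainder is the instance ID, so names containing extra underscores are still handled.

package main

import (
	"fmt"
	"strings"
)

// splitVmssVMName mirrors extractVmssVMName, using LastIndex instead of SplitAfter.
func splitVmssVMName(name string) (ssName, instanceID string, ok bool) {
	idx := strings.LastIndex(name, "_")
	if idx < 0 {
		return "", "", false
	}
	return name[:idx], name[idx+1:], true
}

func main() {
	ss, id, _ := splitVmssVMName("my_scale_set_35")
	fmt.Println(ss, id) // my_scale_set 35
}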
// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups
// will be excluded from LB backends.
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
@ -73,7 +74,7 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {
}
if !exists {
glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
return nil, nil
}
@ -85,26 +86,34 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {
func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
scaleSetNames, err := ss.listScaleSets()
localCache := make(nodeNameToScaleSetMapping)
allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}
localCache := make(nodeNameToScaleSetMapping)
for _, ssName := range scaleSetNames {
vms, err := ss.listScaleSetVMs(ssName)
for _, resourceGroup := range allResourceGroups.List() {
scaleSetNames, err := ss.listScaleSets(resourceGroup)
if err != nil {
return nil, err
}
for _, vm := range vms {
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
continue
for _, ssName := range scaleSetNames {
vms, err := ss.listScaleSetVMs(ssName, resourceGroup)
if err != nil {
return nil, err
}
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
for _, vm := range vms {
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
klog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
continue
}
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
}
}
}
@ -116,14 +125,23 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
localCache := sets.NewString()
resourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}
localCache := sets.NewString()
for _, vm := range vmList {
localCache.Insert(*vm.Name)
for _, resourceGroup := range resourceGroups.List() {
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry(resourceGroup)
if err != nil {
return nil, err
}
for _, vm := range vmList {
if vm.Name != nil {
localCache.Insert(*vm.Name)
}
}
}
return localCache, nil
@ -132,10 +150,33 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
return newTimedcache(availabilitySetNodesCacheTTL, getter)
}
func buildVmssCacheKey(resourceGroup, name string) string {
// key is composed of <resourceGroup>#<vmName>
return fmt.Sprintf("%s%s%s", resourceGroup, vmssCacheSeparator, name)
}
func extractVmssCacheKey(key string) (string, string, error) {
// key is composed of <resourceGroup>#<vmName>
keyItems := strings.Split(key, vmssCacheSeparator)
if len(keyItems) != 2 {
return "", "", fmt.Errorf("key %q is not in format '<resouceGroup>#<vmName>'", key)
}
resourceGroup := keyItems[0]
vmName := keyItems[1]
return resourceGroup, vmName, nil
}
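
A quick round trip of the cache-key helpers above (sketch; real keys are built from node resource groups and vmss VM names):

package main

import (
	"fmt"
	"strings"
)

const vmssCacheSeparator = "#"

func main() {
	key := "rg1" + vmssCacheSeparator + "vmss_0" // as buildVmssCacheKey("rg1", "vmss_0") would produce
	items := strings.Split(key, vmssCacheSeparator)
	fmt.Println(items[0], items[1]) // rg1 vmss_0
}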
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
// vmssVM name's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(key)
// key is composed of <resourceGroup>#<vmName>
resourceGroup, vmName, err := extractVmssCacheKey(key)
if err != nil {
return nil, err
}
// vmName's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(vmName)
if err != nil {
return nil, err
}
@ -147,17 +188,35 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}
// Get instanceView for vmssVM.
if result.InstanceView == nil {
viewCtx, viewCancel := getContextWithCancel()
defer viewCancel()
view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID)
// It is possible that the vmssVM gets removed just before this call. So check whether the VM still exists.
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}
result.InstanceView = &view
}
return &result, nil
}

View File

@ -20,13 +20,20 @@ import (
"fmt"
"testing"
compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
)
func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
const (
fakePrivateIP = "10.240.0.10"
fakePublicIP = "10.10.10.10"
)
func newTestScaleSet(scaleSetName, zone string, faultDomain int32, vmList []string) (*scaleSet, error) {
cloud := getTestCloud()
setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
setTestVirtualMachineCloud(cloud, scaleSetName, zone, faultDomain, vmList)
ss, err := newScaleSet(cloud)
if err != nil {
return nil, err
@ -35,8 +42,13 @@ func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
return ss.(*scaleSet), nil
}
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomain int32, vmList []string) {
virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
publicIPAddressesClient := newFakeAzurePIPClient("rg")
interfaceClient := newFakeAzureInterfacesClient()
// set test scale sets.
scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
scaleSetName: {
@ -45,20 +57,30 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
}
virtualMachineScaleSetsClient.setFakeStore(scaleSets)
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
testInterfaces := map[string]map[string]network.Interface{
"rg": make(map[string]network.Interface),
}
testPIPs := map[string]map[string]network.PublicIPAddress{
"rg": make(map[string]network.PublicIPAddress),
}
ssVMs := map[string]map[string]compute.VirtualMachineScaleSetVM{
"rg": make(map[string]compute.VirtualMachineScaleSetVM),
}
for i := range vmList {
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
nodeName := vmList[i]
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
interfaceID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s", scaleSetName, i, nodeName)
publicAddressID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s/ipConfigurations/ipconfig1/publicIPAddresses/%s", scaleSetName, i, nodeName, nodeName)
instanceID := fmt.Sprintf("%d", i)
vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
// set vmss virtual machine.
networkInterfaces := []compute.NetworkInterfaceReference{
{
ID: &nodeName,
ID: &interfaceID,
},
}
ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
vmssVM := compute.VirtualMachineScaleSetVM{
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
OsProfile: &compute.OSProfile{
ComputerName: &nodeName,
@ -66,17 +88,55 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &networkInterfaces,
},
InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{
PlatformFaultDomain: &faultDomain,
},
},
ID: &ID,
InstanceID: &instanceID,
Name: &vmName,
Location: &ss.Location,
}
if zone != "" {
zones := []string{zone}
vmssVM.Zones = &zones
}
ssVMs["rg"][vmName] = vmssVM
// set interfaces.
testInterfaces["rg"][nodeName] = network.Interface{
ID: &interfaceID,
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
PrivateIPAddress: to.StringPtr(fakePrivateIP),
PublicIPAddress: &network.PublicIPAddress{
ID: to.StringPtr(publicAddressID),
},
},
},
},
},
}
// set public IPs.
testPIPs["rg"][nodeName] = network.PublicIPAddress{
ID: to.StringPtr(publicAddressID),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
IPAddress: to.StringPtr(fakePublicIP),
},
}
}
virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)
interfaceClient.setFakeStore(testInterfaces)
publicIPAddressesClient.setFakeStore(testPIPs)
ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
ss.InterfacesClient = interfaceClient
ss.PublicIPAddressesClient = publicIPAddressesClient
}
func TestGetScaleSetVMInstanceID(t *testing.T) {
@ -141,7 +201,7 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, test.vmList)
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)
real, err := ss.GetInstanceIDByNodeName(test.nodeName)
@ -154,3 +214,96 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
assert.Equal(t, test.expected, real, test.description)
}
}
func TestGetZoneByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
zone string
faultDomain int32
expected string
expectError bool
}{
{
description: "scaleSet should get faultDomain for non-zoned nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
faultDomain: 3,
expected: "3",
},
{
description: "scaleSet should get availability zone for zoned nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
zone: "2",
faultDomain: 3,
expected: "westus-2",
},
{
description: "scaleSet should return error for non-exist nodes",
scaleSet: "ss",
faultDomain: 3,
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, test.zone, test.faultDomain, test.vmList)
assert.NoError(t, err, test.description)
real, err := ss.GetZoneByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}
assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, real.FailureDomain, test.description)
}
}
func TestGetIPByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
expected []string
expectError bool
}{
{
description: "GetIPByNodeName should get node's privateIP and publicIP",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
expected: []string{fakePrivateIP, fakePublicIP},
},
{
description: "GetIPByNodeName should return error for non-exist nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}
for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)
privateIP, publicIP, err := ss.GetIPByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}
assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, []string{privateIP, publicIP}, test.description)
}
}

View File

@ -19,16 +19,16 @@ package azure
import (
"fmt"
"net/http"
"regexp"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
var (
@ -36,6 +36,8 @@ var (
lbCacheTTL = 2 * time.Minute
nsgCacheTTL = 2 * time.Minute
rtCacheTTL = 2 * time.Minute
azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)
)
// checkExistsFromError inspects an error and returns a true if err is nil,
@ -115,7 +117,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi
}
if !exists {
glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
klog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
return pip, false, nil
}
@ -142,7 +144,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
}
if !exists {
glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
klog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
return subnet, false, nil
}
@ -189,14 +191,20 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
ctx, cancel := getContextWithCancel()
defer cancel()
vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)
resourceGroup, err := az.GetNodeResourceGroup(key)
if err != nil {
return nil, err
}
vm, err := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
klog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
return nil, nil
}
@ -218,7 +226,7 @@ func (az *Cloud) newLBCache() (*timedCache, error) {
}
if !exists {
glog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
klog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
return nil, nil
}
@ -239,7 +247,7 @@ func (az *Cloud) newNSGCache() (*timedCache, error) {
}
if !exists {
glog.V(2).Infof("Security group %q not found with message: %q", key, message)
klog.V(2).Infof("Security group %q not found with message: %q", key, message)
return nil, nil
}
@ -260,7 +268,7 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) {
}
if !exists {
glog.V(2).Infof("Route table %q not found with message: %q", key, message)
klog.V(2).Infof("Route table %q not found with message: %q", key, message)
return nil, nil
}
@ -277,3 +285,21 @@ func (az *Cloud) useStandardLoadBalancer() bool {
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
}
// IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider.
// Those nodes includes on-prem or VMs from other clouds. They will not be added to load balancer
// backends. Azure routes and managed disks are also not supported for them.
func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) {
unmanagedNodes, err := az.GetUnmanagedNodes()
if err != nil {
return false, err
}
return unmanagedNodes.Has(nodeName), nil
}
// IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider.
// All managed node's providerIDs are in format 'azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/.*'
func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
return !azureNodeProviderIDRE.Match([]byte(providerID))
}
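
A standalone demonstration of the providerID check above; the regexp is copied from this file, the sample IDs are illustrative:

package main

import (
	"fmt"
	"regexp"
)

var azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)

func main() {
	managed := "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm-0"
	onPrem := "baremetal://node-1"
	fmt.Println(!azureNodeProviderIDRE.MatchString(managed)) // false: managed by Azure
	fmt.Println(!azureNodeProviderIDRE.MatchString(onPrem))  // true: unmanaged
}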

View File

@ -23,6 +23,8 @@ import (
"testing"
"github.com/Azure/go-autorest/autorest"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
)
func TestExtractNotFound(t *testing.T) {
@ -51,3 +53,92 @@ func TestExtractNotFound(t *testing.T) {
}
}
}
func TestIsNodeUnmanaged(t *testing.T) {
tests := []struct {
name string
unmanagedNodes sets.String
node string
expected bool
expectErr bool
}{
{
name: "unmanaged node should return true",
unmanagedNodes: sets.NewString("node1", "node2"),
node: "node1",
expected: true,
},
{
name: "managed node should return false",
unmanagedNodes: sets.NewString("node1", "node2"),
node: "node3",
expected: false,
},
{
name: "empty unmanagedNodes should return true",
unmanagedNodes: sets.NewString(),
node: "node3",
expected: false,
},
{
name: "no synced informer should report error",
unmanagedNodes: sets.NewString(),
node: "node1",
expectErr: true,
},
}
az := getTestCloud()
for _, test := range tests {
az.unmanagedNodes = test.unmanagedNodes
if test.expectErr {
az.nodeInformerSynced = func() bool {
return false
}
}
real, err := az.IsNodeUnmanaged(test.node)
if test.expectErr {
assert.Error(t, err, test.name)
continue
}
assert.NoError(t, err, test.name)
assert.Equal(t, test.expected, real, test.name)
}
}
func TestIsNodeUnmanagedByProviderID(t *testing.T) {
tests := []struct {
providerID string
expected bool
name string
}{
{
providerID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: false,
},
{
providerID: CloudProviderName + "://",
expected: true,
},
{
providerID: ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: true,
},
{
providerID: "aws:///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
expected: true,
},
{
providerID: "k8s-agent-AAAAAAAA-0",
expected: true,
},
}
az := getTestCloud()
for _, test := range tests {
isUnmanagedNode := az.IsNodeUnmanagedByProviderID(test.providerID)
assert.Equal(t, test.expected, isUnmanagedNode, test.providerID)
}
}

View File

@ -18,54 +18,74 @@ package azure
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"sync"
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"
var faultMutex = &sync.Mutex{}
var faultDomain *string
type instanceInfo struct {
ID string `json:"ID"`
UpdateDomain string `json:"UD"`
FaultDomain string `json:"FD"`
// makeZone returns the zone value in format of <region>-<zone-id>.
func (az *Cloud) makeZone(zoneID int) string {
return fmt.Sprintf("%s-%d", strings.ToLower(az.Location), zoneID)
}
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
// isAvailabilityZone returns true if the zone is in format of <region>-<zone-id>.
func (az *Cloud) isAvailabilityZone(zone string) bool {
return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
}
// GetZoneID returns the ID of zone from node's zone label.
func (az *Cloud) GetZoneID(zoneLabel string) string {
if !az.isAvailabilityZone(zoneLabel) {
return ""
}
return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
}
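
The three helpers above compose into a simple round trip, sketched standalone here: makeZone renders "<region>-<zone-id>", and GetZoneID recovers the numeric ID only when the label matches the configured location.

package main

import (
	"fmt"
	"strings"
)

func makeZone(location string, zoneID int) string {
	return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
}

func getZoneID(location, zoneLabel string) string {
	prefix := fmt.Sprintf("%s-", location)
	if !strings.HasPrefix(zoneLabel, prefix) {
		return ""
	}
	return strings.TrimPrefix(zoneLabel, prefix)
}

func main() {
	z := makeZone("eastus", 2)
	fmt.Println(z, getZoneID("eastus", z), getZoneID("eastus", "centralus-3")) // eastus-2 2 ""
}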
// GetZone returns the Zone containing the current availability zone and locality region that the program is running in.
// If the node is not running in an availability zone, it falls back to the fault domain.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
return az.getZoneFromURL(instanceInfoURL)
}
metadata, err := az.metadata.GetMetadata()
if err != nil {
return cloudprovider.Zone{}, err
}
// This is injectable for testing.
func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
faultMutex.Lock()
defer faultMutex.Unlock()
if faultDomain == nil {
var err error
faultDomain, err = fetchFaultDomain(url)
if metadata.Compute == nil {
return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata")
}
zone := ""
if metadata.Compute.Zone != "" {
zoneID, err := strconv.Atoi(metadata.Compute.Zone)
if err != nil {
return cloudprovider.Zone{}, err
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err)
}
zone = az.makeZone(zoneID)
} else {
klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain")
zone = metadata.Compute.FaultDomain
}
zone := cloudprovider.Zone{
FailureDomain: *faultDomain,
return cloudprovider.Zone{
FailureDomain: zone,
Region: az.Location,
}
return zone, nil
}, nil
}
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
// Returns an empty Zone for unmanaged nodes because the Azure cloud provider cannot fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID)
return cloudprovider.Zone{}, nil
}
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return cloudprovider.Zone{}, err
@ -78,27 +98,15 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
if err != nil {
return cloudprovider.Zone{}, err
}
if unmanaged {
klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
return cloudprovider.Zone{}, nil
}
return az.vmSet.GetZoneByNodeName(string(nodeName))
}
func fetchFaultDomain(url string) (*string, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return readFaultDomain(resp.Body)
}
func readFaultDomain(reader io.Reader) (*string, error) {
var instanceInfo instanceInfo
body, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
err = json.Unmarshal(body, &instanceInfo)
if err != nil {
return nil, err
}
return &instanceInfo.FaultDomain, nil
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"testing"
)
func TestIsAvailabilityZone(t *testing.T) {
location := "eastus"
az := &Cloud{
Config: Config{
Location: location,
},
}
tests := []struct {
desc string
zone string
expected bool
}{
{"empty string should return false", "", false},
{"wrong farmat should return false", "123", false},
{"wrong location should return false", "chinanorth-1", false},
{"correct zone should return true", "eastus-1", true},
}
for _, test := range tests {
actual := az.isAvailabilityZone(test.zone)
if actual != test.expected {
t.Errorf("test [%q] get unexpected result: %v != %v", test.desc, actual, test.expected)
}
}
}
func TestGetZoneID(t *testing.T) {
location := "eastus"
az := &Cloud{
Config: Config{
Location: location,
},
}
tests := []struct {
desc string
zone string
expected string
}{
{"empty string should return empty string", "", ""},
{"wrong farmat should return empty string", "123", ""},
{"wrong location should return empty string", "chinanorth-1", ""},
{"correct zone should return zone ID", "eastus-1", "1"},
}
for _, test := range tests {
actual := az.GetZoneID(test.zone)
if actual != test.expected {
t.Errorf("test [%q] get unexpected result: %q != %q", test.desc, actual, test.expected)
}
}
}

View File

@ -13,53 +13,19 @@ go_library(
"cloudstack_instances.go",
"cloudstack_loadbalancer.go",
"metadata.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"metadata_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"metadata_other.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"metadata_other.go",
],
"//conditions:default": [],
}),
"metadata_linux.go",
"metadata_other.go",
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/d2g/dhcp4:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/kardianos/osext:go_default_library",
"//vendor/github.com/xanzy/go-cloudstack/cloudstack:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/github.com/d2g/dhcp4client:go_default_library",
@ -103,8 +69,8 @@ go_test(
srcs = ["cloudstack_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)

View File

@ -24,13 +24,12 @@ import (
"os"
"path/filepath"
"github.com/golang/glog"
"github.com/kardianos/osext"
"github.com/xanzy/go-cloudstack/cloudstack"
"gopkg.in/gcfg.v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
// ProviderName is the name of this cloud provider.
@ -99,10 +98,10 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) {
// In CloudStack your metadata is always served by the DHCP server.
dhcpServer, err := findDHCPServer()
if err == nil {
glog.V(4).Infof("Found metadata server: %v", dhcpServer)
klog.V(4).Infof("Found metadata server: %v", dhcpServer)
cs.metadata = &metadata{dhcpServer: dhcpServer, zone: cs.zone}
} else {
glog.Errorf("Error searching metadata server: %v", err)
klog.Errorf("Error searching metadata server: %v", err)
}
}
@ -112,7 +111,7 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) {
if cs.client == nil {
if cs.metadata != nil {
glog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!")
klog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!")
} else {
return nil, errors.New("no cloud provider config given")
}
@ -122,7 +121,8 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) {
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (cs *CSCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
func (cs *CSCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
}
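The new Initialize signature comes from the k8s.io/cloud-provider staging repo and also hands providers a stop channel for shutting down background work. CloudStack ignores both arguments; a provider that does run background loops would typically consume the channel along these lines (the helper name and interval are illustrative, not from this commit):

package cloudutil

import "time"

// runUntilStopped calls refresh on a fixed interval until stop is closed,
// which is how Initialize's stop channel is normally consumed.
func runUntilStopped(stop <-chan struct{}, interval time.Duration, refresh func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			refresh()
		case <-stop:
			return
		}
	}
}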
// LoadBalancer returns an implementation of LoadBalancer for CloudStack.
func (cs *CSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
@ -208,7 +208,7 @@ func (cs *CSCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
cs.zone = instance.Zonename
}
glog.V(2).Infof("Current zone is %v", cs.zone)
klog.V(2).Infof("Current zone is %v", cs.zone)
zone.FailureDomain = cs.zone
zone.Region = cs.zone
@ -230,7 +230,7 @@ func (cs *CSCloud) GetZoneByProviderID(ctx context.Context, providerID string) (
return zone, fmt.Errorf("error retrieving zone: %v", err)
}
glog.V(2).Infof("Current zone is %v", cs.zone)
klog.V(2).Infof("Current zone is %v", cs.zone)
zone.FailureDomain = instance.Zonename
zone.Region = instance.Zonename
@ -252,7 +252,7 @@ func (cs *CSCloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeNam
return zone, fmt.Errorf("error retrieving zone: %v", err)
}
glog.V(2).Infof("Current zone is %v", cs.zone)
klog.V(2).Infof("Current zone is %v", cs.zone)
zone.FailureDomain = instance.Zonename
zone.Region = instance.Zonename
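The bulk of this commit is a mechanical glog-to-klog migration, visible throughout this file. As a minimal, self-contained sketch of the klog API these call sites rely on (the flag wiring below is our illustration, not part of the commit):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog requires its flags to be registered explicitly.
	klog.InitFlags(nil)
	flag.Set("v", "4")
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("Found metadata server: %v", "10.0.0.1") // emitted only at -v=4 or above
	klog.Errorf("Error searching metadata server: %v", "timeout")
}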

View File

@ -21,11 +21,11 @@ import (
"errors"
"fmt"
"github.com/golang/glog"
"github.com/xanzy/go-cloudstack/cloudstack"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
// NodeAddresses returns the addresses of the specified instance.
@ -69,12 +69,16 @@ func (cs *CSCloud) nodeAddresses(instance *cloudstack.VirtualMachine) ([]v1.Node
{Type: v1.NodeInternalIP, Address: instance.Nic[0].Ipaddress},
}
if instance.Hostname != "" {
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: instance.Hostname})
}
if instance.Publicip != "" {
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: instance.Publicip})
} else {
// Since there is no sane way to determine the external IP if the host isn't
// using static NAT, we just log a message and omit the external IP.
glog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id)
klog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id)
}
return addresses, nil
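A worked example of the list built above: for a VM with hostname "node-1", first-NIC address 10.0.0.5, and a static-NAT public IP 203.0.113.7 (all values illustrative), nodeAddresses returns the slice below.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	addresses := []v1.NodeAddress{
		{Type: v1.NodeInternalIP, Address: "10.0.0.5"},
		{Type: v1.NodeHostName, Address: "node-1"},
		{Type: v1.NodeExternalIP, Address: "203.0.113.7"},
	}
	fmt.Printf("%+v\n", addresses)
}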

View File

@ -21,10 +21,11 @@ import (
"fmt"
"strconv"
"github.com/golang/glog"
"github.com/xanzy/go-cloudstack/cloudstack"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)
type loadBalancer struct {
@ -42,7 +43,7 @@ type loadBalancer struct {
// GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is.
func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
glog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name)
klog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name)
// Get the load balancer details and existing rules.
lb, err := cs.getLoadBalancer(service)
@ -55,7 +56,7 @@ func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, serv
return nil, false, nil
}
glog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr)
klog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr)
status := &v1.LoadBalancerStatus{}
status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: lb.ipAddr})
@ -65,7 +66,7 @@ func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, serv
// EnsureLoadBalancer creates a new load balancer, or updates the existing one. Returns the status of the balancer.
func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) {
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes)
klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes)
if len(service.Spec.Ports) == 0 {
return nil, fmt.Errorf("requested load balancer with no ports")
@ -103,14 +104,14 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s
defer func(lb *loadBalancer) {
if err != nil {
if err := lb.releaseLoadBalancerIP(); err != nil {
glog.Errorf(err.Error())
klog.Errorf(err.Error())
}
}
}(lb)
}
}
glog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr)
klog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr)
for _, port := range service.Spec.Ports {
// All ports have their own load balancer rule, so add the port to lbName to keep the names unique.
@ -122,14 +123,14 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s
return nil, err
}
if exists && !needsUpdate {
glog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName)
klog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName)
// Remove the rule from the map, so the cleanup loop below does not delete it.
delete(lb.rules, lbRuleName)
continue
}
if needsUpdate {
glog.V(4).Infof("Updating load balancer rule: %v", lbRuleName)
klog.V(4).Infof("Updating load balancer rule: %v", lbRuleName)
if err := lb.updateLoadBalancerRule(lbRuleName); err != nil {
return nil, err
}
@ -138,13 +139,13 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s
continue
}
glog.V(4).Infof("Creating load balancer rule: %v", lbRuleName)
klog.V(4).Infof("Creating load balancer rule: %v", lbRuleName)
lbRule, err := lb.createLoadBalancerRule(lbRuleName, port)
if err != nil {
return nil, err
}
glog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName)
klog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName)
if err = lb.assignHostsToRule(lbRule, lb.hostIDs); err != nil {
return nil, err
}
@ -153,7 +154,7 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s
// Cleanup any rules that are now still in the rules map, as they are no longer needed.
for _, lbRule := range lb.rules {
glog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name)
klog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name)
if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
return nil, err
}
@ -167,7 +168,7 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s
// UpdateLoadBalancer updates hosts under the specified load balancer.
func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes)
klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes)
// Get the load balancer details and existing rules.
lb, err := cs.getLoadBalancer(service)
@ -193,14 +194,14 @@ func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, s
assign, remove := symmetricDifference(lb.hostIDs, l.LoadBalancerRuleInstances)
if len(assign) > 0 {
glog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name)
klog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name)
if err := lb.assignHostsToRule(lbRule, assign); err != nil {
return err
}
}
if len(remove) > 0 {
glog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", remove, lbRule.Name)
klog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", remove, lbRule.Name)
if err := lb.removeHostsFromRule(lbRule, remove); err != nil {
return err
}
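symmetricDifference itself is a package-local helper outside this hunk. A hedged sketch of the set logic it needs to implement; the name and plain string-slice signature below are our simplification, since the real helper compares against CloudStack's instance list:

// diffHostIDs returns the IDs present only in desired (to assign) and the
// IDs present only in current (to remove).
func diffHostIDs(desired, current []string) (assign, remove []string) {
	cur := make(map[string]bool, len(current))
	for _, id := range current {
		cur[id] = true
	}
	for _, id := range desired {
		if cur[id] {
			delete(cur, id) // present on both sides: nothing to do
		} else {
			assign = append(assign, id)
		}
	}
	for id := range cur {
		remove = append(remove, id)
	}
	return assign, remove
}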
@ -213,7 +214,7 @@ func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, s
// EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, returning
// nil if the load balancer specified either didn't exist or was successfully deleted.
func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)
klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)
// Get the load balancer details and existing rules.
lb, err := cs.getLoadBalancer(service)
@ -222,14 +223,14 @@ func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName st
}
for _, lbRule := range lb.rules {
glog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name)
klog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name)
if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
return err
}
}
if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP {
glog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr)
klog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr)
if err := lb.releaseLoadBalancerIP(); err != nil {
return err
}
@ -238,11 +239,16 @@ func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName st
return nil
}
// GetLoadBalancerName retrieves the name of the LoadBalancer.
func (cs *CSCloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
return cloudprovider.DefaultLoadBalancerName(service)
}
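cloudprovider.DefaultLoadBalancerName lives in the new k8s.io/cloud-provider staging repo and keeps the naming scheme of the old in-tree cloudprovider.GetLoadBalancerName. Roughly, as a hedged reconstruction (check the staging repo for the authoritative version):

package main

import (
	"fmt"
	"strings"
)

// defaultLoadBalancerName mirrors DefaultLoadBalancerName: "a" plus the
// service UID with dashes stripped, truncated to 32 characters.
func defaultLoadBalancerName(serviceUID string) string {
	ret := "a" + strings.Replace(serviceUID, "-", "", -1)
	if len(ret) > 32 {
		ret = ret[:32]
	}
	return ret
}

func main() {
	fmt.Println(defaultLoadBalancerName("0a1b2c3d-4e5f-6789-abcd-ef0123456789"))
	// a0a1b2c3d4e5f6789abcdef012345678
}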
// getLoadBalancer retrieves the load balancer's IP address and ID, plus all the existing rules it can find.
func (cs *CSCloud) getLoadBalancer(service *v1.Service) (*loadBalancer, error) {
lb := &loadBalancer{
CloudStackClient: cs.client,
name: cloudprovider.GetLoadBalancerName(service),
name: cs.GetLoadBalancerName(context.TODO(), "", service),
projectID: cs.projectID,
rules: make(map[string]*cloudstack.LoadBalancerRule),
}
@ -264,14 +270,14 @@ func (cs *CSCloud) getLoadBalancer(service *v1.Service) (*loadBalancer, error) {
lb.rules[lbRule.Name] = lbRule
if lb.ipAddr != "" && lb.ipAddr != lbRule.Publicip {
glog.Warningf("Load balancer for service %v/%v has rules associated with different IPs: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip)
klog.Warningf("Load balancer for service %v/%v has rules associated with different IPs: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip)
}
lb.ipAddr = lbRule.Publicip
lb.ipAddrID = lbRule.Publicipid
}
glog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules))
klog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules))
return lb, nil
}
@ -329,7 +335,7 @@ func (lb *loadBalancer) getLoadBalancerIP(loadBalancerIP string) error {
// getPublicIPAddress retrieves the ID of the given IP, and sets the address and its ID.
func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error {
glog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP)
klog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP)
p := lb.Address.NewListPublicIpAddressesParams()
p.SetIpaddress(loadBalancerIP)
@ -356,7 +362,7 @@ func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error {
// associatePublicIPAddress associates a new IP and sets the address and its ID.
func (lb *loadBalancer) associatePublicIPAddress() error {
glog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name)
klog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name)
// If a network belongs to a VPC, the IP address needs to be associated with
// the VPC instead of with the network.
network, count, err := lb.Network.GetNetworkByID(lb.networkID, cloudstack.WithProject(lb.projectID))

View File

@ -25,10 +25,10 @@ import (
"net/http"
"github.com/d2g/dhcp4"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
type metadata struct {
@ -39,6 +39,7 @@ type metadata struct {
type metadataType string
const (
metadataTypeHostname metadataType = "local-hostname"
metadataTypeExternalIP metadataType = "public-ipv4"
metadataTypeInternalIP metadataType = "local-ipv4"
metadataTypeInstanceID metadataType = "instance-id"
@ -58,10 +59,20 @@ func (m *metadata) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1
return nil, fmt.Errorf("could not get internal IP: %v", err)
}
return []v1.NodeAddress{
addresses := []v1.NodeAddress{
{Type: v1.NodeExternalIP, Address: externalIP},
{Type: v1.NodeInternalIP, Address: internalIP},
}, nil
}
hostname, err := m.get(metadataTypeHostname)
if err != nil {
return nil, fmt.Errorf("could not get hostname: %v", err)
}
if hostname != "" {
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname})
}
return addresses, nil
}
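m.get, not shown in this hunk, fetches one key from the DHCP server's metadata endpoint over HTTP. A sketch assuming CloudStack's convention of serving keys under http://<dhcp-server>/latest/<key>; the exact path, helper name, and addresses here are assumptions:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// getMetadata is an illustrative stand-in for metadata.get.
func getMetadata(dhcpServer, key string) (string, error) {
	resp, err := http.Get(fmt.Sprintf("http://%s/latest/%s", dhcpServer, key))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(body)), nil
}

func main() {
	ip, err := getMetadata("10.0.0.1", "local-ipv4")
	fmt.Println(ip, err)
}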
// NodeAddressesByProviderID returns the addresses of the specified instance.
@ -132,7 +143,7 @@ func (m *metadata) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
m.zone = zoneName
}
glog.V(2).Infof("Current zone is %v", zone)
klog.V(2).Infof("Current zone is %v", zone)
zone.FailureDomain = m.zone
zone.Region = m.zone

View File

@ -13,10 +13,9 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/fake",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
],
)

View File

@ -22,11 +22,11 @@ import (
"net"
"regexp"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
cloudprovider "k8s.io/cloud-provider"
)
const defaultProviderName = "fake"
@ -57,6 +57,7 @@ type FakeCloud struct {
Calls []string
Addresses []v1.NodeAddress
addressesMux sync.Mutex
ExtID map[types.NodeName]string
InstanceTypes map[types.NodeName]string
Machines []types.NodeName
@ -72,6 +73,8 @@ type FakeCloud struct {
addCallLock sync.Mutex
cloudprovider.Zone
VolumeLabelMap map[string]map[string]string
RequestDelay time.Duration
}
type FakeRoute struct {
@ -82,6 +85,9 @@ type FakeRoute struct {
func (f *FakeCloud) addCall(desc string) {
f.addCallLock.Lock()
defer f.addCallLock.Unlock()
time.Sleep(f.RequestDelay)
f.Calls = append(f.Calls, desc)
}
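RequestDelay is new in this commit: addCall now sleeps for that duration before recording, which lets tests simulate a slow cloud API. A sketch of a test using it (the test name, node name, and delay are our own):

package fake_test

import (
	"context"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
)

func TestFakeCloudHonorsRequestDelay(t *testing.T) {
	f := &fake.FakeCloud{RequestDelay: 100 * time.Millisecond}
	start := time.Now()
	if _, err := f.NodeAddresses(context.TODO(), types.NodeName("node-1")); err != nil {
		t.Fatal(err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("expected the call to be delayed by RequestDelay, took %v", elapsed)
	}
}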
@ -91,7 +97,8 @@ func (f *FakeCloud) ClearCalls() {
}
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (f *FakeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
func (f *FakeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
}
func (f *FakeCloud) ListClusters(ctx context.Context) ([]string, error) {
return f.ClusterList, f.Err
@ -147,6 +154,12 @@ func (f *FakeCloud) GetLoadBalancer(ctx context.Context, clusterName string, ser
return status, f.Exists, f.Err
}
// GetLoadBalancerName is a stub implementation of LoadBalancer.GetLoadBalancerName.
func (f *FakeCloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
// TODO: replace DefaultLoadBalancerName with something that generates more meaningful load balancer names.
return cloudprovider.DefaultLoadBalancerName(service)
}
// EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer.
// It adds an entry "create" into the internal method call record.
func (f *FakeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
@ -155,7 +168,7 @@ func (f *FakeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string,
f.Balancers = make(map[string]FakeBalancer)
}
name := cloudprovider.GetLoadBalancerName(service)
name := f.GetLoadBalancerName(ctx, clusterName, service)
spec := service.Spec
zone, err := f.GetZone(context.TODO())
@ -200,9 +213,17 @@ func (f *FakeCloud) CurrentNodeName(ctx context.Context, hostname string) (types
// It adds an entry "node-addresses" into the internal method call record.
func (f *FakeCloud) NodeAddresses(ctx context.Context, instance types.NodeName) ([]v1.NodeAddress, error) {
f.addCall("node-addresses")
f.addressesMux.Lock()
defer f.addressesMux.Unlock()
return f.Addresses, f.Err
}
func (f *FakeCloud) SetNodeAddresses(nodeAddresses []v1.NodeAddress) {
f.addressesMux.Lock()
defer f.addressesMux.Unlock()
f.Addresses = nodeAddresses
}
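SetNodeAddresses and the addressesMux it shares with NodeAddresses exist so a test can swap the advertised addresses while a controller under test reads them concurrently. A minimal sketch (values illustrative):

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
)

func main() {
	f := &fake.FakeCloud{}
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Safe even while another goroutine is inside NodeAddresses.
		f.SetNodeAddresses([]v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "10.0.0.5"}})
	}()
	_, _ = f.NodeAddresses(context.TODO(), types.NodeName("node-1"))
	<-done
}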
// NodeAddressesByProviderID is a test-spy implementation of Instances.NodeAddressesByProviderID.
// It adds an entry "node-addresses-by-provider-id" into the internal method call record.
func (f *FakeCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {

View File

@ -20,6 +20,7 @@ go_library(
"gce_clusterid.go",
"gce_clusters.go",
"gce_disks.go",
"gce_fake.go",
"gce_firewall.go",
"gce_forwardingrule.go",
"gce_healthchecks.go",
@ -46,21 +47,39 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce",
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/util/net/sets:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/cloud.google.com/go/compute/metadata:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/golang.org/x/oauth2:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
@ -71,24 +90,7 @@ go_library(
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/google.golang.org/api/tpu/v1:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -110,12 +112,17 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/api/v1/service:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/net/sets:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
@ -123,12 +130,6 @@ go_test(
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

View File

@ -3,6 +3,5 @@ approvers:
- jingxu97
- bowei
- freehan
- nicksardo
- mrhohn
- dnardo
- cheftako

View File

@ -6,7 +6,6 @@ go_library(
"constants.go",
"context.go",
"doc.go",
"errors.go",
"gce_projects.go",
"gen.go",
"op.go",
@ -20,11 +19,11 @@ go_library(
deps = [
"//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -1,48 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import "fmt"
// OperationPollingError occurs when the GCE Operation cannot be retrieved for a prolonged period.
type OperationPollingError struct {
LastPollError error
}
// Error returns a string representation including the last poll error encountered.
func (e *OperationPollingError) Error() string {
return fmt.Sprintf("GCE operation polling error: %v", e.LastPollError)
}
// GCEOperationError occurs when the GCE Operation finishes with an error.
type GCEOperationError struct {
// HTTPStatusCode is the HTTP status code of the final error.
// For example, a failed operation may have 400 - BadRequest.
HTTPStatusCode int
// Code is GCE's code of what went wrong.
// For example, RESOURCE_IN_USE_BY_ANOTHER_RESOURCE
Code string
// Message is a human readable message.
// For example, "The network resource 'xxx' is already being used by 'xxx'"
Message string
}
// Error returns a string representation including the HTTP Status code, GCE's error code
// and a human readable message.
func (e *GCEOperationError) Error() string {
return fmt.Sprintf("GCE %v - %v: %v", e.HTTPStatusCode, e.Code, e.Message)
}

View File

@ -5,7 +5,7 @@ go_library(
srcs = ["filter.go"],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/golang/glog:go_default_library"],
deps = ["//vendor/k8s.io/klog:go_default_library"],
)
go_test(

View File

@ -34,7 +34,7 @@ import (
"regexp"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
var (
@ -221,7 +221,7 @@ func (fp *filterPredicate) String() string {
func (fp *filterPredicate) match(o interface{}) bool {
v, err := extractValue(fp.fieldName, o)
glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err)
klog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err)
if err != nil {
return false
}
@ -234,7 +234,7 @@ func (fp *filterPredicate) match(o interface{}) bool {
}
re, err := regexp.Compile(*fp.s)
if err != nil {
glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err)
klog.Errorf("Match regexp %q is invalid: %v", *fp.s, err)
return false
}
match = re.Match([]byte(x))

File diff suppressed because it is too large

View File

@ -99,7 +99,7 @@ import (
"sync"
"google.golang.org/api/googleapi"
"github.com/golang/glog"
"k8s.io/klog"
"{{.PackageRoot}}/filter"
"{{.PackageRoot}}/meta"
@ -219,7 +219,7 @@ func (m *Mock{{.Service}}Obj) ToAlpha() *{{.Alpha.FQObjectType}} {
// Convert the object via JSON copying to the type that was requested.
ret := &{{.Alpha.FQObjectType}}{}
if err := copyViaJSON(ret, m.Obj); err != nil {
glog.Errorf("Could not convert %T to *{{.Alpha.FQObjectType}} via JSON: %v", m.Obj, err)
klog.Errorf("Could not convert %T to *{{.Alpha.FQObjectType}} via JSON: %v", m.Obj, err)
}
return ret
}
@ -233,7 +233,7 @@ func (m *Mock{{.Service}}Obj) ToBeta() *{{.Beta.FQObjectType}} {
// Convert the object via JSON copying to the type that was requested.
ret := &{{.Beta.FQObjectType}}{}
if err := copyViaJSON(ret, m.Obj); err != nil {
glog.Errorf("Could not convert %T to *{{.Beta.FQObjectType}} via JSON: %v", m.Obj, err)
klog.Errorf("Could not convert %T to *{{.Beta.FQObjectType}} via JSON: %v", m.Obj, err)
}
return ret
}
@ -247,7 +247,7 @@ func (m *Mock{{.Service}}Obj) ToGA() *{{.GA.FQObjectType}} {
// Convert the object via JSON copying to the type that was requested.
ret := &{{.GA.FQObjectType}}{}
if err := copyViaJSON(ret, m.Obj); err != nil {
glog.Errorf("Could not convert %T to *{{.GA.FQObjectType}} via JSON: %v", m.Obj, err)
klog.Errorf("Could not convert %T to *{{.GA.FQObjectType}} via JSON: %v", m.Obj, err)
}
return ret
}
@ -394,7 +394,7 @@ type {{.MockWrapType}} struct {
func (m *{{.MockWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjectType}}, error) {
if m.GetHook != nil {
if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, %v", ctx, key, obj ,err)
klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, %v", ctx, key, obj ,err)
return obj, err
}
}
@ -406,12 +406,12 @@ func (m *{{.MockWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjec
defer m.Lock.Unlock()
if err, ok := m.GetError[*key]; ok {
glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err)
klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err
}
if obj, ok := m.Objects[*key]; ok {
typedObj := obj.To{{.VersionTitle}}()
glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
return typedObj, nil
}
@ -419,7 +419,7 @@ func (m *{{.MockWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjec
Code: http.StatusNotFound,
Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key),
}
glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err)
klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err)
return nil, err
}
{{- end}}
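Each generated mock exposes optional hooks (GetHook, ListHook, InsertHook, DeleteHook) that tests install to intercept a call before the map-backed default runs; returning true short-circuits the call. A hedged sketch against the GA HealthChecks mock, assuming the constructor and router names from the generated cloud package:

package main

import (
	"context"
	"fmt"

	ga "google.golang.org/api/compute/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	mock := cloud.NewMockGCE(&cloud.SingleProjectRouter{ID: "mock-project"})
	mock.MockHealthChecks.GetHook = func(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, *ga.HealthCheck, error) {
		if key.Name == "intercepted" {
			return true, &ga.HealthCheck{Name: key.Name}, nil // handled by the hook
		}
		return false, nil, nil // fall through to the map-backed default
	}
	hc, err := mock.HealthChecks().Get(context.Background(), meta.GlobalKey("intercepted"))
	fmt.Println(hc, err)
}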
@ -440,15 +440,15 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
if m.ListHook != nil {
{{if .KeyIsGlobal -}}
if intercept, objs, err := m.ListHook(ctx, fl, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
{{- end -}}
{{- if .KeyIsRegional -}}
if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
{{- end -}}
{{- if .KeyIsZonal -}}
if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
{{- end}}
return objs, err
}
@ -460,13 +460,13 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
if m.ListError != nil {
err := *m.ListError
{{if .KeyIsGlobal -}}
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = nil, %v", ctx, fl, err)
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = nil, %v", ctx, fl, err)
{{- end -}}
{{- if .KeyIsRegional -}}
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
{{- end -}}
{{- if .KeyIsZonal -}}
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
{{- end}}
return nil, *m.ListError
@ -495,13 +495,13 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
}
{{if .KeyIsGlobal -}}
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
{{- end -}}
{{- if .KeyIsRegional -}}
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
{{- end -}}
{{- if .KeyIsZonal -}}
glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
{{- end}}
return objs, nil
}
@ -512,7 +512,7 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQObjectType}}) error {
if m.InsertHook != nil {
if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
}
@ -524,7 +524,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F
defer m.Lock.Unlock()
if err, ok := m.InsertError[*key]; ok {
glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
if _, ok := m.Objects[*key]; ok {
@ -532,7 +532,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F
Code: http.StatusConflict,
Message: fmt.Sprintf("{{.MockWrapType}} %v exists", key),
}
glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
return err
}
@ -541,7 +541,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F
obj.SelfLink = SelfLink(meta.Version{{.VersionTitle}}, projectID, "{{.Resource}}", key)
m.Objects[*key] = &Mock{{.Service}}Obj{obj}
glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = nil", ctx, key, obj)
klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = nil", ctx, key, obj)
return nil
}
{{- end}}
@ -551,7 +551,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F
func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
if m.DeleteHook != nil {
if intercept, err := m.DeleteHook(ctx, key, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
return err
}
}
@ -563,7 +563,7 @@ func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
defer m.Lock.Unlock()
if err, ok := m.DeleteError[*key]; ok {
glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
return err
}
if _, ok := m.Objects[*key]; !ok {
@ -571,12 +571,12 @@ func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
Code: http.StatusNotFound,
Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key),
}
glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
return err
}
delete(m.Objects, *key)
glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = nil", ctx, key)
klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = nil", ctx, key)
return nil
}
{{- end}}
@ -586,7 +586,7 @@ func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) {
if m.AggregatedListHook != nil {
if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept {
glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
return objs, err
}
}
@ -596,7 +596,7 @@ func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (m
if m.AggregatedListError != nil {
err := *m.AggregatedListError
glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
return nil, err
}
@ -610,7 +610,7 @@ func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (m
location := res.Key.Zone
{{- end}}
if err != nil {
glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
return nil, err
}
if !fl.Match(obj.To{{.VersionTitle}}()) {
@ -618,7 +618,7 @@ func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (m
}
objs[location] = append(objs[location], obj.To{{.VersionTitle}}())
}
glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs))
klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs))
return objs, nil
}
{{- end}}
@ -659,10 +659,10 @@ type {{.GCEWrapType}} struct {
{{- if .GenerateGet}}
// Get the {{.Object}} named by key.
func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjectType}}, error) {
glog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): called", ctx, key)
klog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("{{.GCEWrapType}}.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
klog.V(2).Infof("{{.GCEWrapType}}.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
return nil, fmt.Errorf("invalid GCE key (%#v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}")
@ -672,9 +672,9 @@ func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObject
Version: meta.Version("{{.Version}}"),
Service: "{{.Service}}",
}
glog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
return nil, err
}
{{- if .KeyIsGlobal}}
@ -688,7 +688,7 @@ func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObject
{{- end}}
call.Context(ctx)
v, err := call.Do()
glog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v) = %+v, %v", ctx, key, v, err)
klog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v) = %+v, %v", ctx, key, v, err)
return v, err
}
{{- end}}
@ -697,15 +697,15 @@ func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObject
// List all {{.Object}} objects.
{{- if .KeyIsGlobal}}
func (g *{{.GCEWrapType}}) List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) {
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v) called", ctx, fl)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v) called", ctx, fl)
{{- end -}}
{{- if .KeyIsRegional}}
func (g *{{.GCEWrapType}}) List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) {
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, region, fl)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, region, fl)
{{- end -}}
{{- if .KeyIsZonal}}
func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) {
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, zone, fl)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, zone, fl)
{{- end}}
projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}")
rk := &RateLimitKey{
@ -718,15 +718,15 @@ func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
return nil, err
}
{{- if .KeyIsGlobal}}
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID)
{{- end -}}
{{- if .KeyIsRegional}}
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, region)
{{- end -}}
{{- if .KeyIsZonal}}
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, zone)
{{- end}}
if fl != filter.None {
@ -734,23 +734,23 @@ func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
}
var all []*{{.FQObjectType}}
f := func(l *{{.ObjectListType}}) error {
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v): page %+v", ctx, fl, l)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v): page %+v", ctx, fl, l)
all = append(all, l.Items...)
return nil
}
if err := call.Pages(ctx, f); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
klog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
return nil, err
}
if glog.V(4) {
glog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if glog.V(5) {
if klog.V(4) {
klog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if klog.V(5) {
var asStr []string
for _, o := range all {
asStr = append(asStr, fmt.Sprintf("%+v", o))
}
glog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
}
return all, nil
@ -760,9 +760,9 @@ func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F)
{{- if .GenerateInsert}}
// Insert {{.Object}} with key of value obj.
func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQObjectType}}) error {
glog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v): called", ctx, key, obj)
klog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v): called", ctx, key, obj)
if !key.Valid() {
glog.V(2).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
klog.V(2).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}")
@ -772,9 +772,9 @@ func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQ
Version: meta.Version("{{.Version}}"),
Service: "{{.Service}}",
}
glog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
return err
}
obj.Name = key.Name
@ -791,12 +791,12 @@ func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQ
op, err := call.Do()
if err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...) = %+v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...) = %+v", ctx, key, err)
return err
}
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
klog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
return err
}
{{- end}}
@ -804,9 +804,9 @@ func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQ
{{- if .GenerateDelete}}
// Delete the {{.Object}} referenced by key.
func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
glog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): called", ctx, key)
klog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("{{.GCEWrapType}}.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
klog.V(2).Infof("{{.GCEWrapType}}.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
return fmt.Errorf("invalid GCE key (%+v)", key)
}
projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}")
@ -816,9 +816,9 @@ func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
Version: meta.Version("{{.Version}}"),
Service: "{{.Service}}",
}
glog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
return err
}
{{- if .KeyIsGlobal}}
@ -834,12 +834,12 @@ func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
op, err := call.Do()
if err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
return err
}
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err)
return err
}
{{end -}}
@ -847,7 +847,7 @@ func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error {
{{- if .AggregatedList}}
// AggregatedList lists all resources of the given type across all locations.
func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) {
glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) called", ctx, fl)
klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) called", ctx, fl)
projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}")
rk := &RateLimitKey{
@ -857,9 +857,9 @@ func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (ma
Service: "{{.Service}}",
}
glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err)
klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err)
return nil, err
}
@ -872,23 +872,23 @@ func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (ma
all := map[string][]*{{.FQObjectType}}{}
f := func(l *{{.ObjectAggregatedListType}}) error {
for k, v := range l.Items {
glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v)
klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v)
all[k] = append(all[k], v.{{.AggregatedListField}}...)
}
return nil
}
if err := call.Pages(ctx, f); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err)
klog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err)
return nil, err
}
if glog.V(4) {
glog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if glog.V(5) {
if klog.V(4) {
klog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil)
} else if klog.V(5) {
var asStr []string
for _, o := range all {
asStr = append(asStr, fmt.Sprintf("%+v", o))
}
glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil)
klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil)
}
return all, nil
}
@ -898,10 +898,10 @@ func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (ma
{{- range .}}
// {{.Name}} is a method on {{.GCEWrapType}}.
func (g *{{.GCEWrapType}}) {{.FcnArgs}} {
glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): called", ctx, key)
klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): called", ctx, key)
if !key.Valid() {
glog.V(2).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
klog.V(2).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
{{- if .IsOperation}}
return fmt.Errorf("invalid GCE key (%+v)", key)
{{- else if .IsGet}}
@ -917,10 +917,10 @@ func (g *{{.GCEWrapType}}) {{.FcnArgs}} {
Version: meta.Version("{{.Version}}"),
Service: "{{.Service}}",
}
glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
{{- if .IsOperation}}
return err
{{- else}}
@ -940,36 +940,36 @@ func (g *{{.GCEWrapType}}) {{.FcnArgs}} {
call.Context(ctx)
op, err := call.Do()
if err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err)
return err
}
err = g.s.WaitForCompletion(ctx, op)
glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err)
klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err)
return err
{{- else if .IsGet}}
call.Context(ctx)
v, err := call.Do()
glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v, %v", ctx, key, v, err)
klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v, %v", ctx, key, v, err)
return v, err
{{- else if .IsPaged}}
var all []*{{.Version}}.{{.ItemType}}
f := func(l *{{.Version}}.{{.ReturnType}}) error {
glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): page %+v", ctx, key, l)
klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): page %+v", ctx, key, l)
all = append(all, l.Items...)
return nil
}
if err := call.Pages(ctx, f); err != nil {
glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, nil, err)
klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, nil, err)
return nil, err
}
if glog.V(4) {
glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil)
} else if glog.V(5) {
if klog.V(4) {
klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil)
} else if klog.V(5) {
var asStr []string
for _, o := range all {
asStr = append(asStr, fmt.Sprintf("%+v", o))
}
glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, asStr, nil)
klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, asStr, nil)
}
return all, nil
{{- end}}
@ -1207,7 +1207,7 @@ func Test{{.Service}}Group(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Alpha{{.Service}}().List(); got %+v, want %+v", got, want)
t.Errorf("Beta{{.Service}}().List(); got %+v, want %+v", got, want)
}
}
}
@ -1227,7 +1227,7 @@ func Test{{.Service}}Group(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("Alpha{{.Service}}().List(); got %+v, want %+v", got, want)
t.Errorf("{{.Service}}().List(); got %+v, want %+v", got, want)
}
}
}

View File

@ -127,7 +127,7 @@ func TestAddressesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
t.Errorf("BetaAddresses().List(); got %+v, want %+v", got, want)
}
}
}
@ -141,7 +141,7 @@ func TestAddressesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
t.Errorf("Addresses().List(); got %+v, want %+v", got, want)
}
}
}
@ -262,7 +262,7 @@ func TestBackendServicesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want)
t.Errorf("BetaBackendServices().List(); got %+v, want %+v", got, want)
}
}
}
@ -276,7 +276,7 @@ func TestBackendServicesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want)
t.Errorf("BackendServices().List(); got %+v, want %+v", got, want)
}
}
}
@ -351,7 +351,7 @@ func TestDisksGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want)
t.Errorf("Disks().List(); got %+v, want %+v", got, want)
}
}
}
@ -414,7 +414,7 @@ func TestFirewallsGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaFirewalls().List(); got %+v, want %+v", got, want)
t.Errorf("Firewalls().List(); got %+v, want %+v", got, want)
}
}
}
@ -507,7 +507,7 @@ func TestForwardingRulesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaForwardingRules().List(); got %+v, want %+v", got, want)
t.Errorf("ForwardingRules().List(); got %+v, want %+v", got, want)
}
}
}
@ -576,7 +576,7 @@ func TestGlobalAddressesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want)
t.Errorf("GlobalAddresses().List(); got %+v, want %+v", got, want)
}
}
}
@ -639,7 +639,7 @@ func TestGlobalForwardingRulesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want)
t.Errorf("GlobalForwardingRules().List(); got %+v, want %+v", got, want)
}
}
}
@ -665,6 +665,8 @@ func TestHealthChecksGroup(t *testing.T) {
var key *meta.Key
keyAlpha := meta.GlobalKey("key-alpha")
key = keyAlpha
keyBeta := meta.GlobalKey("key-beta")
key = keyBeta
keyGA := meta.GlobalKey("key-ga")
key = keyGA
// Ignore unused variables.
@ -674,6 +676,9 @@ func TestHealthChecksGroup(t *testing.T) {
if _, err := mock.AlphaHealthChecks().Get(ctx, key); err == nil {
t.Errorf("AlphaHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key)
}
if _, err := mock.BetaHealthChecks().Get(ctx, key); err == nil {
t.Errorf("BetaHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key)
}
if _, err := mock.HealthChecks().Get(ctx, key); err == nil {
t.Errorf("HealthChecks().Get(%v, %v) = _, nil; want error", ctx, key)
}
@ -685,6 +690,12 @@ func TestHealthChecksGroup(t *testing.T) {
t.Errorf("AlphaHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
}
}
{
obj := &beta.HealthCheck{}
if err := mock.BetaHealthChecks().Insert(ctx, keyBeta, obj); err != nil {
t.Errorf("BetaHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
}
}
{
obj := &ga.HealthCheck{}
if err := mock.HealthChecks().Insert(ctx, keyGA, obj); err != nil {
@ -696,15 +707,20 @@ func TestHealthChecksGroup(t *testing.T) {
if obj, err := mock.AlphaHealthChecks().Get(ctx, key); err != nil {
t.Errorf("AlphaHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
if obj, err := mock.BetaHealthChecks().Get(ctx, key); err != nil {
t.Errorf("BetaHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
if obj, err := mock.HealthChecks().Get(ctx, key); err != nil {
t.Errorf("HealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
// List.
mock.MockAlphaHealthChecks.Objects[*keyAlpha] = mock.MockAlphaHealthChecks.Obj(&alpha.HealthCheck{Name: keyAlpha.Name})
mock.MockBetaHealthChecks.Objects[*keyBeta] = mock.MockBetaHealthChecks.Obj(&beta.HealthCheck{Name: keyBeta.Name})
mock.MockHealthChecks.Objects[*keyGA] = mock.MockHealthChecks.Obj(&ga.HealthCheck{Name: keyGA.Name})
want := map[string]bool{
"key-alpha": true,
"key-beta": true,
"key-ga": true,
}
_ = want // ignore unused variables.
@ -722,6 +738,20 @@ func TestHealthChecksGroup(t *testing.T) {
}
}
}
{
objs, err := mock.BetaHealthChecks().List(ctx, filter.None)
if err != nil {
t.Errorf("BetaHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
} else {
got := map[string]bool{}
for _, obj := range objs {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("BetaHealthChecks().List(); got %+v, want %+v", got, want)
}
}
}
{
objs, err := mock.HealthChecks().List(ctx, filter.None)
if err != nil {
@ -732,7 +762,7 @@ func TestHealthChecksGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaHealthChecks().List(); got %+v, want %+v", got, want)
t.Errorf("HealthChecks().List(); got %+v, want %+v", got, want)
}
}
}
@ -741,6 +771,9 @@ func TestHealthChecksGroup(t *testing.T) {
if err := mock.AlphaHealthChecks().Delete(ctx, keyAlpha); err != nil {
t.Errorf("AlphaHealthChecks().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
}
if err := mock.BetaHealthChecks().Delete(ctx, keyBeta); err != nil {
t.Errorf("BetaHealthChecks().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
}
if err := mock.HealthChecks().Delete(ctx, keyGA); err != nil {
t.Errorf("HealthChecks().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
}
@ -749,6 +782,9 @@ func TestHealthChecksGroup(t *testing.T) {
if err := mock.AlphaHealthChecks().Delete(ctx, keyAlpha); err == nil {
t.Errorf("AlphaHealthChecks().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
}
if err := mock.BetaHealthChecks().Delete(ctx, keyBeta); err == nil {
t.Errorf("BetaHealthChecks().Delete(%v, %v) = nil; want error", ctx, keyBeta)
}
if err := mock.HealthChecks().Delete(ctx, keyGA); err == nil {
t.Errorf("HealthChecks().Delete(%v, %v) = nil; want error", ctx, keyGA)
}
@ -801,7 +837,7 @@ func TestHttpHealthChecksGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaHttpHealthChecks().List(); got %+v, want %+v", got, want)
t.Errorf("HttpHealthChecks().List(); got %+v, want %+v", got, want)
}
}
}
@ -864,7 +900,7 @@ func TestHttpsHealthChecksGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaHttpsHealthChecks().List(); got %+v, want %+v", got, want)
t.Errorf("HttpsHealthChecks().List(); got %+v, want %+v", got, want)
}
}
}
@ -927,7 +963,7 @@ func TestInstanceGroupsGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaInstanceGroups().List(); got %+v, want %+v", got, want)
t.Errorf("InstanceGroups().List(); got %+v, want %+v", got, want)
}
}
}
@ -1036,7 +1072,7 @@ func TestInstancesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want)
t.Errorf("BetaInstances().List(); got %+v, want %+v", got, want)
}
}
}
@ -1050,7 +1086,7 @@ func TestInstancesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want)
t.Errorf("Instances().List(); got %+v, want %+v", got, want)
}
}
}
@ -1088,6 +1124,8 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
var key *meta.Key
keyAlpha := meta.ZonalKey("key-alpha", "location")
key = keyAlpha
keyBeta := meta.ZonalKey("key-beta", "location")
key = keyBeta
// Ignore unused variables.
_, _, _ = ctx, mock, key
@ -1095,6 +1133,9 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, key); err == nil {
t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key)
}
if _, err := mock.BetaNetworkEndpointGroups().Get(ctx, key); err == nil {
t.Errorf("BetaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key)
}
// Insert.
{
@ -1103,16 +1144,27 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
}
}
{
obj := &beta.NetworkEndpointGroup{}
if err := mock.BetaNetworkEndpointGroups().Insert(ctx, keyBeta, obj); err != nil {
t.Errorf("BetaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
}
}
// Get across versions.
if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, key); err != nil {
t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
if obj, err := mock.BetaNetworkEndpointGroups().Get(ctx, key); err != nil {
t.Errorf("BetaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
// List.
mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name})
mock.MockBetaNetworkEndpointGroups.Objects[*keyBeta] = mock.MockBetaNetworkEndpointGroups.Obj(&beta.NetworkEndpointGroup{Name: keyBeta.Name})
want := map[string]bool{
"key-alpha": true,
"key-beta": true,
}
_ = want // ignore unused variables.
{
@ -1129,16 +1181,36 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
}
}
}
{
objs, err := mock.BetaNetworkEndpointGroups().List(ctx, location, filter.None)
if err != nil {
t.Errorf("BetaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
} else {
got := map[string]bool{}
for _, obj := range objs {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("BetaNetworkEndpointGroups().List(); got %+v, want %+v", got, want)
}
}
}
// Delete across versions.
if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, keyAlpha); err != nil {
t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
}
if err := mock.BetaNetworkEndpointGroups().Delete(ctx, keyBeta); err != nil {
t.Errorf("BetaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
}
// Delete not found.
if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, keyAlpha); err == nil {
t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
}
if err := mock.BetaNetworkEndpointGroups().Delete(ctx, keyBeta); err == nil {
t.Errorf("BetaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, keyBeta)
}
}
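
Editorial note on the pattern these tests exercise: the generated mocks appear to keep one backing store per resource group, shared by the alpha, beta, and GA clients, which is why an object inserted through one version can be read back through another. A minimal sketch of that behavior under those assumptions (the SingleProjectRouter field name is assumed, not confirmed by this diff):

package main

import (
	"context"
	"fmt"

	beta "google.golang.org/api/compute/v0.beta"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	ctx := context.Background()
	mock := cloud.NewMockGCE(&cloud.SingleProjectRouter{ID: "test-project"})

	// Insert through the beta client...
	key := meta.GlobalKey("hc-1")
	if err := mock.BetaHealthChecks().Insert(ctx, key, &beta.HealthCheck{Name: key.Name}); err != nil {
		fmt.Println("insert failed:", err)
		return
	}

	// ...and read it back through the GA client: both views resolve against
	// the same store keyed by *meta.Key, converting between API versions.
	obj, err := mock.HealthChecks().Get(ctx, key)
	fmt.Println(obj != nil, err)
}
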
func TestProjectsGroup(t *testing.T) {
@ -1249,7 +1321,7 @@ func TestRegionBackendServicesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want)
t.Errorf("RegionBackendServices().List(); got %+v, want %+v", got, want)
}
}
}
@ -1279,58 +1351,58 @@ func TestRegionDisksGroup(t *testing.T) {
mock := NewMockGCE(pr)
var key *meta.Key
keyBeta := meta.RegionalKey("key-beta", "location")
key = keyBeta
keyGA := meta.RegionalKey("key-ga", "location")
key = keyGA
// Ignore unused variables.
_, _, _ = ctx, mock, key
// Get not found.
if _, err := mock.BetaRegionDisks().Get(ctx, key); err == nil {
t.Errorf("BetaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
if _, err := mock.RegionDisks().Get(ctx, key); err == nil {
t.Errorf("RegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
}
// Insert.
{
obj := &beta.Disk{}
if err := mock.BetaRegionDisks().Insert(ctx, keyBeta, obj); err != nil {
t.Errorf("BetaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
obj := &ga.Disk{}
if err := mock.RegionDisks().Insert(ctx, keyGA, obj); err != nil {
t.Errorf("RegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyGA, obj, err)
}
}
// Get across versions.
if obj, err := mock.BetaRegionDisks().Get(ctx, key); err != nil {
t.Errorf("BetaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
if obj, err := mock.RegionDisks().Get(ctx, key); err != nil {
t.Errorf("RegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
}
// List.
mock.MockBetaRegionDisks.Objects[*keyBeta] = mock.MockBetaRegionDisks.Obj(&beta.Disk{Name: keyBeta.Name})
mock.MockRegionDisks.Objects[*keyGA] = mock.MockRegionDisks.Obj(&ga.Disk{Name: keyGA.Name})
want := map[string]bool{
"key-beta": true,
"key-ga": true,
}
_ = want // ignore unused variables.
{
objs, err := mock.BetaRegionDisks().List(ctx, location, filter.None)
objs, err := mock.RegionDisks().List(ctx, location, filter.None)
if err != nil {
t.Errorf("BetaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
t.Errorf("RegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
} else {
got := map[string]bool{}
for _, obj := range objs {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want)
t.Errorf("RegionDisks().List(); got %+v, want %+v", got, want)
}
}
}
// Delete across versions.
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err != nil {
t.Errorf("BetaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
if err := mock.RegionDisks().Delete(ctx, keyGA); err != nil {
t.Errorf("RegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
}
// Delete not found.
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err == nil {
t.Errorf("BetaRegionDisks().Delete(%v, %v) = nil; want error", ctx, keyBeta)
if err := mock.RegionDisks().Delete(ctx, keyGA); err == nil {
t.Errorf("RegionDisks().Delete(%v, %v) = nil; want error", ctx, keyGA)
}
}
@ -1372,7 +1444,7 @@ func TestRegionsGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want)
t.Errorf("Regions().List(); got %+v, want %+v", got, want)
}
}
}
@ -1429,7 +1501,7 @@ func TestRoutesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want)
t.Errorf("Routes().List(); got %+v, want %+v", got, want)
}
}
}
@ -1492,7 +1564,7 @@ func TestSecurityPoliciesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaSecurityPolicies().List(); got %+v, want %+v", got, want)
t.Errorf("BetaSecurityPolicies().List(); got %+v, want %+v", got, want)
}
}
}
@ -1555,7 +1627,7 @@ func TestSslCertificatesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaSslCertificates().List(); got %+v, want %+v", got, want)
t.Errorf("SslCertificates().List(); got %+v, want %+v", got, want)
}
}
}
@ -1618,7 +1690,7 @@ func TestTargetHttpProxiesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want)
t.Errorf("TargetHttpProxies().List(); got %+v, want %+v", got, want)
}
}
}
@ -1681,7 +1753,7 @@ func TestTargetHttpsProxiesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaTargetHttpsProxies().List(); got %+v, want %+v", got, want)
t.Errorf("TargetHttpsProxies().List(); got %+v, want %+v", got, want)
}
}
}
@ -1744,7 +1816,7 @@ func TestTargetPoolsGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want)
t.Errorf("TargetPools().List(); got %+v, want %+v", got, want)
}
}
}
@ -1807,7 +1879,7 @@ func TestUrlMapsGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want)
t.Errorf("UrlMaps().List(); got %+v, want %+v", got, want)
}
}
}
@ -1861,7 +1933,7 @@ func TestZonesGroup(t *testing.T) {
got[obj.Name] = true
}
if !reflect.DeepEqual(got, want) {
t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want)
t.Errorf("Zones().List(); got %+v, want %+v", got, want)
}
}
}

View File

@ -115,6 +115,7 @@ var AllServices = []*ServiceInfo{
keyType: Global,
serviceType: reflect.TypeOf(&beta.BackendServicesService{}),
additionalMethods: []string{
"Update",
"SetSecurityPolicy",
},
},
@ -168,9 +169,9 @@ var AllServices = []*ServiceInfo{
Object: "Disk",
Service: "RegionDisks",
Resource: "disks",
version: VersionBeta,
version: VersionGA,
keyType: Regional,
serviceType: reflect.TypeOf(&beta.RegionDisksService{}),
serviceType: reflect.TypeOf(&ga.RegionDisksService{}),
additionalMethods: []string{
"Resize",
},
@ -220,6 +221,17 @@ var AllServices = []*ServiceInfo{
"Update",
},
},
{
Object: "HealthCheck",
Service: "HealthChecks",
Resource: "healthChecks",
version: VersionBeta,
keyType: Global,
serviceType: reflect.TypeOf(&beta.HealthChecksService{}),
additionalMethods: []string{
"Update",
},
},
{
Object: "HealthCheck",
Service: "HealthChecks",
@ -315,6 +327,20 @@ var AllServices = []*ServiceInfo{
},
options: AggregatedList,
},
{
Object: "NetworkEndpointGroup",
Service: "NetworkEndpointGroups",
Resource: "networkEndpointGroups",
version: VersionBeta,
keyType: Zonal,
serviceType: reflect.TypeOf(&beta.NetworkEndpointGroupsService{}),
additionalMethods: []string{
"AttachNetworkEndpoints",
"DetachNetworkEndpoints",
"ListNetworkEndpoints",
},
options: AggregatedList,
},
{
Object: "Project",
Service: "Projects",

View File

@ -44,6 +44,8 @@ var (
InUseError = &googleapi.Error{Code: http.StatusBadRequest, Message: "It's being used by god."}
// InternalServerError is a shared variable with error code StatusInternalServerError for error verification.
InternalServerError = &googleapi.Error{Code: http.StatusInternalServerError}
// UnauthorizedErr is a shared variable with error code StatusForbidden for error verification.
UnauthorizedErr = &googleapi.Error{Code: http.StatusForbidden}
)
// gceObject is an abstraction of all GCE API objects in the Go client
@ -436,6 +438,82 @@ func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.
return nil
}
// UpdateBackendServiceHook defines the hook for updating a BackendService.
// It replaces the object with the same key in the mock with the updated object.
func UpdateBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockBackendServices) error {
_, err := m.Get(ctx, key)
if err != nil {
return &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()),
}
}
obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices")
obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key)
m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj}
return nil
}
// UpdateAlphaBackendServiceHook defines the hook for updating an alpha BackendService.
// It replaces the object with the same key in the mock with the updated object.
func UpdateAlphaBackendServiceHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error {
_, err := m.Get(ctx, key)
if err != nil {
return &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()),
}
}
obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices")
obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "backendServices", key)
m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj}
return nil
}
// UpdateBetaBackendServiceHook defines the hook for updating a beta BackendService.
// It replaces the object with the same key in the mock with the updated object.
func UpdateBetaBackendServiceHook(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *cloud.MockBetaBackendServices) error {
_, err := m.Get(ctx, key)
if err != nil {
return &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()),
}
}
obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices")
obj.SelfLink = cloud.SelfLink(meta.VersionBeta, projectID, "backendServices", key)
m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj}
return nil
}
// UpdateURLMapHook defines the hook for updating a UrlMap.
// It replaces the object with the same key in the mock with the updated object.
func UpdateURLMapHook(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *cloud.MockUrlMaps) error {
_, err := m.Get(ctx, key)
if err != nil {
return &googleapi.Error{
Code: http.StatusNotFound,
Message: fmt.Sprintf("Key: %s was not found in UrlMaps", key.String()),
}
}
obj.Name = key.Name
projectID := m.ProjectRouter.ProjectID(ctx, "ga", "urlMaps")
obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "urlMaps", key)
m.Objects[*key] = &cloud.MockUrlMapsObj{Obj: obj}
return nil
}
// InsertFirewallsUnauthorizedErrHook mocks firewall insertion and always returns a forbidden error.
func InsertFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) (bool, error) {
return true, &googleapi.Error{Code: http.StatusForbidden}
@ -496,6 +574,16 @@ func DeleteAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud
return true, InternalServerError
}
// InsertAlphaBackendServiceUnauthorizedErrHook mocks inserting an alpha BackendService and returns a forbidden error.
func InsertAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) (bool, error) {
return true, UnauthorizedErr
}
// UpdateAlphaBackendServiceUnauthorizedErrHook mocks updating an alpha BackendService and returns a forbidden error.
func UpdateAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error {
return UnauthorizedErr
}
// GetRegionBackendServicesErrHook mocks getting region backend service and returns an internal server error.
func GetRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, *ga.BackendService, error) {
return true, nil, InternalServerError

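
As a usage note for the hooks added above: the generated mocks expose per-method hook fields, and tests install these helpers there. A short hedged sketch of the wiring (the MockBackendServices.UpdateHook field name and the no-op default are assumed from the surrounding generated-code conventions):

package main

import (
	"context"
	"fmt"

	ga "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
)

func main() {
	ctx := context.Background()
	m := cloud.NewMockGCE(&cloud.SingleProjectRouter{ID: "test-project"})

	// Route GA BackendServices.Update through the hook defined above so the
	// mock replaces the stored object (the assumed default is a no-op).
	m.MockBackendServices.UpdateHook = mock.UpdateBackendServiceHook

	key := meta.GlobalKey("bs-1")
	if err := m.BackendServices().Insert(ctx, key, &ga.BackendService{}); err != nil {
		fmt.Println("insert failed:", err)
		return
	}
	err := m.BackendServices().Update(ctx, key, &ga.BackendService{Port: 80})
	fmt.Println("update err:", err)
}
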
View File

@ -20,11 +20,12 @@ import (
"context"
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
alpha "google.golang.org/api/compute/v0.alpha"
beta "google.golang.org/api/compute/v0.beta"
ga "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)
@ -66,13 +67,13 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) {
switch o.key.Type() {
case meta.Regional:
op, err = o.s.GA.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
klog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
case meta.Zonal:
op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
klog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
case meta.Global:
op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
klog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
default:
return false, fmt.Errorf("invalid key type: %#v", o.key)
}
@ -85,7 +86,7 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) {
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
e := op.Error.Errors[0]
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
o.err = &googleapi.Error{Code: int(op.HttpErrorStatusCode), Message: fmt.Sprintf("%v - %v", e.Code, e.Message)}
}
return true, nil
}
@ -123,13 +124,13 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) {
switch o.key.Type() {
case meta.Regional:
op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
klog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
case meta.Zonal:
op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
klog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
case meta.Global:
op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
klog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
default:
return false, fmt.Errorf("invalid key type: %#v", o.key)
}
@ -142,7 +143,7 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) {
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
e := op.Error.Errors[0]
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
o.err = &googleapi.Error{Code: int(op.HttpErrorStatusCode), Message: fmt.Sprintf("%v - %v", e.Code, e.Message)}
}
return true, nil
}
@ -180,13 +181,13 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) {
switch o.key.Type() {
case meta.Regional:
op, err = o.s.Beta.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
klog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
case meta.Zonal:
op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
klog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
case meta.Global:
op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do()
glog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
klog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
default:
return false, fmt.Errorf("invalid key type: %#v", o.key)
}
@ -199,7 +200,7 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) {
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
e := op.Error.Errors[0]
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
o.err = &googleapi.Error{Code: int(op.HttpErrorStatusCode), Message: fmt.Sprintf("%v - %v", e.Code, e.Message)}
}
return true, nil
}

View File

@ -20,7 +20,7 @@ import (
"context"
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
alpha "google.golang.org/api/compute/v0.alpha"
beta "google.golang.org/api/compute/v0.beta"
@ -69,7 +69,7 @@ func (s *Service) wrapOperation(anyOp interface{}) (operation, error) {
func (s *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error {
op, err := s.wrapOperation(genericOp)
if err != nil {
glog.Errorf("wrapOperation(%+v) error: %v", genericOp, err)
klog.Errorf("wrapOperation(%+v) error: %v", genericOp, err)
return err
}
@ -86,18 +86,18 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error {
// returning ctx.Err().
select {
case <-ctx.Done():
glog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err())
klog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err())
return ctx.Err()
default:
// ctx is not canceled, continue immediately
}
pollCount++
glog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount)
klog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount)
s.RateLimiter.Accept(ctx, op.rateLimitKey())
done, err := op.isDone(ctx)
if err != nil {
glog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err)
klog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err)
}
if done {
@ -105,6 +105,6 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error {
}
}
glog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error())
klog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error())
return op.error()
}

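
The error-type change in this file (GCEOperationError replaced by a *googleapi.Error built from HttpErrorStatusCode) means operation failures now surface in the same shape as ordinary API call errors. A small self-contained sketch of how a caller might branch on that, for illustration only:

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFoundErr reports whether an operation error carries a 404, the same
// check callers already apply to plain API call errors.
func isNotFoundErr(err error) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == http.StatusNotFound
}

func main() {
	// Shape mirrors what isDone now constructs from the operation result.
	err := &googleapi.Error{Code: http.StatusNotFound, Message: "RESOURCE_NOT_FOUND - resource was deleted"}
	fmt.Println(isNotFoundErr(err)) // true
}
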
View File

@ -30,13 +30,13 @@ import (
gcfg "gopkg.in/gcfg.v1"
"cloud.google.com/go/compute/metadata"
"github.com/golang/glog"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
computealpha "google.golang.org/api/compute/v0.alpha"
computebeta "google.golang.org/api/compute/v0.beta"
compute "google.golang.org/api/compute/v1"
container "google.golang.org/api/container/v1"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
@ -49,7 +49,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/controller"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@ -57,6 +57,7 @@ import (
)
const (
// ProviderName is the official const representation of the Google Cloud Provider
ProviderName = "gce"
k8sNodeRouteTag = "k8s-node-route"
@ -80,13 +81,13 @@ const (
maxTargetPoolCreateInstances = 200
// HTTP Load Balancer parameters
// Configure 2 second period for external health checks.
gceHcCheckIntervalSeconds = int64(2)
// Configure 8 second period for external health checks.
gceHcCheckIntervalSeconds = int64(8)
gceHcTimeoutSeconds = int64(1)
// Start sending requests as soon as a pod is found on the node.
gceHcHealthyThreshold = int64(1)
// Defaults to 5 * 2 = 10 seconds before the LB will steer traffic away
gceHcUnhealthyThreshold = int64(5)
// Defaults to 3 * 8 = 24 seconds before the LB will steer traffic away.
gceHcUnhealthyThreshold = int64(3)
gceComputeAPIEndpoint = "https://www.googleapis.com/compute/v1/"
gceComputeAPIEndpointBeta = "https://www.googleapis.com/compute/beta/"
@ -97,9 +98,9 @@ type gceObject interface {
MarshalJSON() ([]byte, error)
}
// GCECloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine.
type GCECloud struct {
// ClusterID contains functionality for getting (and initializing) the ingress-uid. Call GCECloud.Initialize()
// Cloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine.
type Cloud struct {
// ClusterID contains functionality for getting (and initializing) the ingress-uid. Call Cloud.Initialize()
// for the cloudprovider to start watching the configmap.
ClusterID ClusterID
@ -114,6 +115,7 @@ type GCECloud struct {
eventRecorder record.EventRecorder
projectID string
region string
regional bool
localZone string // The zone in which we are running
// managedZones will be set to the 1 zone if running a single zone cluster
// it will be set to ALL zones in region for any multi-zone cluster
@ -144,7 +146,7 @@ type GCECloud struct {
// lock to prevent shared resources from being prematurely deleted while the operation is
// in progress.
sharedResourceLock sync.Mutex
// AlphaFeatureGate gates gce alpha features in GCECloud instance.
// AlphaFeatureGate gates gce alpha features in a Cloud instance.
// Related wrapper functions that interact with the gce alpha api should examine whether
// the corresponding api is enabled.
// If not enabled, they should return an error.
@ -157,6 +159,7 @@ type GCECloud struct {
s *cloud.Service
}
// ConfigGlobal is the in-memory representation of the gce.conf config data
// TODO: replace gcfg with json
type ConfigGlobal struct {
TokenURL string `gcfg:"token-url"`
@ -174,10 +177,14 @@ type ConfigGlobal struct {
SecondaryRangeName string `gcfg:"secondary-range-name"`
NodeTags []string `gcfg:"node-tags"`
NodeInstancePrefix string `gcfg:"node-instance-prefix"`
Regional bool `gcfg:"regional"`
Multizone bool `gcfg:"multizone"`
// ApiEndpoint is the GCE compute API endpoint to use. If this is blank,
// APIEndpoint is the GCE compute API endpoint to use. If this is blank,
// then the default endpoint is used.
ApiEndpoint string `gcfg:"api-endpoint"`
APIEndpoint string `gcfg:"api-endpoint"`
// ContainerAPIEndpoint is the GCE container API endpoint to use. If this is blank,
// then the default endpoint is used.
ContainerAPIEndpoint string `gcfg:"container-api-endpoint"`
// LocalZone specifies the GCE zone that gce cloud client instance is
// located in (i.e. where the controller will be running). If this is
// blank, then the local zone will be discovered via the metadata server.
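
To illustrate the renamed and newly added config keys (api-endpoint, container-api-endpoint, regional), here is a hedged, self-contained sketch of parsing a gce.conf fragment with gcfg; the struct mirrors only the subset of ConfigGlobal visible in this hunk, and the endpoint values are invented:

package main

import (
	"fmt"
	"strings"

	gcfg "gopkg.in/gcfg.v1"
)

// exampleGlobal mirrors a subset of ConfigGlobal above; tags follow the diff.
type exampleGlobal struct {
	Regional             bool   `gcfg:"regional"`
	Multizone            bool   `gcfg:"multizone"`
	APIEndpoint          string `gcfg:"api-endpoint"`
	ContainerAPIEndpoint string `gcfg:"container-api-endpoint"`
}

type exampleFile struct {
	Global exampleGlobal `gcfg:"global"`
}

func main() {
	conf := `[global]
regional = true
api-endpoint = https://www.googleapis.com/compute/staging_v1/
container-api-endpoint = https://container.googleapis.com/
`
	var cfg exampleFile
	if err := gcfg.ReadInto(&cfg, strings.NewReader(conf)); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// regional = true implies ManagedZones is cleared so all zones in the
	// region are managed, per generateCloudConfig below.
	fmt.Printf("%+v\n", cfg.Global)
}
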
@ -192,24 +199,26 @@ type ConfigFile struct {
Global ConfigGlobal `gcfg:"global"`
}
// CloudConfig includes all the necessary configuration for creating GCECloud
// CloudConfig includes all the necessary configuration for creating Cloud
type CloudConfig struct {
ApiEndpoint string
ProjectID string
NetworkProjectID string
Region string
Zone string
ManagedZones []string
NetworkName string
NetworkURL string
SubnetworkName string
SubnetworkURL string
SecondaryRangeName string
NodeTags []string
NodeInstancePrefix string
TokenSource oauth2.TokenSource
UseMetadataServer bool
AlphaFeatureGate *AlphaFeatureGate
APIEndpoint string
ContainerAPIEndpoint string
ProjectID string
NetworkProjectID string
Region string
Regional bool
Zone string
ManagedZones []string
NetworkName string
NetworkURL string
SubnetworkName string
SubnetworkURL string
SecondaryRangeName string
NodeTags []string
NodeInstancePrefix string
TokenSource oauth2.TokenSource
UseMetadataServer bool
AlphaFeatureGate *AlphaFeatureGate
}
func init() {
@ -229,17 +238,22 @@ type Services struct {
}
// ComputeServices returns access to the internal compute services.
func (g *GCECloud) ComputeServices() *Services {
func (g *Cloud) ComputeServices() *Services {
return &Services{g.service, g.serviceAlpha, g.serviceBeta}
}
// Compute returns the generated stubs for the compute API.
func (g *GCECloud) Compute() cloud.Cloud {
func (g *Cloud) Compute() cloud.Cloud {
return g.c
}
// newGCECloud creates a new instance of GCECloud.
func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) {
// ContainerService returns the container service.
func (g *Cloud) ContainerService() *container.Service {
return g.containerService
}
// newGCECloud creates a new instance of Cloud.
func newGCECloud(config io.Reader) (gceCloud *Cloud, err error) {
var cloudConfig *CloudConfig
var configFile *ConfigFile
@ -248,7 +262,7 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) {
if err != nil {
return nil, err
}
glog.Infof("Using GCE provider config %+v", configFile)
klog.Infof("Using GCE provider config %+v", configFile)
}
cloudConfig, err = generateCloudConfig(configFile)
@ -261,7 +275,7 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) {
func readConfig(reader io.Reader) (*ConfigFile, error) {
cfg := &ConfigFile{}
if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil {
glog.Errorf("Couldn't read config: %v", err)
klog.Errorf("Couldn't read config: %v", err)
return nil, err
}
return cfg, nil
@ -274,8 +288,12 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
cloudConfig.UseMetadataServer = true
cloudConfig.AlphaFeatureGate = NewAlphaFeatureGate([]string{})
if configFile != nil {
if configFile.Global.ApiEndpoint != "" {
cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint
if configFile.Global.APIEndpoint != "" {
cloudConfig.APIEndpoint = configFile.Global.APIEndpoint
}
if configFile.Global.ContainerAPIEndpoint != "" {
cloudConfig.ContainerAPIEndpoint = configFile.Global.ContainerAPIEndpoint
}
if configFile.Global.TokenURL != "" {
@ -319,9 +337,14 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
return nil, err
}
// Determine if it's a regional cluster
if configFile != nil && configFile.Global.Regional {
cloudConfig.Regional = true
}
// generate managedZones
cloudConfig.ManagedZones = []string{cloudConfig.Zone}
if configFile != nil && configFile.Global.Multizone {
if configFile != nil && (configFile.Global.Multizone || configFile.Global.Regional) {
cloudConfig.ManagedZones = nil // Use all zones in region
}
@ -356,11 +379,11 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
return cloudConfig, err
}
// CreateGCECloud creates a GCECloud object using the specified parameters.
// CreateGCECloud creates a Cloud object using the specified parameters.
// If no networkURL is specified, networkName is resolved via a REST call.
// If no tokenSource is specified, uses oauth2.DefaultTokenSource.
// If managedZones is nil / empty all zones in the region will be managed.
func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
func CreateGCECloud(config *CloudConfig) (*Cloud, error) {
// Remove any pre-release version and build metadata from the semver,
// leaving only the MAJOR.MINOR.PATCH portion. See http://semver.org/.
version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v")
@ -408,10 +431,10 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
// Generate alpha and beta api endpoints based on override v1 api endpoint.
// For example,
// staging API endpoint: https://www.googleapis.com/compute/staging_v1/
if config.ApiEndpoint != "" {
service.BasePath = fmt.Sprintf("%sprojects/", config.ApiEndpoint)
serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "beta", -1))
serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "alpha", -1))
if config.APIEndpoint != "" {
service.BasePath = fmt.Sprintf("%sprojects/", config.APIEndpoint)
serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "beta", -1))
serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "alpha", -1))
}
containerService, err := container.New(client)
@ -419,6 +442,9 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
return nil, err
}
containerService.UserAgent = userAgent
if config.ContainerAPIEndpoint != "" {
containerService.BasePath = config.ContainerAPIEndpoint
}
tpuService, err := newTPUService(client)
if err != nil {
@ -436,17 +462,17 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
if config.NetworkURL != "" {
networkURL = config.NetworkURL
} else if config.NetworkName != "" {
networkURL = gceNetworkURL(config.ApiEndpoint, netProjID, config.NetworkName)
networkURL = gceNetworkURL(config.APIEndpoint, netProjID, config.NetworkName)
} else {
// Other consumers may use the cloudprovider without utilizing the wrapped GCE API functions
// or functions requiring network/subnetwork URLs (e.g. Kubelet).
glog.Warningf("No network name or URL specified.")
klog.Warningf("No network name or URL specified.")
}
if config.SubnetworkURL != "" {
subnetURL = config.SubnetworkURL
} else if config.SubnetworkName != "" {
subnetURL = gceSubnetworkURL(config.ApiEndpoint, netProjID, config.Region, config.SubnetworkName)
subnetURL = gceSubnetworkURL(config.APIEndpoint, netProjID, config.Region, config.SubnetworkName)
} else {
// Determine the type of network and attempt to discover the correct subnet for AUTO mode.
// Gracefully fail because kubelet calls CreateGCECloud without any config, and minions
@ -454,20 +480,20 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
if networkName := lastComponent(networkURL); networkName != "" {
var n *compute.Network
if n, err = getNetwork(service, netProjID, networkName); err != nil {
glog.Warningf("Could not retrieve network %q; err: %v", networkName, err)
klog.Warningf("Could not retrieve network %q; err: %v", networkName, err)
} else {
switch typeOfNetwork(n) {
case netTypeLegacy:
glog.Infof("Network %q is type legacy - no subnetwork", networkName)
klog.Infof("Network %q is type legacy - no subnetwork", networkName)
isLegacyNetwork = true
case netTypeCustom:
glog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName)
klog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName)
case netTypeAuto:
subnetURL, err = determineSubnetURL(service, netProjID, networkName, config.Region)
if err != nil {
glog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err)
klog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err)
} else {
glog.Infof("Auto selecting subnetwork %q", subnetURL)
klog.Infof("Auto selecting subnetwork %q", subnetURL)
}
}
}
@ -481,12 +507,12 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
}
}
if len(config.ManagedZones) > 1 {
glog.Infof("managing multiple zones: %v", config.ManagedZones)
klog.Infof("managing multiple zones: %v", config.ManagedZones)
}
operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(5, 5) // 5 qps, 5 burst.
gce := &GCECloud{
gce := &Cloud{
service: service,
serviceAlpha: serviceAlpha,
serviceBeta: serviceBeta,
@ -496,6 +522,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
networkProjectID: netProjID,
onXPN: onXPN,
region: config.Region,
regional: config.Regional,
localZone: config.Zone,
managedZones: config.ManagedZones,
networkURL: networkURL,
@ -525,8 +552,8 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
// SetRateLimiter adds a custom cloud.RateLimiter implementation.
// WARNING: Calling this could have unexpected behavior if you have in-flight
// requests. It is best to use this immediately after creating a GCECloud.
func (g *GCECloud) SetRateLimiter(rl cloud.RateLimiter) {
// requests. It is best to use this immediately after creating a Cloud.
func (g *Cloud) SetRateLimiter(rl cloud.RateLimiter) {
if rl != nil {
g.s.RateLimiter = rl
}
@ -561,7 +588,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic
if isProjectNumber(projID) {
projName, err := getProjectID(service, projID)
if err != nil {
glog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err)
klog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err)
} else {
projID = projName
}
@ -574,7 +601,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic
if isProjectNumber(netProjID) {
netProjName, err := getProjectID(service, netProjID)
if err != nil {
glog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err)
klog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err)
} else {
netProjID = netProjName
}
@ -585,89 +612,92 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic
// Initialize takes in a clientBuilder and spawns a goroutine for watching the clusterid configmap.
// This must be called before utilizing the funcs of gce.ClusterID
func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
gce.clientBuilder = clientBuilder
gce.client = clientBuilder.ClientOrDie("cloud-provider")
func (g *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
g.clientBuilder = clientBuilder
g.client = clientBuilder.ClientOrDie("cloud-provider")
if gce.OnXPN() {
gce.eventBroadcaster = record.NewBroadcaster()
gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gce.client.CoreV1().Events("")})
gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"})
if g.OnXPN() {
g.eventBroadcaster = record.NewBroadcaster()
g.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: g.client.CoreV1().Events("")})
g.eventRecorder = g.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"})
}
go gce.watchClusterID()
go g.watchClusterID(stop)
}
// LoadBalancer returns an implementation of LoadBalancer for Google Compute Engine.
func (gce *GCECloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return gce, true
func (g *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return g, true
}
// Instances returns an implementation of Instances for Google Compute Engine.
func (gce *GCECloud) Instances() (cloudprovider.Instances, bool) {
return gce, true
func (g *Cloud) Instances() (cloudprovider.Instances, bool) {
return g, true
}
// Zones returns an implementation of Zones for Google Compute Engine.
func (gce *GCECloud) Zones() (cloudprovider.Zones, bool) {
return gce, true
func (g *Cloud) Zones() (cloudprovider.Zones, bool) {
return g, true
}
func (gce *GCECloud) Clusters() (cloudprovider.Clusters, bool) {
return gce, true
// Clusters returns an implementation of Clusters for Google Compute Engine.
func (g *Cloud) Clusters() (cloudprovider.Clusters, bool) {
return g, true
}
// Routes returns an implementation of Routes for Google Compute Engine.
func (gce *GCECloud) Routes() (cloudprovider.Routes, bool) {
return gce, true
func (g *Cloud) Routes() (cloudprovider.Routes, bool) {
return g, true
}
// ProviderName returns the cloud provider ID.
func (gce *GCECloud) ProviderName() string {
func (g *Cloud) ProviderName() string {
return ProviderName
}
// ProjectID returns the ProjectID corresponding to the project this cloud is in.
func (g *GCECloud) ProjectID() string {
func (g *Cloud) ProjectID() string {
return g.projectID
}
// NetworkProjectID returns the ProjectID corresponding to the project this cluster's network is in.
func (g *GCECloud) NetworkProjectID() string {
func (g *Cloud) NetworkProjectID() string {
return g.networkProjectID
}
// Region returns the region
func (gce *GCECloud) Region() string {
return gce.region
func (g *Cloud) Region() string {
return g.region
}
// OnXPN returns true if the cluster is running on a cross project network (XPN)
func (gce *GCECloud) OnXPN() bool {
return gce.onXPN
func (g *Cloud) OnXPN() bool {
return g.onXPN
}
// NetworkURL returns the network url
func (gce *GCECloud) NetworkURL() string {
return gce.networkURL
func (g *Cloud) NetworkURL() string {
return g.networkURL
}
// SubnetworkURL returns the subnetwork url
func (gce *GCECloud) SubnetworkURL() string {
return gce.subnetworkURL
func (g *Cloud) SubnetworkURL() string {
return g.subnetworkURL
}
func (gce *GCECloud) IsLegacyNetwork() bool {
return gce.isLegacyNetwork
// IsLegacyNetwork returns true if the cluster is still running a legacy network configuration.
func (g *Cloud) IsLegacyNetwork() bool {
return g.isLegacyNetwork
}
func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactory) {
glog.Infof("Setting up informers for GCECloud")
// SetInformers sets up the informer handlers needed to watch for node zone changes.
func (g *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
klog.Infof("Setting up informers for Cloud")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*v1.Node)
gce.updateNodeZones(nil, node)
g.updateNodeZones(nil, node)
},
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
@ -676,7 +706,7 @@ func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactor
prevNode.Labels[kubeletapis.LabelZoneFailureDomain] {
return
}
gce.updateNodeZones(prevNode, newNode)
g.updateNodeZones(prevNode, newNode)
},
DeleteFunc: func(obj interface{}) {
node, isNode := obj.(*v1.Node)
@ -685,46 +715,46 @@ func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactor
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
klog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
gce.updateNodeZones(node, nil)
g.updateNodeZones(node, nil)
},
})
gce.nodeInformerSynced = nodeInformer.HasSynced
g.nodeInformerSynced = nodeInformer.HasSynced
}
func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) {
gce.nodeZonesLock.Lock()
defer gce.nodeZonesLock.Unlock()
func (g *Cloud) updateNodeZones(prevNode, newNode *v1.Node) {
g.nodeZonesLock.Lock()
defer g.nodeZonesLock.Unlock()
if prevNode != nil {
prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok {
gce.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
if gce.nodeZones[prevZone].Len() == 0 {
gce.nodeZones[prevZone] = nil
g.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
if g.nodeZones[prevZone].Len() == 0 {
g.nodeZones[prevZone] = nil
}
}
}
if newNode != nil {
newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok {
if gce.nodeZones[newZone] == nil {
gce.nodeZones[newZone] = sets.NewString()
if g.nodeZones[newZone] == nil {
g.nodeZones[newZone] = sets.NewString()
}
gce.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
g.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
}
}
}
// HasClusterID returns true if the cluster has a clusterID
func (gce *GCECloud) HasClusterID() bool {
func (g *Cloud) HasClusterID() bool {
return true
}
@ -735,8 +765,8 @@ func isProjectNumber(idOrNumber string) bool {
return err == nil
}
// GCECloud implements cloudprovider.Interface.
var _ cloudprovider.Interface = (*GCECloud)(nil)
// Cloud implements cloudprovider.Interface.
var _ cloudprovider.Interface = (*Cloud)(nil)
func gceNetworkURL(apiEndpoint, project, network string) string {
if apiEndpoint == "" {
@ -841,12 +871,12 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
oauth2.NoContext,
compute.CloudPlatformScope,
compute.ComputeScope)
glog.Infof("Using DefaultTokenSource %#v", tokenSource)
klog.Infof("Using DefaultTokenSource %#v", tokenSource)
if err != nil {
return nil, err
}
} else {
glog.Infof("Using existing Token Source %#v", tokenSource)
klog.Infof("Using existing Token Source %#v", tokenSource)
}
backoff := wait.Backoff{
@ -857,7 +887,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
}
if err := wait.ExponentialBackoff(backoff, func() (bool, error) {
if _, err := tokenSource.Token(); err != nil {
glog.Errorf("error fetching initial token: %v", err)
klog.Errorf("error fetching initial token: %v", err)
return false, nil
}
return true, nil
@ -869,19 +899,19 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
}
func (manager *gceServiceManager) getProjectsAPIEndpoint() string {
projectsApiEndpoint := gceComputeAPIEndpoint + "projects/"
projectsAPIEndpoint := gceComputeAPIEndpoint + "projects/"
if manager.gce.service != nil {
projectsApiEndpoint = manager.gce.service.BasePath
projectsAPIEndpoint = manager.gce.service.BasePath
}
return projectsApiEndpoint
return projectsAPIEndpoint
}
func (manager *gceServiceManager) getProjectsAPIEndpointBeta() string {
projectsApiEndpoint := gceComputeAPIEndpointBeta + "projects/"
projectsAPIEndpoint := gceComputeAPIEndpointBeta + "projects/"
if manager.gce.service != nil {
projectsApiEndpoint = manager.gce.serviceBeta.BasePath
projectsAPIEndpoint = manager.gce.serviceBeta.BasePath
}
return projectsApiEndpoint
return projectsAPIEndpoint
}

View File

@ -22,7 +22,7 @@ import (
compute "google.golang.org/api/compute/v1"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)
@ -62,7 +62,7 @@ func (am *addressManager) HoldAddress() (string, error) {
// could be reserving another address; therefore, it would need to be deleted. In the normal
// case of using a controller address, retrieving the address by name results in the fewest API
// calls since it indicates whether a Delete is necessary before Reserve.
glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType)
klog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType)
// Get the address in case it was orphaned earlier
addr, err := am.svc.GetRegionAddress(am.name, am.region)
if err != nil && !isNotFound(err) {
@ -73,20 +73,20 @@ func (am *addressManager) HoldAddress() (string, error) {
// If address exists, check if the address had the expected attributes.
validationError := am.validateAddress(addr)
if validationError == nil {
glog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType)
klog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType)
return addr.Address, nil
}
glog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError)
klog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError)
err := am.svc.DeleteRegionAddress(addr.Name, am.region)
if err != nil {
if isNotFound(err) {
glog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name)
klog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name)
} else {
return "", err
}
} else {
glog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name)
klog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name)
}
}
@ -96,23 +96,23 @@ func (am *addressManager) HoldAddress() (string, error) {
// ReleaseAddress will release the address if it's owned by the controller.
func (am *addressManager) ReleaseAddress() error {
if !am.tryRelease {
glog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP)
klog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP)
return nil
}
glog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name)
klog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name)
// Controller only ever tries to unreserve the address named with the load balancer's name.
err := am.svc.DeleteRegionAddress(am.name, am.region)
if err != nil {
if isNotFound(err) {
glog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name)
klog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name)
return nil
}
return err
}
glog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name)
klog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name)
return nil
}
@ -130,7 +130,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
reserveErr := am.svc.ReserveRegionAddress(newAddr, am.region)
if reserveErr == nil {
if newAddr.Address != "" {
glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name)
klog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name)
return newAddr.Address, nil
}
@ -139,7 +139,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
return "", err
}
glog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address)
klog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address)
return addr.Address, nil
} else if !isHTTPErrorCode(reserveErr, http.StatusConflict) && !isHTTPErrorCode(reserveErr, http.StatusBadRequest) {
// If the IP is already reserved:
@ -169,10 +169,10 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
if am.isManagedAddress(addr) {
// The address with this name is checked at the beginning of 'HoldAddress()', but for some reason
// it was re-created by this point. May be possible that two controllers are running.
glog.Warning("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
klog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
} else {
// If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it.
glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description)
klog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description)
am.tryRelease = false
}

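
One substantive fix hides in the glog-to-klog sweep above: the old call passed a printf-style format string to glog.Warning, which formats like fmt.Sprint and prints the %v/%q verbs literally; the replacement uses klog.Warningf. A minimal sketch of the difference:

package main

import (
	"k8s.io/klog"
)

func main() {
	name, ip := "lb-addr", "10.0.0.2"

	// Wrong: Warning behaves like fmt.Sprint, so the verbs are emitted as-is.
	klog.Warning("address %q unexpectedly existed with IP %q.", name, ip)

	// Right: Warningf applies the format string.
	klog.Warningf("address %q unexpectedly existed with IP %q.", name, ip)

	klog.Flush()
}
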
View File

@ -19,7 +19,7 @@ package gce
import (
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
computealpha "google.golang.org/api/compute/v0.alpha"
computebeta "google.golang.org/api/compute/v0.beta"
@ -42,106 +42,106 @@ func newAddressMetricContextWithVersion(request, region, version string) *metric
// The caller is allocated a random IP if no ipAddress is specified. If an
// ipAddress is specified, it must belong to the current project, e.g. an
// ephemeral IP associated with a global forwarding rule.
func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error {
func (g *Cloud) ReserveGlobalAddress(addr *compute.Address) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("reserve", "")
return mc.Observe(gce.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr))
return mc.Observe(g.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr))
}
// DeleteGlobalAddress deletes a global address by name.
func (gce *GCECloud) DeleteGlobalAddress(name string) error {
func (g *Cloud) DeleteGlobalAddress(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("delete", "")
return mc.Observe(gce.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name)))
}
// GetGlobalAddress returns the global address by name.
func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) {
func (g *Cloud) GetGlobalAddress(name string) (*compute.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("get", "")
v, err := gce.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name))
v, err := g.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// ReserveRegionAddress creates a region address
func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error {
func (g *Cloud) ReserveRegionAddress(addr *compute.Address, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("reserve", region)
return mc.Observe(gce.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
return mc.Observe(g.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
}
// ReserveAlphaRegionAddress creates an Alpha, regional address.
func (gce *GCECloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
func (g *Cloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("reserve", region)
return mc.Observe(gce.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
return mc.Observe(g.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
}
// ReserveBetaRegionAddress creates a beta region address
func (gce *GCECloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
func (g *Cloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("reserve", region)
return mc.Observe(gce.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
return mc.Observe(g.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
}
// DeleteRegionAddress deletes a region address by name.
func (gce *GCECloud) DeleteRegionAddress(name, region string) error {
func (g *Cloud) DeleteRegionAddress(name, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("delete", region)
return mc.Observe(gce.c.Addresses().Delete(ctx, meta.RegionalKey(name, region)))
return mc.Observe(g.c.Addresses().Delete(ctx, meta.RegionalKey(name, region)))
}
// GetRegionAddress returns the region address by name
func (gce *GCECloud) GetRegionAddress(name, region string) (*compute.Address, error) {
func (g *Cloud) GetRegionAddress(name, region string) (*compute.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("get", region)
v, err := gce.c.Addresses().Get(ctx, meta.RegionalKey(name, region))
v, err := g.c.Addresses().Get(ctx, meta.RegionalKey(name, region))
return v, mc.Observe(err)
}
// GetAlphaRegionAddress returns the Alpha, regional address by name.
func (gce *GCECloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
func (g *Cloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("get", region)
v, err := gce.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region))
v, err := g.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region))
return v, mc.Observe(err)
}
// GetBetaRegionAddress returns the beta region address by name
func (gce *GCECloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
func (g *Cloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("get", region)
v, err := gce.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region))
v, err := g.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region))
return v, mc.Observe(err)
}
// GetRegionAddressByIP returns the regional address matching the given IP address.
func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) {
func (g *Cloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("list", region)
addrs, err := gce.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress))
addrs, err := g.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress))
mc.Observe(err)
if err != nil {
@ -149,7 +149,7 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad
}
if len(addrs) > 1 {
glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs))
klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs))
}
for _, addr := range addrs {
if addr.Address == ipAddress {
@ -160,12 +160,12 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad
}
// GetBetaRegionAddressByIP returns the beta regional address matching the given IP address.
func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) {
func (g *Cloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newAddressMetricContext("list", region)
addrs, err := gce.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress))
addrs, err := g.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress))
mc.Observe(err)
if err != nil {
@ -173,7 +173,7 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput
}
if len(addrs) > 1 {
glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs))
klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs))
}
for _, addr := range addrs {
if addr.Address == ipAddress {
@ -184,11 +184,11 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput
}
// TODO(#51665): retire this function once Network Tiers becomes Beta in GCP.
func (gce *GCECloud) getNetworkTierFromAddress(name, region string) (string, error) {
if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
func (g *Cloud) getNetworkTierFromAddress(name, region string) (string, error) {
if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
return cloud.NetworkTierDefault.ToGCEValue(), nil
}
addr, err := gce.GetAlphaRegionAddress(name, region)
addr, err := g.GetAlphaRegionAddress(name, region)
if err != nil {
return handleAlphaNetworkTierGetError(err)
}

View File

@ -21,23 +21,24 @@ import (
)
const (
// alpha: v1.8 (for Services)
// AlphaFeatureNetworkTiers allows Services backed by a GCP load balancer to choose
// what network tier to use. Currently supports "Standard" and "Premium" (default).
//
// Allows Services backed by a GCP load balancer to choose what network
// tier to use. Currently supports "Standard" and "Premium" (default).
// alpha: v1.8 (for Services)
AlphaFeatureNetworkTiers = "NetworkTiers"
AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
)
// AlphaFeatureGate contains a mapping of alpha features to whether they are enabled
type AlphaFeatureGate struct {
features map[string]bool
}
// Enabled returns true if the provided alpha feature is enabled
func (af *AlphaFeatureGate) Enabled(key string) bool {
return af.features[key]
}
// NewAlphaFeatureGate marks the provided alpha features as enabled
func NewAlphaFeatureGate(features []string) *AlphaFeatureGate {
featureMap := make(map[string]bool)
for _, name := range features {
@ -46,9 +47,9 @@ func NewAlphaFeatureGate(features []string) *AlphaFeatureGate {
return &AlphaFeatureGate{featureMap}
}
func (gce *GCECloud) alphaFeatureEnabled(feature string) error {
if !gce.AlphaFeatureGate.Enabled(feature) {
return fmt.Errorf("alpha feature %q is not enabled.", feature)
func (g *Cloud) alphaFeatureEnabled(feature string) error {
if !g.AlphaFeatureGate.Enabled(feature) {
return fmt.Errorf("alpha feature %q is not enabled", feature)
}
return nil
}
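
A minimal sketch of how the gate is consumed; in the provider the feature list comes from the gce.conf config, so the literal list here is illustrative:

package example

import gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

// networkTiersEnabled shows the construct-then-query flow of the gate.
func networkTiersEnabled() bool {
    gate := gce.NewAlphaFeatureGate([]string{gce.AlphaFeatureNetworkTiers})
    return gate.Enabled(gce.AlphaFeatureNetworkTiers) // true: it was listed
}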

View File

@ -19,12 +19,13 @@ package gce
import (
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)
// LoadBalancerType defines a specific type for holding load balancer types (e.g. Internal)
type LoadBalancerType string
const (
@ -33,23 +34,30 @@ const (
// Currently, only "internal" is supported.
ServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type"
// LBTypeInternal is the constant for the official internal type.
LBTypeInternal LoadBalancerType = "Internal"
// Deprecating the lowercase spelling of Internal.
deprecatedTypeInternalLowerCase LoadBalancerType = "internal"
// ServiceAnnotationInternalBackendShare is annotated on a service with "true" when users
// ServiceAnnotationILBBackendShare is annotated on a service with "true" when users
// want to share GCP Backend Services for a set of internal load balancers.
// ALPHA feature - this may be removed in a future release.
ServiceAnnotationILBBackendShare = "alpha.cloud.google.com/load-balancer-backend-share"
// This annotation did not correctly specify "alpha", so both annotations will be checked.
deprecatedServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share"
// NetworkTierAnnotationKey is annotated on a Service object to indicate which
// network tier a GCP LB should use. The valid values are "Standard" and
// "Premium" (default).
NetworkTierAnnotationKey = "cloud.google.com/network-tier"
NetworkTierAnnotationKey = "cloud.google.com/network-tier"
// NetworkTierAnnotationStandard is an annotation to indicate the Service is on the Standard network tier
NetworkTierAnnotationStandard = cloud.NetworkTierStandard
NetworkTierAnnotationPremium = cloud.NetworkTierPremium
// NetworkTierAnnotationPremium is an annotation to indicate the Service is on the Premium network tier
NetworkTierAnnotationPremium = cloud.NetworkTierPremium
)
// GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled.
@ -82,7 +90,7 @@ func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool {
// Check for deprecated annotation key
if l, exists := service.Annotations[deprecatedServiceAnnotationILBBackendShare]; exists && l == "true" {
glog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare)
klog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare)
return true
}
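
For reference, a Service requesting a shared internal load balancer would carry both annotations; a sketch (the Service itself is illustrative):

package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// internalLBService builds a Service annotated for an internal GCP LB
// that shares backend services with other internal LBs.
func internalLBService() *v1.Service {
    return &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name: "example", // illustrative
            Annotations: map[string]string{
                gce.ServiceAnnotationLoadBalancerType: string(gce.LBTypeInternal),
                gce.ServiceAnnotationILBBackendShare:  "true",
            },
        },
    }
}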

View File

@ -35,182 +35,201 @@ func newBackendServiceMetricContextWithVersion(request, region, version string)
}
// GetGlobalBackendService retrieves a backend by name.
func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendService, error) {
func (g *Cloud) GetGlobalBackendService(name string) (*compute.BackendService, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("get", "")
v, err := gce.c.BackendServices().Get(ctx, meta.GlobalKey(name))
v, err := g.c.BackendServices().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// GetBetaGlobalBackendService retrieves a beta backend by name.
func (gce *GCECloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) {
func (g *Cloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContextWithVersion("get", "", computeBetaVersion)
v, err := gce.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name))
v, err := g.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// GetAlphaGlobalBackendService retrieves an alpha backend by name.
func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) {
func (g *Cloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion)
v, err := gce.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name))
v, err := g.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// UpdateGlobalBackendService applies the given BackendService as an update to
// an existing service.
func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error {
func (g *Cloud) UpdateGlobalBackendService(bg *compute.BackendService) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("update", "")
return mc.Observe(gce.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
return mc.Observe(g.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
}
// UpdateBetaGlobalBackendService applies the given beta BackendService as an
// update to an existing service.
func (g *Cloud) UpdateBetaGlobalBackendService(bg *computebeta.BackendService) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContextWithVersion("update", "", computeBetaVersion)
return mc.Observe(g.c.BetaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
}
// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an
// update to an existing service.
func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
func (g *Cloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("update", "")
return mc.Observe(gce.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion)
return mc.Observe(g.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
}
// DeleteGlobalBackendService deletes the given BackendService by name.
func (gce *GCECloud) DeleteGlobalBackendService(name string) error {
func (g *Cloud) DeleteGlobalBackendService(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("delete", "")
return mc.Observe(gce.c.BackendServices().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.BackendServices().Delete(ctx, meta.GlobalKey(name)))
}
// CreateGlobalBackendService creates the given BackendService.
func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) error {
func (g *Cloud) CreateGlobalBackendService(bg *compute.BackendService) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("create", "")
return mc.Observe(gce.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
return mc.Observe(g.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
}
// CreateBetaGlobalBackendService creates the given beta BackendService.
func (g *Cloud) CreateBetaGlobalBackendService(bg *computebeta.BackendService) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContextWithVersion("create", "", computeBetaVersion)
return mc.Observe(g.c.BetaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
}
// CreateAlphaGlobalBackendService creates the given alpha BackendService.
func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
func (g *Cloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("create", "")
return mc.Observe(gce.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion)
return mc.Observe(g.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
}
// ListGlobalBackendServices lists all backend services in the project.
func (gce *GCECloud) ListGlobalBackendServices() ([]*compute.BackendService, error) {
func (g *Cloud) ListGlobalBackendServices() ([]*compute.BackendService, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("list", "")
v, err := gce.c.BackendServices().List(ctx, filter.None)
v, err := g.c.BackendServices().List(ctx, filter.None)
return v, mc.Observe(err)
}
// GetGlobalBackendServiceHealth returns the health of the BackendService
// identified by the given name, in the given instanceGroup. The
// instanceGroupLink is the fully qualified self link of an instance group.
func (gce *GCECloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
func (g *Cloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("get_health", "")
groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink}
v, err := gce.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef)
v, err := g.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef)
return v, mc.Observe(err)
}
// GetRegionBackendService retrieves a backend by name.
func (gce *GCECloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) {
func (g *Cloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("get", region)
v, err := gce.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region))
v, err := g.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region))
return v, mc.Observe(err)
}
// UpdateRegionBackendService applies the given BackendService as an update to
// an existing service.
func (gce *GCECloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error {
func (g *Cloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("update", region)
return mc.Observe(gce.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg))
return mc.Observe(g.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg))
}
// DeleteRegionBackendService deletes the given BackendService by name.
func (gce *GCECloud) DeleteRegionBackendService(name, region string) error {
func (g *Cloud) DeleteRegionBackendService(name, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("delete", region)
return mc.Observe(gce.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region)))
return mc.Observe(g.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region)))
}
// CreateRegionBackendService creates the given BackendService.
func (gce *GCECloud) CreateRegionBackendService(bg *compute.BackendService, region string) error {
func (g *Cloud) CreateRegionBackendService(bg *compute.BackendService, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("create", region)
return mc.Observe(gce.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg))
return mc.Observe(g.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg))
}
// ListRegionBackendServices lists all backend services in the project.
func (gce *GCECloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) {
func (g *Cloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("list", region)
v, err := gce.c.RegionBackendServices().List(ctx, region, filter.None)
v, err := g.c.RegionBackendServices().List(ctx, region, filter.None)
return v, mc.Observe(err)
}
// GetRegionalBackendServiceHealth returns the health of the BackendService
// identified by the given name, in the given instanceGroup. The
// instanceGroupLink is the fully qualified self link of an instance group.
func (gce *GCECloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
func (g *Cloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContext("get_health", region)
ref := &compute.ResourceGroupReference{Group: instanceGroupLink}
v, err := gce.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref)
v, err := g.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref)
return v, mc.Observe(err)
}
// SetSecurityPolicyForBetaGlobalBackendService sets the given
// SecurityPolicyReference for the BackendService identified by the given name.
func (gce *GCECloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error {
func (g *Cloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeBetaVersion)
return mc.Observe(gce.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference))
return mc.Observe(g.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference))
}
// SetSecurityPolicyForAlphaGlobalBackendService sets the given
// SecurityPolicyReference for the BackendService identified by the given name.
func (gce *GCECloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error {
func (g *Cloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeAlphaVersion)
return mc.Observe(gce.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference))
return mc.Observe(g.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference))
}
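
A sketch of the create-then-inspect flow these wrappers support; the backend name is illustrative and the instance-group self link is passed in by the caller:

package example

import (
    compute "google.golang.org/api/compute/v1"

    gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// createAndCheck creates a global backend service, then asks for the
// health of one of its instance groups.
func createAndCheck(g *gce.Cloud, igLink string) (*compute.BackendServiceGroupHealth, error) {
    bs := &compute.BackendService{Name: "example-bs"} // illustrative
    if err := g.CreateGlobalBackendService(bs); err != nil {
        return nil, err
    }
    return g.GetGlobalBackendServiceHealth("example-bs", igLink)
}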

View File

@ -29,43 +29,43 @@ func newCertMetricContext(request string) *metricContext {
}
// GetSslCertificate returns the SslCertificate by name.
func (gce *GCECloud) GetSslCertificate(name string) (*compute.SslCertificate, error) {
func (g *Cloud) GetSslCertificate(name string) (*compute.SslCertificate, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newCertMetricContext("get")
v, err := gce.c.SslCertificates().Get(ctx, meta.GlobalKey(name))
v, err := g.c.SslCertificates().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// CreateSslCertificate creates and returns a SslCertificate.
func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) {
func (g *Cloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newCertMetricContext("create")
err := gce.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts)
err := g.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts)
if err != nil {
return nil, mc.Observe(err)
}
return gce.GetSslCertificate(sslCerts.Name)
return g.GetSslCertificate(sslCerts.Name)
}
// DeleteSslCertificate deletes the SslCertificate by name.
func (gce *GCECloud) DeleteSslCertificate(name string) error {
func (g *Cloud) DeleteSslCertificate(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newCertMetricContext("delete")
return mc.Observe(gce.c.SslCertificates().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.SslCertificates().Delete(ctx, meta.GlobalKey(name)))
}
// ListSslCertificates lists all SslCertificates in the project.
func (gce *GCECloud) ListSslCertificates() ([]*compute.SslCertificate, error) {
func (g *Cloud) ListSslCertificates() ([]*compute.SslCertificate, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newCertMetricContext("list")
v, err := gce.c.SslCertificates().List(ctx, filter.None)
v, err := g.c.SslCertificates().List(ctx, filter.None)
return v, mc.Observe(err)
}
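
Consuming the list wrapper is a single call plus iteration; a sketch assuming g was constructed elsewhere:

package example

import (
    "fmt"

    gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// printCertNames lists the project's SslCertificates by name.
func printCertNames(g *gce.Cloud) error {
    certs, err := g.ListSslCertificates()
    if err != nil {
        return err
    }
    for _, c := range certs {
        fmt.Println(c.Name)
    }
    return nil
}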

View File

@ -25,7 +25,6 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -33,22 +32,31 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
const (
// Key used to persist UIDs to configmaps.
// UIDConfigMapName is the key used to persist UIDs to configmaps.
UIDConfigMapName = "ingress-uid"
// Namespace which contains the above config map
// UIDNamespace is the namespace which contains the above config map
UIDNamespace = metav1.NamespaceSystem
// Data keys for the specific ids
UIDCluster = "uid"
UIDProvider = "provider-uid"
// UIDCluster is the data key for looking up the cluster's UID
UIDCluster = "uid"
// UIDProvider is the data key for looking up the provider's UID
UIDProvider = "provider-uid"
// UIDLengthBytes is the length of a UID
UIDLengthBytes = 8
// Frequency of the updateFunc event handler being called
// This does not actually query the apiserver for current state - the local cache value is used.
updateFuncFrequency = 10 * time.Minute
)
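
The same config map the watcher caches can also be read directly with client-go; a sketch (clientset construction is assumed, and readClusterUIDs is hypothetical):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"

    gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// readClusterUIDs fetches the ingress-uid config map and returns the
// cluster and provider UIDs stored under the keys above.
func readClusterUIDs(client clientset.Interface) (string, string, error) {
    cm, err := client.CoreV1().ConfigMaps(gce.UIDNamespace).Get(gce.UIDConfigMapName, metav1.GetOptions{})
    if err != nil {
        return "", "", err
    }
    return cm.Data[gce.UIDCluster], cm.Data[gce.UIDProvider], nil
}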
// ClusterID is the struct for maintaining information about this cluster's ID
type ClusterID struct {
idLock sync.RWMutex
client clientset.Interface
@ -59,17 +67,17 @@ type ClusterID struct {
}
// Continually watches for changes to the cluster id config map
func (gce *GCECloud) watchClusterID() {
gce.ClusterID = ClusterID{
func (g *Cloud) watchClusterID(stop <-chan struct{}) {
g.ClusterID = ClusterID{
cfgMapKey: fmt.Sprintf("%v/%v", UIDNamespace, UIDConfigMapName),
client: gce.client,
client: g.client,
}
mapEventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
m, ok := obj.(*v1.ConfigMap)
if !ok || m == nil {
glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok)
klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok)
return
}
if m.Namespace != UIDNamespace ||
@ -77,13 +85,13 @@ func (gce *GCECloud) watchClusterID() {
return
}
glog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data)
gce.ClusterID.update(m)
klog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data)
g.ClusterID.update(m)
},
UpdateFunc: func(old, cur interface{}) {
m, ok := cur.(*v1.ConfigMap)
if !ok || m == nil {
glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok)
klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok)
return
}
@ -96,16 +104,16 @@ func (gce *GCECloud) watchClusterID() {
return
}
glog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data)
gce.ClusterID.update(m)
klog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data)
g.ClusterID.update(m)
},
}
listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything())
listerWatcher := cache.NewListWatchFromClient(g.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything())
var controller cache.Controller
gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler)
g.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler)
controller.Run(nil)
controller.Run(stop)
}
// GetID returns the id which is unique to this cluster
@ -131,9 +139,9 @@ func (ci *ClusterID) GetID() (string, error) {
return *ci.clusterID, nil
}
// GetFederationId returns the id which could represent the entire Federation
// GetFederationID returns the id which could represent the entire Federation
// or just the cluster if not federated.
func (ci *ClusterID) GetFederationId() (string, bool, error) {
func (ci *ClusterID) GetFederationID() (string, bool, error) {
if err := ci.getOrInitialize(); err != nil {
return "", false, err
}
@ -141,7 +149,7 @@ func (ci *ClusterID) GetFederationId() (string, bool, error) {
ci.idLock.RLock()
defer ci.idLock.RUnlock()
if ci.clusterID == nil {
return "", false, errors.New("Could not retrieve cluster id")
return "", false, errors.New("could not retrieve cluster id")
}
// If provider ID is not set, return false
@ -157,7 +165,7 @@ func (ci *ClusterID) GetFederationId() (string, bool, error) {
// before the watch has begun.
func (ci *ClusterID) getOrInitialize() error {
if ci.store == nil {
return errors.New("GCECloud.ClusterID is not ready. Call Initialize() before using.")
return errors.New("Cloud.ClusterID is not ready. Call Initialize() before using")
}
if ci.clusterID != nil {
@ -172,12 +180,12 @@ func (ci *ClusterID) getOrInitialize() error {
}
// The configmap does not exist - let's try creating one.
newId, err := makeUID()
newID, err := makeUID()
if err != nil {
return err
}
glog.V(4).Infof("Creating clusteriD: %v", newId)
klog.V(4).Infof("Creating clusteriD: %v", newID)
cfg := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: UIDConfigMapName,
@ -185,16 +193,16 @@ func (ci *ClusterID) getOrInitialize() error {
},
}
cfg.Data = map[string]string{
UIDCluster: newId,
UIDProvider: newId,
UIDCluster: newID,
UIDProvider: newID,
}
if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil {
glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err)
klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err)
return err
}
glog.V(2).Infof("Created a config map containing clusteriD: %v", newId)
klog.V(2).Infof("Created a config map containing clusteriD: %v", newID)
ci.update(cfg)
return nil
}
@ -211,7 +219,7 @@ func (ci *ClusterID) getConfigMap() (bool, error) {
m, ok := item.(*v1.ConfigMap)
if !ok || m == nil {
err = fmt.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", item, ok)
glog.Error(err)
klog.Error(err)
return false, err
}
ci.update(m)
@ -224,8 +232,8 @@ func (ci *ClusterID) update(m *v1.ConfigMap) {
if clusterID, exists := m.Data[UIDCluster]; exists {
ci.clusterID = &clusterID
}
if provId, exists := m.Data[UIDProvider]; exists {
ci.providerID = &provId
if provID, exists := m.Data[UIDProvider]; exists {
ci.providerID = &provID
}
}

View File

@ -16,17 +16,24 @@ limitations under the License.
package gce
import "context"
import (
"context"
"fmt"
"google.golang.org/api/container/v1"
"k8s.io/klog"
)
func newClustersMetricContext(request, zone string) *metricContext {
return newGenericMetricContext("clusters", request, unusedMetricLabel, zone, computeV1Version)
}
func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) {
// ListClusters will return a list of cluster names for the associated project
func (g *Cloud) ListClusters(ctx context.Context) ([]string, error) {
allClusters := []string{}
for _, zone := range gce.managedZones {
clusters, err := gce.listClustersInZone(zone)
for _, zone := range g.managedZones {
clusters, err := g.listClustersInZone(zone)
if err != nil {
return nil, err
}
@ -37,21 +44,61 @@ func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) {
return allClusters, nil
}
func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, error) {
// GetManagedClusters will return the cluster objects associated with this project
func (g *Cloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) {
managedClusters := []*container.Cluster{}
if g.regional {
var err error
managedClusters, err = g.getClustersInLocation(g.region)
if err != nil {
return nil, err
}
} else if len(g.managedZones) >= 1 {
for _, zone := range g.managedZones {
clusters, err := g.getClustersInLocation(zone)
if err != nil {
return nil, err
}
managedClusters = append(managedClusters, clusters...)
}
} else {
return nil, fmt.Errorf("no zones associated with this cluster(%s)", g.ProjectID())
}
return managedClusters, nil
}
// Master returns the DNS address of the master
func (g *Cloud) Master(ctx context.Context, clusterName string) (string, error) {
return "k8s-" + clusterName + "-master.internal", nil
}
func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) {
mc := newClustersMetricContext("list_zone", zone)
// TODO: use PageToken to list all not just the first 500
list, err := gce.containerService.Projects.Zones.Clusters.List(gce.projectID, zone).Do()
func (g *Cloud) listClustersInZone(zone string) ([]string, error) {
clusters, err := g.getClustersInLocation(zone)
if err != nil {
return nil, mc.Observe(err)
return nil, err
}
result := []string{}
for _, cluster := range list.Clusters {
for _, cluster := range clusters {
result = append(result, cluster.Name)
}
return result, mc.Observe(nil)
return result, nil
}
func (g *Cloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster, error) {
// TODO: Issue/68913 migrate metric to list_location instead of list_zone.
mc := newClustersMetricContext("list_zone", zoneOrRegion)
// TODO: use PageToken to list all not just the first 500
location := getLocationName(g.projectID, zoneOrRegion)
list, err := g.containerService.Projects.Locations.Clusters.List(location).Do()
if err != nil {
return nil, mc.Observe(err)
}
if list.Header.Get("nextPageToken") != "" {
klog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken"))
}
return list.Clusters, mc.Observe(nil)
}
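
From the caller's side the regional/zonal split above is invisible; a sketch (printClusterNames is hypothetical):

package example

import (
    "context"
    "fmt"

    gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// printClusterNames lists GKE clusters for the project, regardless of
// whether the cloud is configured regionally or with managed zones.
func printClusterNames(g *gce.Cloud) error {
    clusters, err := g.GetManagedClusters(context.Background())
    if err != nil {
        return err
    }
    for _, c := range clusters {
        fmt.Println(c.Name)
    }
    return nil
}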

View File

@ -28,25 +28,28 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"github.com/golang/glog"
computebeta "google.golang.org/api/compute/v0.beta"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
"k8s.io/kubernetes/pkg/features"
)
// DiskType defines a specific type for holding disk types (e.g. pd-ssd)
type DiskType string
const (
DiskTypeSSD = "pd-ssd"
// DiskTypeSSD is the type for persistent SSD storage
DiskTypeSSD = "pd-ssd"
// DiskTypeStandard is the type for standard persistent storage
DiskTypeStandard = "pd-standard"
diskTypeDefault = DiskTypeStandard
@ -85,7 +88,7 @@ type diskServiceManager interface {
// Attach a persistent disk on GCE with the given disk spec to the specified instance.
AttachDiskOnCloudProvider(
disk *GCEDisk,
disk *Disk,
readWrite string,
instanceZone string,
instanceName string) error
@ -96,18 +99,18 @@ type diskServiceManager interface {
instanceName string,
devicePath string) error
ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error
RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error
ResizeDiskOnCloudProvider(disk *Disk, sizeGb int64, zone string) error
RegionalResizeDiskOnCloudProvider(disk *Disk, sizeGb int64) error
// Gets the persistent disk from GCE with the given diskName.
GetDiskFromCloudProvider(zone string, diskName string) (*GCEDisk, error)
GetDiskFromCloudProvider(zone string, diskName string) (*Disk, error)
// Gets the regional persistent disk from GCE with the given diskName.
GetRegionalDiskFromCloudProvider(diskName string) (*GCEDisk, error)
GetRegionalDiskFromCloudProvider(diskName string) (*Disk, error)
}
type gceServiceManager struct {
gce *GCECloud
gce *Cloud
}
var _ diskServiceManager = &gceServiceManager{}
@ -119,7 +122,7 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
diskType string,
zone string) error {
diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useBetaAPI */)
manager.gce.region /* diskRegion */, singleZone{zone}, diskType)
if err != nil {
return err
}
@ -148,17 +151,17 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
}
diskTypeURI, err := manager.getDiskTypeURI(
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */)
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType)
if err != nil {
return err
}
fullyQualifiedReplicaZones := []string{}
for _, replicaZone := range replicaZones.UnsortedList() {
fullyQualifiedReplicaZones = append(
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone))
}
diskToCreateBeta := &computebeta.Disk{
diskToCreate := &compute.Disk{
Name: name,
SizeGb: sizeGb,
Description: tagsStr,
@ -168,11 +171,11 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.BetaRegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreateBeta)
return manager.gce.c.RegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreate)
}
func (manager *gceServiceManager) AttachDiskOnCloudProvider(
disk *GCEDisk,
disk *Disk,
readWrite string,
instanceZone string,
instanceName string) error {
@ -205,13 +208,13 @@ func (manager *gceServiceManager) DetachDiskOnCloudProvider(
func (manager *gceServiceManager) GetDiskFromCloudProvider(
zone string,
diskName string) (*GCEDisk, error) {
diskName string) (*Disk, error) {
if zone == "" {
return nil, fmt.Errorf("Can not fetch disk %q. Zone is empty.", diskName)
return nil, fmt.Errorf("can not fetch disk %q, zone is empty", diskName)
}
if diskName == "" {
return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone)
return nil, fmt.Errorf("can not fetch disk, zone is specified (%q), but disk name is empty", zone)
}
ctx, cancel := cloud.ContextWithCallTimeout()
@ -231,7 +234,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err)
}
return &GCEDisk{
return &Disk{
ZoneInfo: zoneInfo,
Region: region,
Name: diskStable.Name,
@ -242,7 +245,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
}
func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
diskName string) (*GCEDisk, error) {
diskName string) (*Disk, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
@ -250,7 +253,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
diskBeta, err := manager.gce.c.BetaRegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region))
diskBeta, err := manager.gce.c.RegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region))
if err != nil {
return nil, err
}
@ -260,7 +263,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
zones.Insert(lastComponent(zoneURI))
}
return &GCEDisk{
return &Disk{
ZoneInfo: multiZone{zones},
Region: lastComponent(diskBeta.Region),
Name: diskBeta.Name,
@ -287,10 +290,10 @@ func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider(
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.BetaRegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region))
return manager.gce.c.RegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region))
}
func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) {
func (manager *gceServiceManager) getDiskSourceURI(disk *Disk) (string, error) {
getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
switch zoneInfo := disk.ZoneInfo.(type) {
@ -325,14 +328,9 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error
}
func (manager *gceServiceManager) getDiskTypeURI(
diskRegion string, diskZoneInfo zoneType, diskType string, useBetaAPI bool) (string, error) {
diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) {
var getProjectsAPIEndpoint string
if useBetaAPI {
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta()
} else {
getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
}
getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
switch zoneInfo := diskZoneInfo.(type) {
case singleZone:
@ -361,15 +359,8 @@ func (manager *gceServiceManager) getDiskTypeURI(
}
}
func (manager *gceServiceManager) getReplicaZoneURI(zone string, useBetaAPI bool) string {
var getProjectsAPIEndpoint string
if useBetaAPI {
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta()
} else {
getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
}
return getProjectsAPIEndpoint + fmt.Sprintf(
func (manager *gceServiceManager) getReplicaZoneURI(zone string) string {
return manager.getProjectsAPIEndpoint() + fmt.Sprintf(
replicaZoneURITemplateSingleZone,
manager.gce.projectID,
zone)
@ -402,14 +393,14 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string,
region, err := GetGCERegion(zone)
if err != nil {
glog.Warningf("failed to parse GCE region from zone %q: %v", zone, err)
klog.Warningf("failed to parse GCE region from zone %q: %v", zone, err)
region = manager.gce.region
}
return region, nil
}
func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error {
func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *Disk, sizeGb int64, zone string) error {
resizeServiceRequest := &compute.DisksResizeRequest{
SizeGb: sizeGb,
}
@ -419,18 +410,18 @@ func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeG
return manager.gce.c.Disks().Resize(ctx, meta.ZonalKey(disk.Name, zone), resizeServiceRequest)
}
func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error {
func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *Disk, sizeGb int64) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}
resizeServiceRequest := &computebeta.RegionDisksResizeRequest{
resizeServiceRequest := &compute.RegionDisksResizeRequest{
SizeGb: sizeGb,
}
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
return manager.gce.c.BetaRegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest)
return manager.gce.c.RegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest)
}
// Disks is an interface for manipulating GCE PDs.
@ -472,13 +463,14 @@ type Disks interface {
GetAutoLabelsForPD(name string, zone string) (map[string]string, error)
}
// GCECloud implements Disks.
var _ Disks = (*GCECloud)(nil)
// Cloud implements Disks.
var _ Disks = (*Cloud)(nil)
// GCECloud implements PVLabeler.
var _ cloudprovider.PVLabeler = (*GCECloud)(nil)
// Cloud implements PVLabeler.
var _ cloudprovider.PVLabeler = (*Cloud)(nil)
type GCEDisk struct {
// Disk holds all relevant data about an instance of GCE storage
type Disk struct {
ZoneInfo zoneType
Region string
Name string
@ -510,7 +502,8 @@ func newDiskMetricContextRegional(request, region string) *metricContext {
return newGenericMetricContext("disk", request, region, unusedMetricLabel, computeV1Version)
}
func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// GetLabelsForVolume retrieves the label info for the provided volume
func (g *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore any volumes that are being provisioned
if pv.Spec.GCEPersistentDisk.PDName == volume.ProvisionedVolumeName {
return nil, nil
@ -519,7 +512,7 @@ func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo
// If the zone is already labeled, honor the hint
zone := pv.Labels[kubeletapis.LabelZoneFailureDomain]
labels, err := gce.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone)
labels, err := g.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone)
if err != nil {
return nil, err
}
@ -527,28 +520,30 @@ func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo
return labels, nil
}
func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
// AttachDisk attaches the given disk to the node with the specified NodeName.
// The current instance is used when instanceID is an empty string.
func (g *Cloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
instanceName := mapNodeNameToInstanceName(nodeName)
instance, err := gce.getInstanceByName(instanceName)
instance, err := g.getInstanceByName(instanceName)
if err != nil {
return fmt.Errorf("error getting instance %q", instanceName)
}
// Try fetching as regional PD
var disk *GCEDisk
var disk *Disk
var mc *metricContext
if regional && utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
disk, err = gce.getRegionalDiskByName(diskName)
disk, err = g.getRegionalDiskByName(diskName)
if err != nil {
return err
}
mc = newDiskMetricContextRegional("attach", gce.region)
mc = newDiskMetricContextRegional("attach", g.region)
} else {
disk, err = gce.getDiskByName(diskName, instance.Zone)
disk, err = g.getDiskByName(diskName, instance.Zone)
if err != nil {
return err
}
mc = newDiskMetricContextZonal("attach", gce.region, instance.Zone)
mc = newDiskMetricContextZonal("attach", g.region, instance.Zone)
}
readWrite := "READ_WRITE"
@ -556,16 +551,18 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn
readWrite = "READ_ONLY"
}
return mc.Observe(gce.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name))
return mc.Observe(g.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name))
}
func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error {
// DetachDisk detaches the given disk from the node with the specified NodeName.
// The current instance is used when nodeName is an empty string.
func (g *Cloud) DetachDisk(devicePath string, nodeName types.NodeName) error {
instanceName := mapNodeNameToInstanceName(nodeName)
inst, err := gce.getInstanceByName(instanceName)
inst, err := g.getInstanceByName(instanceName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
glog.Warningf(
klog.Warningf(
"Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.",
instanceName,
devicePath)
@ -575,17 +572,18 @@ func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) erro
return fmt.Errorf("error getting instance %q", instanceName)
}
mc := newDiskMetricContextZonal("detach", gce.region, inst.Zone)
return mc.Observe(gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath))
mc := newDiskMetricContextZonal("detach", g.region, inst.Zone)
return mc.Observe(g.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath))
}
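
A zonal attach/detach round trip through these methods might look as follows; a sketch with illustrative disk and node names (regional=false keeps the zonal path):

package example

import (
    "k8s.io/apimachinery/pkg/types"

    gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

// attachThenDetach attaches a zonal PD read-write and detaches it again.
func attachThenDetach(g *gce.Cloud) error {
    node := types.NodeName("example-node") // illustrative
    if err := g.AttachDisk("example-pd", node, false /* readOnly */, false /* regional */); err != nil {
        return err
    }
    return g.DetachDisk("example-pd", node)
}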
func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
// DiskIsAttached checks if a disk is attached to the node with the specified NodeName.
func (g *Cloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
instanceName := mapNodeNameToInstanceName(nodeName)
instance, err := gce.getInstanceByName(instanceName)
instance, err := g.getInstanceByName(instanceName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
glog.Warningf(
klog.Warningf(
"Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.",
instanceName,
diskName)
@ -605,17 +603,19 @@ func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (b
return false, nil
}
func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
// DisksAreAttached is a batch function to check if a list of disks are attached
// to the node with the specified NodeName.
func (g *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
attached := make(map[string]bool)
for _, diskName := range diskNames {
attached[diskName] = false
}
instanceName := mapNodeNameToInstanceName(nodeName)
instance, err := gce.getInstanceByName(instanceName)
instance, err := g.getInstanceByName(instanceName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
glog.Warningf(
klog.Warningf(
"Instance %q does not exist. DisksAreAttached will assume PD %v are not attached to it.",
instanceName,
diskNames)
@ -640,11 +640,11 @@ func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeNam
// CreateDisk creates a new Persistent Disk, with the specified name &
// size, in the specified zone. It stores the specified tags encoded in
// JSON in the Description field.
func (gce *GCECloud) CreateDisk(
func (g *Cloud) CreateDisk(
name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {
// Do not allow creation of PDs in zones that do not have nodes. Such PDs
// are not currently usable.
curZones, err := gce.GetAllCurrentZones()
curZones, err := g.GetAllCurrentZones()
if err != nil {
return err
}
@ -652,7 +652,7 @@ func (gce *GCECloud) CreateDisk(
return fmt.Errorf("kubernetes does not have a node in zone %q", zone)
}
tagsStr, err := gce.encodeDiskTags(tags)
tagsStr, err := g.encodeDiskTags(tags)
if err != nil {
return err
}
@ -662,14 +662,14 @@ func (gce *GCECloud) CreateDisk(
return err
}
mc := newDiskMetricContextZonal("create", gce.region, zone)
mc := newDiskMetricContextZonal("create", g.region, zone)
err = gce.manager.CreateDiskOnCloudProvider(
err = g.manager.CreateDiskOnCloudProvider(
name, sizeGb, tagsStr, diskType, zone)
mc.Observe(err)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
klog.Warningf("GCE PD %q already exists, reusing", name)
return nil
}
return err
@ -678,14 +678,14 @@ func (gce *GCECloud) CreateDisk(
// CreateRegionalDisk creates a new Regional Persistent Disk, with the specified
// name & size, replicated to the specified zones. It stores the specified tags
// encoded in JSON in the Description field.
func (gce *GCECloud) CreateRegionalDisk(
func (g *Cloud) CreateRegionalDisk(
name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error {
// Do not allow creation of PDs in zones that do not have nodes. Such PDs
// are not currently usable. This functionality should be reverted to checking
// against managed zones if we want users to be able to create RegionalDisks
// in zones where there are no nodes
curZones, err := gce.GetAllCurrentZones()
curZones, err := g.GetAllCurrentZones()
if err != nil {
return err
}
@ -693,7 +693,7 @@ func (gce *GCECloud) CreateRegionalDisk(
return fmt.Errorf("kubernetes does not have nodes in specified zones: %q. Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones)
}
tagsStr, err := gce.encodeDiskTags(tags)
tagsStr, err := g.encodeDiskTags(tags)
if err != nil {
return err
}
@ -703,14 +703,14 @@ func (gce *GCECloud) CreateRegionalDisk(
return err
}
mc := newDiskMetricContextRegional("create", gce.region)
mc := newDiskMetricContextRegional("create", g.region)
err = gce.manager.CreateRegionalDiskOnCloudProvider(
err = g.manager.CreateRegionalDiskOnCloudProvider(
name, sizeGb, tagsStr, diskType, replicaZones)
mc.Observe(err)
if isGCEError(err, "alreadyExists") {
glog.Warningf("GCE PD %q already exists, reusing", name)
klog.Warningf("GCE PD %q already exists, reusing", name)
return nil
}
return err
@ -727,8 +727,9 @@ func getDiskType(diskType string) (string, error) {
}
}
func (gce *GCECloud) DeleteDisk(diskToDelete string) error {
err := gce.doDeleteDisk(diskToDelete)
// DeleteDisk deletes the referenced persistent disk.
func (g *Cloud) DeleteDisk(diskToDelete string) error {
err := g.doDeleteDisk(diskToDelete)
if isGCEError(err, "resourceInUseByAnotherResource") {
return volume.NewDeletedVolumeInUseError(err.Error())
}
@ -740,19 +741,18 @@ func (gce *GCECloud) DeleteDisk(diskToDelete string) error {
}
// ResizeDisk expands given disk and returns new disk size
func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
disk, err := gce.GetDiskByNameUnknownZone(diskToResize)
func (g *Cloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
disk, err := g.GetDiskByNameUnknownZone(diskToResize)
if err != nil {
return oldSize, err
}
requestBytes := newSize.Value()
// GCE resizes in chunks of GBs (not GiB)
requestGB := volumeutil.RoundUpSize(requestBytes, 1000*1000*1000)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB))
// GCE resizes in chunks of GiBs
requestGIB := volumeutil.RoundUpToGiB(newSize)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGIB))
// If disk is already of size equal or greater than requested size, we simply return
if disk.SizeGb >= requestGB {
if disk.SizeGb >= requestGIB {
return newSizeQuant, nil
}
@ -761,26 +761,24 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
switch zoneInfo := disk.ZoneInfo.(type) {
case singleZone:
mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone)
err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone)
err := g.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone)
if err != nil {
return oldSize, mc.Observe(err)
} else {
return newSizeQuant, mc.Observe(err)
}
return newSizeQuant, mc.Observe(err)
case multiZone:
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
}
mc = newDiskMetricContextRegional("resize", disk.Region)
err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
err := g.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB)
if err != nil {
return oldSize, mc.Observe(err)
} else {
return newSizeQuant, mc.Observe(err)
}
return newSizeQuant, mc.Observe(err)
case nil:
return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk)
default:
@ -788,13 +786,13 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
}
}
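
The GiB round-up that ResizeDisk relies on is plain ceiling division; a standalone sketch of the arithmetic (not the volumeutil helper itself):

package example

// roundUpToGiB rounds a byte count up to whole GiB, mirroring the
// behavior ResizeDisk expects: any remainder provisions one more GiB.
func roundUpToGiB(sizeBytes int64) int64 {
    const giB = 1024 * 1024 * 1024
    return (sizeBytes + giB - 1) / giB
}

// e.g. roundUpToGiB(1) == 1, and roundUpToGiB(5<<30 + 1) == 6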
// Builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD
// GetAutoLabelsForPD builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD
// Specifically, this builds FailureDomain (zone) and Region labels.
// The PersistentVolumeLabel admission controller calls this and adds the labels when a PV is created.
// If zone is specified, the volume will only be found in the specified zone,
// otherwise all managed zones will be searched.
func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
var disk *GCEDisk
func (g *Cloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
var disk *Disk
var err error
if zone == "" {
// For regional PDs this is fine, but for zonal PDs we would like as far
@ -805,7 +803,7 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
// However, wherever possible the zone should be passed (and it is
// passed for most cases that we can control, e.g. dynamic volume
// provisioning).
disk, err = gce.GetDiskByNameUnknownZone(name)
disk, err = g.GetDiskByNameUnknownZone(name)
if err != nil {
return nil, err
}
@ -816,24 +814,24 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
zoneSet, err := volumeutil.LabelZonesToSet(zone)
if err != nil {
glog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone)
klog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone)
}
if len(zoneSet) > 1 {
// Regional PD
disk, err = gce.getRegionalDiskByName(name)
disk, err = g.getRegionalDiskByName(name)
if err != nil {
return nil, err
}
} else {
// Zonal PD
disk, err = gce.getDiskByName(name, zone)
disk, err = g.getDiskByName(name, zone)
if err != nil {
return nil, err
}
}
} else {
disk, err = gce.getDiskByName(name, zone)
disk, err = g.getDiskByName(name, zone)
if err != nil {
return nil, err
}
@ -868,11 +866,11 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
return labels, nil
}
// Returns a GCEDisk for the disk, if it is found in the specified zone.
// Returns a Disk for the disk, if it is found in the specified zone.
// If not found, returns (nil, nil)
func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, error) {
mc := newDiskMetricContextZonal("get", gce.region, zone)
disk, err := gce.manager.GetDiskFromCloudProvider(zone, diskName)
func (g *Cloud) findDiskByName(diskName string, zone string) (*Disk, error) {
mc := newDiskMetricContextZonal("get", g.region, zone)
disk, err := g.manager.GetDiskFromCloudProvider(zone, diskName)
if err == nil {
return disk, mc.Observe(nil)
}
@ -883,19 +881,19 @@ func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, err
}
// Like findDiskByName, but returns an error if the disk is not found
func (gce *GCECloud) getDiskByName(diskName string, zone string) (*GCEDisk, error) {
disk, err := gce.findDiskByName(diskName, zone)
func (g *Cloud) getDiskByName(diskName string, zone string) (*Disk, error) {
disk, err := g.findDiskByName(diskName, zone)
if disk == nil && err == nil {
return nil, fmt.Errorf("GCE persistent disk not found: diskName=%q zone=%q", diskName, zone)
}
return disk, err
}
// Returns a GCEDisk for the regional disk, if it is found.
// Returns a Disk for the regional disk, if it is found.
// If not found, returns (nil, nil)
func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) {
mc := newDiskMetricContextRegional("get", gce.region)
disk, err := gce.manager.GetRegionalDiskFromCloudProvider(diskName)
func (g *Cloud) findRegionalDiskByName(diskName string) (*Disk, error) {
mc := newDiskMetricContextRegional("get", g.region)
disk, err := g.manager.GetRegionalDiskFromCloudProvider(diskName)
if err == nil {
return disk, mc.Observe(nil)
}
@ -906,20 +904,20 @@ func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) {
}
// Like findRegionalDiskByName, but returns an error if the disk is not found
func (gce *GCECloud) getRegionalDiskByName(diskName string) (*GCEDisk, error) {
disk, err := gce.findRegionalDiskByName(diskName)
func (g *Cloud) getRegionalDiskByName(diskName string) (*Disk, error) {
disk, err := g.findRegionalDiskByName(diskName)
if disk == nil && err == nil {
return nil, fmt.Errorf("GCE regional persistent disk not found: diskName=%q", diskName)
}
return disk, err
}
// Scans all managed zones to return the GCE PD
// GetDiskByNameUnknownZone scans all managed zones to return the GCE PD
// Prefer getDiskByName, if the zone can be established
// Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone
func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) {
func (g *Cloud) GetDiskByNameUnknownZone(diskName string) (*Disk, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
regionalDisk, err := gce.getRegionalDiskByName(diskName)
regionalDisk, err := g.getRegionalDiskByName(diskName)
if err == nil {
return regionalDisk, err
}
@ -935,9 +933,9 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error)
// admission control, but that might be a little weird (values changing
// on create)
var found *GCEDisk
for _, zone := range gce.managedZones {
disk, err := gce.findDiskByName(diskName, zone)
var found *Disk
for _, zone := range g.managedZones {
disk, err := g.findDiskByName(diskName, zone)
if err != nil {
return nil, err
}
@ -950,7 +948,7 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error)
switch zoneInfo := disk.ZoneInfo.(type) {
case multiZone:
if zoneInfo.replicaZones.Has(zone) {
glog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.",
klog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.",
diskName, zoneInfo.replicaZones)
continue
}
@ -964,15 +962,15 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error)
if found != nil {
return found, nil
}
glog.Warningf("GCE persistent disk %q not found in managed zones (%s)",
diskName, strings.Join(gce.managedZones, ","))
klog.Warningf("GCE persistent disk %q not found in managed zones (%s)",
diskName, strings.Join(g.managedZones, ","))
return nil, cloudprovider.DiskNotFound
}
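
GetDiskByNameUnknownZone tolerates the same disk name appearing in several managed zones only when those zones are replicas of one regional disk; two genuinely distinct disks with the same name are an error. A stripped-down sketch of that duplicate check, with hypothetical types standing in for Disk and findDiskByName:

package main

import "fmt"

// findInZones scans zones for a named item and rejects true duplicates,
// mirroring the loop in GetDiskByNameUnknownZone (lookup is a stand-in
// for findDiskByName; string results stand in for *Disk).
func findInZones(zones []string, name string, lookup func(zone, name string) (string, bool)) (string, error) {
	var found string
	for _, zone := range zones {
		v, ok := lookup(zone, name)
		if !ok {
			continue
		}
		if found != "" {
			return "", fmt.Errorf("%q found in multiple zones", name)
		}
		found = v
	}
	if found == "" {
		return "", fmt.Errorf("%q not found in any zone", name)
	}
	return found, nil
}

func main() {
	lookup := func(zone, name string) (string, bool) { return zone + "/" + name, zone == "zone1" }
	fmt.Println(findInZones([]string{"zone1", "zone2"}, "disk", lookup))
}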
// encodeDiskTags encodes requested volume tags into JSON string, as GCE does
// not support tags on GCE PDs and we use Description field as fallback.
func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) {
func (g *Cloud) encodeDiskTags(tags map[string]string) (string, error) {
if len(tags) == 0 {
// No tags -> empty JSON
return "", nil
@ -985,8 +983,8 @@ func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) {
return string(enc), nil
}
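
encodeDiskTags exists because GCE PDs have no native tag support, so the tags map is JSON-encoded into the disk's Description field; the tests later in this diff expect exactly {"test-tag":"test-value"}. A self-contained sketch of the same encoding:

package main

import (
	"encoding/json"
	"fmt"
)

// encodeDiskTags mirrors the fallback above: serialize the tags map to
// JSON for storage in the disk Description field; no tags yields "".
func encodeDiskTags(tags map[string]string) (string, error) {
	if len(tags) == 0 {
		return "", nil
	}
	enc, err := json.Marshal(tags)
	if err != nil {
		return "", err
	}
	return string(enc), nil
}

func main() {
	desc, _ := encodeDiskTags(map[string]string{"test-tag": "test-value"})
	fmt.Println(desc) // {"test-tag":"test-value"}
}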
func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
disk, err := gce.GetDiskByNameUnknownZone(diskToDelete)
func (g *Cloud) doDeleteDisk(diskToDelete string) error {
disk, err := g.GetDiskByNameUnknownZone(diskToDelete)
if err != nil {
return err
}
@ -996,14 +994,14 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
switch zoneInfo := disk.ZoneInfo.(type) {
case singleZone:
mc = newDiskMetricContextZonal("delete", disk.Region, zoneInfo.zone)
return mc.Observe(gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name))
return mc.Observe(g.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name))
case multiZone:
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
}
mc = newDiskMetricContextRegional("delete", disk.Region)
return mc.Observe(gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name))
return mc.Observe(g.manager.DeleteRegionalDiskOnCloudProvider(disk.Name))
case nil:
return fmt.Errorf("PD has nil ZoneInfo: %v", disk)
default:

View File

@ -26,7 +26,7 @@ import (
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
@ -35,15 +35,15 @@ import (
func TestCreateDisk_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: []string{"zone1"},
projectID: gceProjectId,
projectID: gceProjectID,
AlphaFeatureGate: alphaFeatureGate,
nodeZones: createNodeZones(zonesWithNodes),
nodeInformerSynced: func() bool { return true },
@ -57,7 +57,7 @@ func TestCreateDisk_Basic(t *testing.T) {
tags["test-tag"] = "test-value"
expectedDiskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(
diskTypeURITemplateSingleZone, gceProjectId, zone, diskType)
diskTypeURITemplateSingleZone, gceProjectID, zone, diskType)
expectedDescription := "{\"test-tag\":\"test-value\"}"
/* Act */
@ -90,15 +90,15 @@ func TestCreateDisk_Basic(t *testing.T) {
func TestCreateRegionalDisk_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1", "zone3", "zone2"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
projectID: gceProjectId,
projectID: gceProjectID,
nodeZones: createNodeZones(zonesWithNodes),
nodeInformerSynced: func() bool { return true },
}
@ -110,8 +110,8 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
tags := make(map[string]string)
tags["test-tag"] = "test-value"
expectedDiskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(
diskTypeURITemplateRegional, gceProjectId, gceRegion, diskType)
expectedDiskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(
diskTypeURITemplateRegional, gceProjectID, gceRegion, diskType)
expectedDescription := "{\"test-tag\":\"test-value\"}"
/* Act */
@ -144,12 +144,12 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -175,11 +175,11 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
func TestCreateDisk_WrongZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
gce := GCECloud{
fakeManager := newFakeManager(gceProjectID, gceRegion)
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
nodeZones: createNodeZones(zonesWithNodes),
@ -200,11 +200,11 @@ func TestCreateDisk_WrongZone(t *testing.T) {
func TestCreateDisk_NoManagedZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{}
fakeManager := newFakeManager(gceProjectId, gceRegion)
gce := GCECloud{
fakeManager := newFakeManager(gceProjectID, gceRegion)
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
nodeZones: createNodeZones(zonesWithNodes),
@ -225,11 +225,11 @@ func TestCreateDisk_NoManagedZone(t *testing.T) {
func TestCreateDisk_BadDiskType(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
gce := GCECloud{manager: fakeManager,
fakeManager := newFakeManager(gceProjectID, gceRegion)
gce := Cloud{manager: fakeManager,
managedZones: zonesWithNodes,
nodeZones: createNodeZones(zonesWithNodes),
nodeInformerSynced: func() bool { return true }}
@ -250,12 +250,12 @@ func TestCreateDisk_BadDiskType(t *testing.T) {
func TestCreateDisk_MultiZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -279,12 +279,12 @@ func TestCreateDisk_MultiZone(t *testing.T) {
func TestDeleteDisk_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -313,12 +313,12 @@ func TestDeleteDisk_Basic(t *testing.T) {
func TestDeleteDisk_NotFound(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -338,12 +338,12 @@ func TestDeleteDisk_NotFound(t *testing.T) {
func TestDeleteDisk_ResourceBeingUsed(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -369,12 +369,12 @@ func TestDeleteDisk_ResourceBeingUsed(t *testing.T) {
func TestDeleteDisk_SameDiskMultiZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -403,12 +403,12 @@ func TestDeleteDisk_SameDiskMultiZone(t *testing.T) {
func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -437,16 +437,16 @@ func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
func TestGetAutoLabelsForPD_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "us-central1"
zone := "us-central1-c"
zonesWithNodes := []string{zone}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
diskName := "disk"
diskType := DiskTypeSSD
const sizeGb int64 = 128
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -474,16 +474,16 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) {
func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "europe-west1"
zone := "europe-west1-d"
zonesWithNodes := []string{zone}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
diskName := "disk"
diskType := DiskTypeStandard
const sizeGb int64 = 128
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -510,13 +510,13 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zone := "asia-northeast1-a"
zonesWithNodes := []string{zone}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
diskName := "disk"
gce := GCECloud{manager: fakeManager,
gce := Cloud{manager: fakeManager,
managedZones: zonesWithNodes,
nodeZones: createNodeZones(zonesWithNodes),
nodeInformerSynced: func() bool { return true }}
@ -532,13 +532,13 @@ func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
diskName := "disk"
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -557,17 +557,17 @@ func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "us-west1"
zonesWithNodes := []string{"us-west1-b", "asia-southeast1-a"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
diskName := "disk"
diskType := DiskTypeStandard
zone := "us-west1-b"
const sizeGb int64 = 128
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -596,16 +596,16 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceProjectID := "test-project"
gceRegion := "fake-region"
zonesWithNodes := []string{"us-west1-b", "asia-southeast1-a"}
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectID, gceRegion)
diskName := "disk"
diskType := DiskTypeStandard
const sizeGb int64 = 128
alphaFeatureGate := NewAlphaFeatureGate([]string{})
gce := GCECloud{
gce := Cloud{
manager: fakeManager,
managedZones: zonesWithNodes,
AlphaFeatureGate: alphaFeatureGate,
@ -723,9 +723,9 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
tagsStr string,
diskType string,
zones sets.String) error {
manager.createDiskCalled = true
diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
manager.createDiskCalled = true
diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
switch t := manager.targetAPI; t {
case targetStable:
diskToCreateV1 := &compute.Disk{
@ -737,17 +737,13 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
manager.diskToCreateStable = diskToCreateV1
manager.regionalDisks[diskToCreateV1.Name] = zones
return nil
case targetBeta:
return fmt.Errorf("RegionalDisk CreateDisk op not supported in beta.")
case targetAlpha:
return fmt.Errorf("RegionalDisk CreateDisk op not supported in alpha.")
default:
return fmt.Errorf("unexpected type: %T", t)
}
}
func (manager *FakeServiceManager) AttachDiskOnCloudProvider(
disk *GCEDisk,
disk *Disk,
readWrite string,
instanceZone string,
instanceName string) error {
@ -784,7 +780,7 @@ func (manager *FakeServiceManager) DetachDiskOnCloudProvider(
* Gets disk info stored in the FakeServiceManager.
*/
func (manager *FakeServiceManager) GetDiskFromCloudProvider(
zone string, diskName string) (*GCEDisk, error) {
zone string, diskName string) (*Disk, error) {
if manager.zonalDisks[zone] == "" {
return nil, cloudprovider.DiskNotFound
@ -796,7 +792,7 @@ func (manager *FakeServiceManager) GetDiskFromCloudProvider(
return nil, err
}
return &GCEDisk{
return &Disk{
Region: manager.gceRegion,
ZoneInfo: singleZone{lastComponent(zone)},
Name: diskName,
@ -809,7 +805,7 @@ func (manager *FakeServiceManager) GetDiskFromCloudProvider(
* Gets disk info stored in the FakeServiceManager.
*/
func (manager *FakeServiceManager) GetRegionalDiskFromCloudProvider(
diskName string) (*GCEDisk, error) {
diskName string) (*Disk, error) {
if _, ok := manager.regionalDisks[diskName]; !ok {
return nil, cloudprovider.DiskNotFound
@ -821,7 +817,7 @@ func (manager *FakeServiceManager) GetRegionalDiskFromCloudProvider(
return nil, err
}
return &GCEDisk{
return &Disk{
Region: manager.gceRegion,
ZoneInfo: multiZone{manager.regionalDisks[diskName]},
Name: diskName,
@ -831,14 +827,14 @@ func (manager *FakeServiceManager) GetRegionalDiskFromCloudProvider(
}
func (manager *FakeServiceManager) ResizeDiskOnCloudProvider(
disk *GCEDisk,
disk *Disk,
size int64,
zone string) error {
panic("Not implmented")
}
func (manager *FakeServiceManager) RegionalResizeDiskOnCloudProvider(
disk *GCEDisk,
disk *Disk,
size int64) error {
panic("Not implemented")
}

View File

@ -0,0 +1,83 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce
import (
"fmt"
"net/http"
compute "google.golang.org/api/compute/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)
// TestClusterValues holds the config values for the fake/test gce cloud object.
type TestClusterValues struct {
ProjectID string
Region string
ZoneName string
SecondaryZoneName string
ClusterID string
ClusterName string
}
// DefaultTestClusterValues creates a reasonable set of default cluster values
// for generating a new test fake GCE cloud instance.
func DefaultTestClusterValues() TestClusterValues {
return TestClusterValues{
ProjectID: "test-project",
Region: "us-central1",
ZoneName: "us-central1-b",
SecondaryZoneName: "us-central1-c",
ClusterID: "test-cluster-id",
ClusterName: "Test Cluster Name",
}
}
type fakeRoundTripper struct{}
func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
return nil, fmt.Errorf("err: test used fake http client")
}
// Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling
// gce.Initialize()
func fakeClusterID(clusterID string) ClusterID {
return ClusterID{
clusterID: &clusterID,
store: cache.NewStore(func(obj interface{}) (string, error) {
return "", nil
}),
}
}
// NewFakeGCECloud constructs a fake GCE Cloud from the cluster values.
func NewFakeGCECloud(vals TestClusterValues) *Cloud {
client := &http.Client{Transport: &fakeRoundTripper{}}
service, _ := compute.New(client)
gce := &Cloud{
region: vals.Region,
service: service,
managedZones: []string{vals.ZoneName},
projectID: vals.ProjectID,
networkProjectID: vals.ProjectID,
ClusterID: fakeClusterID(vals.ClusterID),
}
c := cloud.NewMockGCE(&gceProjectRouter{gce})
gce.c = c
return gce
}
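
A sketch of how a test might consume this new helper; TestFakeCloudSketch is hypothetical and not part of this diff (it assumes the standard testing package and the existing Cloud.Region getter):

// TestFakeCloudSketch (hypothetical) builds the fake cloud from the
// default values and checks a simple accessor, with no real GCP calls.
func TestFakeCloudSketch(t *testing.T) {
	vals := DefaultTestClusterValues()
	gce := NewFakeGCECloud(vals)
	if gce.Region() != vals.Region {
		t.Fatalf("region = %q, want %q", gce.Region(), vals.Region)
	}
}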

View File

@ -28,38 +28,38 @@ func newFirewallMetricContext(request string) *metricContext {
}
// GetFirewall returns the Firewall by name.
func (gce *GCECloud) GetFirewall(name string) (*compute.Firewall, error) {
func (g *Cloud) GetFirewall(name string) (*compute.Firewall, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newFirewallMetricContext("get")
v, err := gce.c.Firewalls().Get(ctx, meta.GlobalKey(name))
v, err := g.c.Firewalls().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// CreateFirewall creates the passed firewall
func (gce *GCECloud) CreateFirewall(f *compute.Firewall) error {
func (g *Cloud) CreateFirewall(f *compute.Firewall) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newFirewallMetricContext("create")
return mc.Observe(gce.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f))
return mc.Observe(g.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f))
}
// DeleteFirewall deletes the given firewall rule.
func (gce *GCECloud) DeleteFirewall(name string) error {
func (g *Cloud) DeleteFirewall(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newFirewallMetricContext("delete")
return mc.Observe(gce.c.Firewalls().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.Firewalls().Delete(ctx, meta.GlobalKey(name)))
}
// UpdateFirewall applies the given firewall as an update to an existing service.
func (gce *GCECloud) UpdateFirewall(f *compute.Firewall) error {
func (g *Cloud) UpdateFirewall(f *compute.Firewall) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newFirewallMetricContext("update")
return mc.Observe(gce.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f))
return mc.Observe(g.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f))
}
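
The four wrappers above repeat one shape: open a timeout-bounded context, build a metric context for the request, run the API call, and record its outcome through Observe. Condensed into a hypothetical helper (not part of this diff), the pattern reads:

// observedFirewallOp is a hypothetical helper showing the shared
// timeout-plus-metrics shape of the firewall wrappers above
// (assumes the standard context package is imported).
func (g *Cloud) observedFirewallOp(request string, op func(ctx context.Context) error) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()
	mc := newFirewallMetricContext(request)
	return mc.Observe(op(ctx))
}

DeleteFirewall, for instance, would then reduce to a single call passing "delete" and a closure over g.c.Firewalls().Delete.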

View File

@ -32,129 +32,129 @@ func newForwardingRuleMetricContextWithVersion(request, region, version string)
}
// CreateGlobalForwardingRule creates the passed GlobalForwardingRule
func (gce *GCECloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error {
func (g *Cloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("create", "")
return mc.Observe(gce.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule))
return mc.Observe(g.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule))
}
// SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule.
// targetProxyLink is the SelfLink of a TargetHttp(s)Proxy.
func (gce *GCECloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error {
func (g *Cloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("set_proxy", "")
target := &compute.TargetReference{Target: targetProxyLink}
return mc.Observe(gce.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target))
return mc.Observe(g.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target))
}
// DeleteGlobalForwardingRule deletes the GlobalForwardingRule by name.
func (gce *GCECloud) DeleteGlobalForwardingRule(name string) error {
func (g *Cloud) DeleteGlobalForwardingRule(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("delete", "")
return mc.Observe(gce.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name)))
}
// GetGlobalForwardingRule returns the GlobalForwardingRule by name.
func (gce *GCECloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) {
func (g *Cloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("get", "")
v, err := gce.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name))
v, err := g.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// ListGlobalForwardingRules lists all GlobalForwardingRules in the project.
func (gce *GCECloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) {
func (g *Cloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("list", "")
v, err := gce.c.GlobalForwardingRules().List(ctx, filter.None)
v, err := g.c.GlobalForwardingRules().List(ctx, filter.None)
return v, mc.Observe(err)
}
// GetRegionForwardingRule returns the RegionalForwardingRule by name & region.
func (gce *GCECloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) {
func (g *Cloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("get", region)
v, err := gce.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region))
v, err := g.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region))
return v, mc.Observe(err)
}
// GetAlphaRegionForwardingRule returns the Alpha forwarding rule by name & region.
func (gce *GCECloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) {
func (g *Cloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContextWithVersion("get", region, computeAlphaVersion)
v, err := gce.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region))
v, err := g.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region))
return v, mc.Observe(err)
}
// ListRegionForwardingRules lists all RegionalForwardingRules in the project & region.
func (gce *GCECloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) {
func (g *Cloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("list", region)
v, err := gce.c.ForwardingRules().List(ctx, region, filter.None)
v, err := g.c.ForwardingRules().List(ctx, region, filter.None)
return v, mc.Observe(err)
}
// ListAlphaRegionForwardingRules lists all RegionalForwardingRules in the project & region.
func (gce *GCECloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) {
func (g *Cloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContextWithVersion("list", region, computeAlphaVersion)
v, err := gce.c.AlphaForwardingRules().List(ctx, region, filter.None)
v, err := g.c.AlphaForwardingRules().List(ctx, region, filter.None)
return v, mc.Observe(err)
}
// CreateRegionForwardingRule creates and returns a
// RegionalForwardingRule that points to the given BackendService
func (gce *GCECloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error {
func (g *Cloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("create", region)
return mc.Observe(gce.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule))
return mc.Observe(g.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule))
}
// CreateAlphaRegionForwardingRule creates and returns an Alpha
// forwarding rule in the given region.
func (gce *GCECloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error {
func (g *Cloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContextWithVersion("create", region, computeAlphaVersion)
return mc.Observe(gce.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule))
return mc.Observe(g.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule))
}
// DeleteRegionForwardingRule deletes the RegionalForwardingRule by name & region.
func (gce *GCECloud) DeleteRegionForwardingRule(name, region string) error {
func (g *Cloud) DeleteRegionForwardingRule(name, region string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newForwardingRuleMetricContext("delete", region)
return mc.Observe(gce.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region)))
return mc.Observe(g.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region)))
}
// TODO(#51665): retire this function once Network Tiers becomes Beta in GCP.
func (gce *GCECloud) getNetworkTierFromForwardingRule(name, region string) (string, error) {
if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
func (g *Cloud) getNetworkTierFromForwardingRule(name, region string) (string, error) {
if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
return cloud.NetworkTierDefault.ToGCEValue(), nil
}
fwdRule, err := gce.GetAlphaRegionForwardingRule(name, region)
fwdRule, err := g.GetAlphaRegionForwardingRule(name, region)
if err != nil {
return handleAlphaNetworkTierGetError(err)
}
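
getNetworkTierFromForwardingRule (truncated above) is a gate-then-query: with the NetworkTiers alpha gate off, it returns the default tier without touching the API. A generic sketch of that fallback, with hypothetical names:

package main

import "fmt"

// tierOrDefault (hypothetical) skips the lookup entirely when the
// feature gate is disabled, returning the supplied default instead.
func tierOrDefault(gateEnabled bool, defaultTier string, fetch func() (string, error)) (string, error) {
	if !gateEnabled {
		return defaultTier, nil
	}
	return fetch()
}

func main() {
	tier, err := tierOrDefault(false, "default-tier", func() (string, error) {
		return "", fmt.Errorf("unreachable while the gate is off")
	})
	fmt.Println(tier, err)
}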

View File

@ -17,17 +17,18 @@ limitations under the License.
package gce
import (
"github.com/golang/glog"
"k8s.io/klog"
computealpha "google.golang.org/api/compute/v0.alpha"
computebeta "google.golang.org/api/compute/v0.beta"
compute "google.golang.org/api/compute/v1"
"k8s.io/api/core/v1"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
"k8s.io/kubernetes/pkg/master/ports"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
const (
@ -41,7 +42,7 @@ var (
func init() {
if v, err := utilversion.ParseGeneric("1.7.2"); err != nil {
glog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err)
klog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err)
} else {
minNodesHealthCheckVersion = v
}
@ -55,176 +56,204 @@ func newHealthcheckMetricContextWithVersion(request, version string) *metricCont
return newGenericMetricContext("healthcheck", request, unusedMetricLabel, unusedMetricLabel, version)
}
// GetHttpHealthCheck returns the given HttpHealthCheck by name.
func (gce *GCECloud) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) {
// GetHTTPHealthCheck returns the given HttpHealthCheck by name.
func (g *Cloud) GetHTTPHealthCheck(name string) (*compute.HttpHealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("get_legacy")
v, err := gce.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name))
v, err := g.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// UpdateHttpHealthCheck applies the given HttpHealthCheck as an update.
func (gce *GCECloud) UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
// UpdateHTTPHealthCheck applies the given HttpHealthCheck as an update.
func (g *Cloud) UpdateHTTPHealthCheck(hc *compute.HttpHealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("update_legacy")
return mc.Observe(gce.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
}
// DeleteHttpHealthCheck deletes the given HttpHealthCheck by name.
func (gce *GCECloud) DeleteHttpHealthCheck(name string) error {
// DeleteHTTPHealthCheck deletes the given HttpHealthCheck by name.
func (g *Cloud) DeleteHTTPHealthCheck(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("delete_legacy")
return mc.Observe(gce.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name)))
}
// CreateHttpHealthCheck creates the given HttpHealthCheck.
func (gce *GCECloud) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
// CreateHTTPHealthCheck creates the given HttpHealthCheck.
func (g *Cloud) CreateHTTPHealthCheck(hc *compute.HttpHealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("create_legacy")
return mc.Observe(gce.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
}
// ListHttpHealthChecks lists all HttpHealthChecks in the project.
func (gce *GCECloud) ListHttpHealthChecks() ([]*compute.HttpHealthCheck, error) {
// ListHTTPHealthChecks lists all HttpHealthChecks in the project.
func (g *Cloud) ListHTTPHealthChecks() ([]*compute.HttpHealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("list_legacy")
v, err := gce.c.HttpHealthChecks().List(ctx, filter.None)
v, err := g.c.HttpHealthChecks().List(ctx, filter.None)
return v, mc.Observe(err)
}
// Legacy HTTPS Health Checks
// GetHttpsHealthCheck returns the given HttpsHealthCheck by name.
func (gce *GCECloud) GetHttpsHealthCheck(name string) (*compute.HttpsHealthCheck, error) {
// GetHTTPSHealthCheck returns the given HttpsHealthCheck by name.
func (g *Cloud) GetHTTPSHealthCheck(name string) (*compute.HttpsHealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("get_legacy")
v, err := gce.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name))
v, err := g.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// UpdateHttpsHealthCheck applies the given HttpsHealthCheck as an update.
func (gce *GCECloud) UpdateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error {
// UpdateHTTPSHealthCheck applies the given HttpsHealthCheck as an update.
func (g *Cloud) UpdateHTTPSHealthCheck(hc *compute.HttpsHealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("update_legacy")
return mc.Observe(gce.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
}
// DeleteHttpsHealthCheck deletes the given HttpsHealthCheck by name.
func (gce *GCECloud) DeleteHttpsHealthCheck(name string) error {
// DeleteHTTPSHealthCheck deletes the given HttpsHealthCheck by name.
func (g *Cloud) DeleteHTTPSHealthCheck(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("delete_legacy")
return mc.Observe(gce.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name)))
}
// CreateHttpsHealthCheck creates the given HttpsHealthCheck.
func (gce *GCECloud) CreateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error {
// CreateHTTPSHealthCheck creates the given HttpsHealthCheck.
func (g *Cloud) CreateHTTPSHealthCheck(hc *compute.HttpsHealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("create_legacy")
return mc.Observe(gce.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
}
// ListHttpsHealthChecks lists all HttpsHealthChecks in the project.
func (gce *GCECloud) ListHttpsHealthChecks() ([]*compute.HttpsHealthCheck, error) {
// ListHTTPSHealthChecks lists all HttpsHealthChecks in the project.
func (g *Cloud) ListHTTPSHealthChecks() ([]*compute.HttpsHealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("list_legacy")
v, err := gce.c.HttpsHealthChecks().List(ctx, filter.None)
v, err := g.c.HttpsHealthChecks().List(ctx, filter.None)
return v, mc.Observe(err)
}
// Generic HealthCheck
// GetHealthCheck returns the given HealthCheck by name.
func (gce *GCECloud) GetHealthCheck(name string) (*compute.HealthCheck, error) {
func (g *Cloud) GetHealthCheck(name string) (*compute.HealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("get")
v, err := gce.c.HealthChecks().Get(ctx, meta.GlobalKey(name))
v, err := g.c.HealthChecks().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// GetAlphaHealthCheck returns the given alpha HealthCheck by name.
func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) {
func (g *Cloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContextWithVersion("get", computeAlphaVersion)
v, err := gce.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name))
v, err := g.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// GetBetaHealthCheck returns the given beta HealthCheck by name.
func (g *Cloud) GetBetaHealthCheck(name string) (*computebeta.HealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContextWithVersion("get", computeBetaVersion)
v, err := g.c.BetaHealthChecks().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}
// UpdateHealthCheck applies the given HealthCheck as an update.
func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error {
func (g *Cloud) UpdateHealthCheck(hc *compute.HealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("update")
return mc.Observe(gce.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
}
// UpdateAlphaHealthCheck applies the given alpha HealthCheck as an update.
func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
func (g *Cloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContextWithVersion("update", computeAlphaVersion)
return mc.Observe(gce.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
}
// UpdateBetaHealthCheck applies the given beta HealthCheck as an update.
func (g *Cloud) UpdateBetaHealthCheck(hc *computebeta.HealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContextWithVersion("update", computeBetaVersion)
return mc.Observe(g.c.BetaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
}
// DeleteHealthCheck deletes the given HealthCheck by name.
func (gce *GCECloud) DeleteHealthCheck(name string) error {
func (g *Cloud) DeleteHealthCheck(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("delete")
return mc.Observe(gce.c.HealthChecks().Delete(ctx, meta.GlobalKey(name)))
return mc.Observe(g.c.HealthChecks().Delete(ctx, meta.GlobalKey(name)))
}
// CreateHealthCheck creates the given HealthCheck.
func (gce *GCECloud) CreateHealthCheck(hc *compute.HealthCheck) error {
func (g *Cloud) CreateHealthCheck(hc *compute.HealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("create")
return mc.Observe(gce.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
}
// CreateAlphaHealthCheck creates the given alpha HealthCheck.
func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
func (g *Cloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContextWithVersion("create", computeAlphaVersion)
return mc.Observe(gce.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
return mc.Observe(g.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
}
// CreateBetaHealthCheck creates the given beta HealthCheck.
func (g *Cloud) CreateBetaHealthCheck(hc *computebeta.HealthCheck) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContextWithVersion("create", computeBetaVersion)
return mc.Observe(g.c.BetaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
}
// ListHealthChecks lists all HealthCheck in the project.
func (gce *GCECloud) ListHealthChecks() ([]*compute.HealthCheck, error) {
func (g *Cloud) ListHealthChecks() ([]*compute.HealthCheck, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()
mc := newHealthcheckMetricContext("list")
v, err := gce.c.HealthChecks().List(ctx, filter.None)
v, err := g.c.HealthChecks().List(ctx, filter.None)
return v, mc.Observe(err)
}
@ -245,7 +274,7 @@ func GetNodesHealthCheckPath() string {
func isAtLeastMinNodesHealthCheckVersion(vstring string) bool {
version, err := utilversion.ParseGeneric(vstring)
if err != nil {
glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
return false
}
return version.AtLeast(minNodesHealthCheckVersion)
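
isAtLeastMinNodesHealthCheckVersion gates behavior on a parsed kubelet version: anything at or beyond 1.7.2 qualifies, and unparseable strings fail closed. A standalone illustration using the same apimachinery version helpers:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// Mirrors the gate above: parse each candidate and compare it against
// the 1.7.2 minimum; the real gate returns false on parse errors.
func main() {
	min := utilversion.MustParseGeneric("1.7.2")
	for _, v := range []string{"1.7.1", "1.7.2", "1.12.0", "garbage"} {
		parsed, err := utilversion.ParseGeneric(v)
		if err != nil {
			fmt.Printf("%s: invalid version, treated as too old\n", v)
			continue
		}
		fmt.Printf("%s at least 1.7.2: %v\n", v, parsed.AtLeast(min))
	}
}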
