mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00
vendor update for CSI 0.3.0
This commit is contained in:
1
vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS
generated
vendored
@ -41,3 +41,4 @@ reviewers:
|
||||
- jingxu97
|
||||
- wlan0
|
||||
- cheftako
|
||||
- andrewsykim
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go
generated
vendored
@ -130,10 +130,8 @@ type Instances interface {
|
||||
// from the node whose nodeaddresses are being queried. i.e. local metadata
|
||||
// services cannot be used in this method to obtain nodeaddresses
|
||||
NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)
|
||||
// ExternalID returns the cloud provider ID of the node with the specified NodeName.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
ExternalID(ctx context.Context, nodeName types.NodeName) (string, error)
|
||||
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
InstanceID(ctx context.Context, nodeName types.NodeName) (string, error)
|
||||
// InstanceType returns the type of the specified instance.
|
||||
InstanceType(ctx context.Context, name types.NodeName) (string, error)
|
||||
@ -148,6 +146,8 @@ type Instances interface {
|
||||
// InstanceExistsByProviderID returns true if the instance for the given provider id still is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error)
|
||||
// InstanceShutdownByProviderID returns true if the instance is shutdown in cloudprovider
|
||||
InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error)
|
||||
}
|
||||
|
||||
// Route is a representation of an advanced routing rule.
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go
generated
vendored
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go
generated
vendored
@ -33,8 +33,16 @@ type Factory func(config io.Reader) (Interface, error)
|
||||
|
||||
// All registered cloud providers.
|
||||
var (
|
||||
providersMutex sync.Mutex
|
||||
providers = make(map[string]Factory)
|
||||
providersMutex sync.Mutex
|
||||
providers = make(map[string]Factory)
|
||||
deprecatedCloudProviders = []struct {
|
||||
name string
|
||||
external bool
|
||||
detail string
|
||||
}{
|
||||
{"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"},
|
||||
{"photon", false, "The Photon Controller project is no longer maintained."},
|
||||
}
|
||||
)
|
||||
|
||||
const externalCloudProvider = "external"
|
||||
@ -95,6 +103,18 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, provider := range deprecatedCloudProviders {
|
||||
if provider.name == name {
|
||||
detail := provider.detail
|
||||
if provider.external {
|
||||
detail = fmt.Sprintf("Please use 'external' cloud provider for %s: %s", name, provider.detail)
|
||||
}
|
||||
glog.Warningf("WARNING: %s built-in cloud provider is now deprecated. %s", name, detail)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if configFilePath != "" {
|
||||
var config *os.File
|
||||
config, err = os.Open(configFilePath)
|
||||
|
91
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go
generated
vendored
91
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go
generated
vendored
@ -44,7 +44,6 @@ import (
|
||||
"github.com/aws/aws-sdk-go/service/kms"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/golang/glog"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
@ -430,7 +429,7 @@ type Volumes interface {
|
||||
// Attach the disk to the node with the specified NodeName
|
||||
// nodeName can be empty to mean "the instance on which we are running"
|
||||
// Returns the device (e.g. /dev/xvdf) where we attached the volume
|
||||
AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error)
|
||||
AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)
|
||||
// Detach the disk from the node with the specified NodeName
|
||||
// nodeName can be empty to mean "the instance on which we are running"
|
||||
// Returns the device where the volume was attached
|
||||
@ -754,7 +753,7 @@ func (s *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*e
|
||||
for {
|
||||
response, err := s.ec2.DescribeInstances(request)
|
||||
if err != nil {
|
||||
recordAwsMetric("describe_instance", 0, err)
|
||||
recordAWSMetric("describe_instance", 0, err)
|
||||
return nil, fmt.Errorf("error listing AWS instances: %q", err)
|
||||
}
|
||||
|
||||
@ -769,7 +768,7 @@ func (s *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*e
|
||||
request.NextToken = nextToken
|
||||
}
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("describe_instance", timeTaken, nil)
|
||||
recordAWSMetric("describe_instance", timeTaken, nil)
|
||||
return results, nil
|
||||
}
|
||||
|
||||
@ -787,7 +786,7 @@ func (s *awsSdkEC2) AttachVolume(request *ec2.AttachVolumeInput) (*ec2.VolumeAtt
|
||||
requestTime := time.Now()
|
||||
resp, err := s.ec2.AttachVolume(request)
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("attach_volume", timeTaken, err)
|
||||
recordAWSMetric("attach_volume", timeTaken, err)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@ -795,7 +794,7 @@ func (s *awsSdkEC2) DetachVolume(request *ec2.DetachVolumeInput) (*ec2.VolumeAtt
|
||||
requestTime := time.Now()
|
||||
resp, err := s.ec2.DetachVolume(request)
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("detach_volume", timeTaken, err)
|
||||
recordAWSMetric("detach_volume", timeTaken, err)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@ -808,8 +807,8 @@ func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.V
|
||||
response, err := s.ec2.DescribeVolumes(request)
|
||||
|
||||
if err != nil {
|
||||
recordAwsMetric("describe_volume", 0, err)
|
||||
return nil, fmt.Errorf("error listing AWS volumes: %q", err)
|
||||
recordAWSMetric("describe_volume", 0, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
results = append(results, response.Volumes...)
|
||||
@ -821,7 +820,7 @@ func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.V
|
||||
request.NextToken = nextToken
|
||||
}
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("describe_volume", timeTaken, nil)
|
||||
recordAWSMetric("describe_volume", timeTaken, nil)
|
||||
return results, nil
|
||||
}
|
||||
|
||||
@ -829,7 +828,7 @@ func (s *awsSdkEC2) CreateVolume(request *ec2.CreateVolumeInput) (*ec2.Volume, e
|
||||
requestTime := time.Now()
|
||||
resp, err := s.ec2.CreateVolume(request)
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("create_volume", timeTaken, err)
|
||||
recordAWSMetric("create_volume", timeTaken, err)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@ -837,7 +836,7 @@ func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVol
|
||||
requestTime := time.Now()
|
||||
resp, err := s.ec2.DeleteVolume(request)
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("delete_volume", timeTaken, err)
|
||||
recordAWSMetric("delete_volume", timeTaken, err)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@ -845,7 +844,7 @@ func (s *awsSdkEC2) ModifyVolume(request *ec2.ModifyVolumeInput) (*ec2.ModifyVol
|
||||
requestTime := time.Now()
|
||||
resp, err := s.ec2.ModifyVolume(request)
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("modify_volume", timeTaken, err)
|
||||
recordAWSMetric("modify_volume", timeTaken, err)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@ -856,7 +855,7 @@ func (s *awsSdkEC2) DescribeVolumeModifications(request *ec2.DescribeVolumesModi
|
||||
for {
|
||||
resp, err := s.ec2.DescribeVolumesModifications(request)
|
||||
if err != nil {
|
||||
recordAwsMetric("describe_volume_modification", 0, err)
|
||||
recordAWSMetric("describe_volume_modification", 0, err)
|
||||
return nil, fmt.Errorf("error listing volume modifictions : %v", err)
|
||||
}
|
||||
results = append(results, resp.VolumesModifications...)
|
||||
@ -867,7 +866,7 @@ func (s *awsSdkEC2) DescribeVolumeModifications(request *ec2.DescribeVolumesModi
|
||||
request.NextToken = nextToken
|
||||
}
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("describe_volume_modification", timeTaken, nil)
|
||||
recordAWSMetric("describe_volume_modification", timeTaken, nil)
|
||||
return results, nil
|
||||
}
|
||||
|
||||
@ -900,7 +899,7 @@ func (s *awsSdkEC2) CreateTags(request *ec2.CreateTagsInput) (*ec2.CreateTagsOut
|
||||
requestTime := time.Now()
|
||||
resp, err := s.ec2.CreateTags(request)
|
||||
timeTaken := time.Since(requestTime).Seconds()
|
||||
recordAwsMetric("create_tags", timeTaken, err)
|
||||
recordAWSMetric("create_tags", timeTaken, err)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@ -1147,7 +1146,7 @@ func (c *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
|
||||
c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider")
|
||||
c.eventBroadcaster = record.NewBroadcaster()
|
||||
c.eventBroadcaster.StartLogging(glog.Infof)
|
||||
c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.kubeClient.CoreV1().RESTClient()).Events("")})
|
||||
c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
|
||||
c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"})
|
||||
}
|
||||
|
||||
@ -1318,24 +1317,6 @@ func (c *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string
|
||||
return extractNodeAddresses(instance)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the node with the specified nodeName (deprecated).
|
||||
func (c *Cloud) ExternalID(ctx context.Context, nodeName types.NodeName) (string, error) {
|
||||
if c.selfAWSInstance.nodeName == nodeName {
|
||||
// We assume that if this is run on the instance itself, the instance exists and is alive
|
||||
return c.selfAWSInstance.awsID, nil
|
||||
}
|
||||
// We must verify that the instance still exists
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
instance, err := c.findInstanceByNodeName(nodeName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if instance == nil {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return aws.StringValue(instance.InstanceId), nil
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
@ -1368,6 +1349,11 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
|
||||
func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified nodeName.
|
||||
func (c *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
|
||||
// In the future it is possible to also return an endpoint as:
|
||||
@ -1377,6 +1363,10 @@ func (c *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string
|
||||
}
|
||||
inst, err := c.getInstanceByNodeName(nodeName)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// The Instances interface requires that we return InstanceNotFound (without wrapping)
|
||||
return "", err
|
||||
}
|
||||
return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err)
|
||||
}
|
||||
return "/" + aws.StringValue(inst.Placement.AvailabilityZone) + "/" + aws.StringValue(inst.InstanceId), nil
|
||||
@ -1891,10 +1881,10 @@ func (d *awsDisk) deleteVolume() (bool, error) {
|
||||
request := &ec2.DeleteVolumeInput{VolumeId: d.awsID.awsString()}
|
||||
_, err := d.ec2.DeleteVolume(request)
|
||||
if err != nil {
|
||||
if isAWSErrorVolumeNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
if awsError, ok := err.(awserr.Error); ok {
|
||||
if awsError.Code() == "InvalidVolume.NotFound" {
|
||||
return false, nil
|
||||
}
|
||||
if awsError.Code() == "VolumeInUse" {
|
||||
return false, volume.NewDeletedVolumeInUseError(err.Error())
|
||||
}
|
||||
@ -1970,7 +1960,7 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error {
|
||||
}
|
||||
|
||||
// AttachDisk implements Volumes.AttachDisk
|
||||
func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
|
||||
func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
|
||||
disk, err := newAWSDisk(c, diskName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -1981,12 +1971,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
|
||||
return "", fmt.Errorf("error finding instance %s: %q", nodeName, err)
|
||||
}
|
||||
|
||||
if readOnly {
|
||||
// TODO: We could enforce this when we mount the volume (?)
|
||||
// TODO: We could also snapshot the volume and attach copies of it
|
||||
return "", errors.New("AWS volumes cannot be mounted read-only")
|
||||
}
|
||||
|
||||
// mountDevice will hold the device where we should try to attach the disk
|
||||
var mountDevice mountDevice
|
||||
// alreadyAttached is true if we have already called AttachVolume on this disk
|
||||
@ -2266,6 +2250,10 @@ func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) {
|
||||
}
|
||||
available, err := c.checkIfAvailable(awsDisk, "deleting", "")
|
||||
if err != nil {
|
||||
if isAWSErrorVolumeNotFound(err) {
|
||||
glog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID)
|
||||
return false, nil
|
||||
}
|
||||
glog.Error(err)
|
||||
}
|
||||
|
||||
@ -3693,7 +3681,11 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m
|
||||
// We create instances with one SG
|
||||
// If users create multiple SGs, they must tag one of them as being k8s owned
|
||||
if len(tagged) != 1 {
|
||||
return nil, fmt.Errorf("Multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged", instanceID)
|
||||
taggedGroups := ""
|
||||
for _, v := range tagged {
|
||||
taggedGroups += fmt.Sprintf("%s(%s) ", *v.GroupId, *v.GroupName)
|
||||
}
|
||||
return nil, fmt.Errorf("Multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged; the tagged groups were %v", instanceID, taggedGroups)
|
||||
}
|
||||
return tagged[0], nil
|
||||
}
|
||||
@ -4338,12 +4330,3 @@ func setNodeDisk(
|
||||
}
|
||||
volumeMap[volumeID] = check
|
||||
}
|
||||
|
||||
func recordAwsMetric(actionName string, timeTaken float64, err error) {
|
||||
if err != nil {
|
||||
awsApiErrorMetric.With(prometheus.Labels{"request": actionName}).Inc()
|
||||
} else {
|
||||
awsApiMetric.With(prometheus.Labels{"request": actionName}).Observe(timeTaken)
|
||||
}
|
||||
|
||||
}
|
||||
|
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
generated
vendored
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
generated
vendored
@ -1027,10 +1027,10 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
|
||||
if elbProtocolsAreEqual(actual.InstanceProtocol, expected.InstanceProtocol) {
|
||||
continue
|
||||
}
|
||||
if orZero(actual.InstancePort) != orZero(expected.InstancePort) {
|
||||
if aws.Int64Value(actual.InstancePort) != aws.Int64Value(expected.InstancePort) {
|
||||
continue
|
||||
}
|
||||
if orZero(actual.LoadBalancerPort) != orZero(expected.LoadBalancerPort) {
|
||||
if aws.Int64Value(actual.LoadBalancerPort) != aws.Int64Value(expected.LoadBalancerPort) {
|
||||
continue
|
||||
}
|
||||
if awsArnEquals(actual.SSLCertificateId, expected.SSLCertificateId) {
|
||||
@ -1375,8 +1375,7 @@ func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescrip
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
switch aerr.Code() {
|
||||
case "PolicyNotFound":
|
||||
// TODO change from string to `elb.ErrCodePolicyNotFoundException` once the AWS SDK is updated
|
||||
case elb.ErrCodePolicyNotFoundException:
|
||||
default:
|
||||
return fmt.Errorf("error describing security policies on load balancer: %q", err)
|
||||
}
|
||||
|
50
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_metrics.go
generated
vendored
50
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_metrics.go
generated
vendored
@ -18,23 +18,43 @@ package aws
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
var awsApiMetric = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cloudprovider_aws_api_request_duration_seconds",
|
||||
Help: "Latency of aws api call",
|
||||
},
|
||||
[]string{"request"},
|
||||
var (
|
||||
awsAPIMetric = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cloudprovider_aws_api_request_duration_seconds",
|
||||
Help: "Latency of AWS API calls",
|
||||
},
|
||||
[]string{"request"})
|
||||
|
||||
awsAPIErrorMetric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cloudprovider_aws_api_request_errors",
|
||||
Help: "AWS API errors",
|
||||
},
|
||||
[]string{"request"})
|
||||
|
||||
awsAPIThrottlesMetric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cloudprovider_aws_api_throttled_requests_total",
|
||||
Help: "AWS API throttled requests",
|
||||
},
|
||||
[]string{"operation_name"})
|
||||
)
|
||||
|
||||
var awsApiErrorMetric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cloudprovider_aws_api_request_errors",
|
||||
Help: "AWS Api errors",
|
||||
},
|
||||
[]string{"request"},
|
||||
)
|
||||
func recordAWSMetric(actionName string, timeTaken float64, err error) {
|
||||
if err != nil {
|
||||
awsAPIErrorMetric.With(prometheus.Labels{"request": actionName}).Inc()
|
||||
} else {
|
||||
awsAPIMetric.With(prometheus.Labels{"request": actionName}).Observe(timeTaken)
|
||||
}
|
||||
}
|
||||
|
||||
func recordAWSThrottlesMetric(operation string) {
|
||||
awsAPIThrottlesMetric.With(prometheus.Labels{"operation_name": operation}).Inc()
|
||||
}
|
||||
|
||||
func registerMetrics() {
|
||||
prometheus.MustRegister(awsApiMetric)
|
||||
prometheus.MustRegister(awsApiErrorMetric)
|
||||
prometheus.MustRegister(awsAPIMetric)
|
||||
prometheus.MustRegister(awsAPIErrorMetric)
|
||||
prometheus.MustRegister(awsAPIThrottlesMetric)
|
||||
}
|
||||
|
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_test.go
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_test.go
generated
vendored
@ -1319,6 +1319,29 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestFindSecurityGroupForInstance(t *testing.T) {
|
||||
groups := map[string]*ec2.SecurityGroup{"sg123": {GroupId: aws.String("sg123")}}
|
||||
id, err := findSecurityGroupForInstance(&ec2.Instance{SecurityGroups: []*ec2.GroupIdentifier{{GroupId: aws.String("sg123"), GroupName: aws.String("my_group")}}}, groups)
|
||||
if err != nil {
|
||||
t.Error()
|
||||
}
|
||||
assert.Equal(t, *id.GroupId, "sg123")
|
||||
assert.Equal(t, *id.GroupName, "my_group")
|
||||
}
|
||||
|
||||
func TestFindSecurityGroupForInstanceMultipleTagged(t *testing.T) {
|
||||
groups := map[string]*ec2.SecurityGroup{"sg123": {GroupId: aws.String("sg123")}}
|
||||
_, err := findSecurityGroupForInstance(&ec2.Instance{
|
||||
SecurityGroups: []*ec2.GroupIdentifier{
|
||||
{GroupId: aws.String("sg123"), GroupName: aws.String("my_group")},
|
||||
{GroupId: aws.String("sg123"), GroupName: aws.String("another_group")},
|
||||
},
|
||||
}, groups)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "sg123(my_group)")
|
||||
assert.Contains(t, err.Error(), "sg123(another_group)")
|
||||
}
|
||||
|
||||
func newMockedFakeAWSServices(id string) *FakeAWSServices {
|
||||
s := NewFakeAWSServices(id)
|
||||
s.ec2 = &MockedFakeEC2{FakeEC2Impl: s.ec2.(*FakeEC2Impl)}
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go
generated
vendored
@ -42,9 +42,3 @@ func stringSetFromPointers(in []*string) sets.String {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// orZero returns the value, or 0 if the pointer is nil
|
||||
// Deprecated: prefer aws.Int64Value
|
||||
func orZero(v *int64) int64 {
|
||||
return aws.Int64Value(v)
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go
generated
vendored
@ -69,16 +69,19 @@ func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Return a user-friendly string describing the request, for use in log messages
|
||||
func describeRequest(r *request.Request) string {
|
||||
service := r.ClientInfo.ServiceName
|
||||
|
||||
// Return the operation name, for use in log messages and metrics
|
||||
func operationName(r *request.Request) string {
|
||||
name := "?"
|
||||
if r.Operation != nil {
|
||||
name = r.Operation.Name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
return service + "::" + name
|
||||
// Return a user-friendly string describing the request, for use in log messages
|
||||
func describeRequest(r *request.Request) string {
|
||||
service := r.ClientInfo.ServiceName
|
||||
return service + "::" + operationName(r)
|
||||
}
|
||||
|
||||
// Added to the AfterRetry chain; called after any error
|
||||
@ -92,6 +95,7 @@ func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
|
||||
}
|
||||
if awsError.Code() == "RequestLimitExceeded" {
|
||||
c.backoff.ReportError()
|
||||
recordAWSThrottlesMetric(operationName(r))
|
||||
glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
|
||||
describeRequest(r))
|
||||
}
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go
generated
vendored
@ -138,10 +138,10 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool {
|
||||
for _, tag := range tags {
|
||||
tagKey := aws.StringValue(tag.Key)
|
||||
// For 1.6, we continue to recognize the legacy tags, for the 1.5 -> 1.6 upgrade
|
||||
if tagKey == TagNameKubernetesClusterLegacy {
|
||||
return aws.StringValue(tag.Value) == t.ClusterID
|
||||
// Note that we want to continue traversing tag list if we see a legacy tag with value != ClusterID
|
||||
if (tagKey == TagNameKubernetesClusterLegacy) && (aws.StringValue(tag.Value) == t.ClusterID) {
|
||||
return true
|
||||
}
|
||||
|
||||
if tagKey == clusterTagKey {
|
||||
return true
|
||||
}
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
@ -42,11 +42,10 @@ go_library(
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
|
||||
@ -57,6 +56,7 @@ go_library(
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
@ -89,10 +89,9 @@ go_test(
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go
generated
vendored
@ -43,7 +43,7 @@ type AzureAuthConfig struct {
|
||||
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
|
||||
// Use managed service identity for the virtual machine to access Azure ARM APIs
|
||||
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
|
||||
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
|
||||
// The ID of the Azure Subscription that the cluster is deployed in
|
||||
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
|
||||
}
|
||||
|
27
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
27
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
@ -49,6 +49,14 @@ const (
|
||||
|
||||
vmTypeVMSS = "vmss"
|
||||
vmTypeStandard = "standard"
|
||||
|
||||
loadBalancerSkuBasic = "basic"
|
||||
loadBalancerSkuStandard = "standard"
|
||||
)
|
||||
|
||||
var (
|
||||
// Master nodes are not added to standard load balancer by default.
|
||||
defaultExcludeMasterFromStandardLB = true
|
||||
)
|
||||
|
||||
// Config holds the configuration parsed from the --cloud-config flag
|
||||
@ -109,11 +117,15 @@ type Config struct {
|
||||
// Use instance metadata service where possible
|
||||
UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
|
||||
|
||||
// Use managed service identity for the virtual machine to access Azure ARM APIs
|
||||
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
|
||||
// Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
|
||||
// If not set, it will be default to basic.
|
||||
LoadBalancerSku string `json:"loadBalancerSku" yaml:"loadBalancerSku"`
|
||||
// ExcludeMasterFromStandardLB excludes master nodes from standard load balancer.
|
||||
// If not set, it will be default to true.
|
||||
ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB" yaml:"excludeMasterFromStandardLB"`
|
||||
|
||||
// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
|
||||
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"`
|
||||
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount" yaml:"maximumLoadBalancerRuleCount"`
|
||||
}
|
||||
|
||||
// Cloud holds the config and clients
|
||||
@ -160,6 +172,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.VMType == "" {
|
||||
// default to standard vmType if not set.
|
||||
config.VMType = vmTypeStandard
|
||||
}
|
||||
|
||||
env, err := auth.ParseAzureEnvironment(config.Cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -206,7 +223,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
|
||||
config.CloudProviderRateLimitQPSWrite,
|
||||
config.CloudProviderRateLimitBucketWrite)
|
||||
}
|
||||
|
||||
// Do not add master nodes to standard LB by default.
|
||||
if config.ExcludeMasterFromStandardLB == nil {
|
||||
config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB
|
||||
}
|
||||
|
||||
azClientConfig := &azClientConfig{
|
||||
|
221
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
221
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
@ -20,14 +20,14 @@ import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// requestBackoff if backoff is disabled in cloud provider it
|
||||
@ -51,11 +51,14 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
|
||||
var retryErr error
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
machine, retryErr = az.getVirtualMachine(name)
|
||||
if retryErr == cloudprovider.InstanceNotFound {
|
||||
return true, cloudprovider.InstanceNotFound
|
||||
}
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Info("backoff: success")
|
||||
glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
|
||||
return true, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
@ -68,10 +71,11 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
|
||||
// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
|
||||
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
|
||||
allNodes := []compute.VirtualMachine{}
|
||||
var result compute.VirtualMachineListResult
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, az.ResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
@ -85,57 +89,34 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
appendResults := (result.Value != nil && len(*result.Value) > 0)
|
||||
for appendResults {
|
||||
allNodes = append(allNodes, *result.Value...)
|
||||
appendResults = false
|
||||
// follow the next link to get all the vms for resource group
|
||||
if result.NextLink != nil {
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
result, retryErr = az.VirtualMachinesClient.ListNextResults(az.ResourceGroup, result)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup, retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("VirtualMachinesClient.ListNextResults(%v): success", az.ResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return allNodes, err
|
||||
}
|
||||
appendResults = (result.Value != nil && len(*result.Value) > 0)
|
||||
}
|
||||
}
|
||||
|
||||
return allNodes, err
|
||||
}
|
||||
|
||||
// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
|
||||
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
|
||||
var ip string
|
||||
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
|
||||
var ip, publicIP string
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ip, retryErr = az.getIPForMachine(name)
|
||||
ip, publicIP, retryErr = az.getIPForMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Info("backoff: success")
|
||||
glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
|
||||
return true, nil
|
||||
})
|
||||
return ip, err
|
||||
return ip, publicIP, err
|
||||
}
|
||||
|
||||
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
|
||||
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
|
||||
done, err := processRetryResponse(resp.Response, err)
|
||||
done, err := processHTTPRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after updating
|
||||
az.nsgCache.Delete(*sg.Name)
|
||||
@ -147,11 +128,12 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
|
||||
// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
|
||||
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
|
||||
done, err := processRetryResponse(resp.Response, err)
|
||||
done, err := processHTTPRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after updating
|
||||
az.lbCache.Delete(*lb.Name)
|
||||
@ -162,12 +144,14 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
|
||||
|
||||
// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
|
||||
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
|
||||
allLBs := []network.LoadBalancer{}
|
||||
var result network.LoadBalancerListResult
|
||||
var allLBs []network.LoadBalancer
|
||||
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
@ -181,42 +165,19 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
appendResults := (result.Value != nil && len(*result.Value) > 0)
|
||||
for appendResults {
|
||||
allLBs = append(allLBs, *result.Value...)
|
||||
appendResults = false
|
||||
|
||||
// follow the next link to get all the vms for resource group
|
||||
if result.NextLink != nil {
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
result, retryErr = az.LoadBalancerClient.ListNextResults(az.ResourceGroup, result)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("LoadBalancerClient.ListNextResults(%v) - backoff: success", az.ResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return allLBs, err
|
||||
}
|
||||
appendResults = (result.Value != nil && len(*result.Value) > 0)
|
||||
}
|
||||
}
|
||||
|
||||
return allLBs, nil
|
||||
}
|
||||
|
||||
// ListPIPWithRetry list the PIP resources in the given resource group
|
||||
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
|
||||
allPIPs := []network.PublicIPAddress{}
|
||||
var result network.PublicIPAddressListResult
|
||||
var allPIPs []network.PublicIPAddress
|
||||
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
pipResourceGroup,
|
||||
@ -230,74 +191,52 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd
|
||||
return nil, err
|
||||
}
|
||||
|
||||
appendResults := (result.Value != nil && len(*result.Value) > 0)
|
||||
for appendResults {
|
||||
allPIPs = append(allPIPs, *result.Value...)
|
||||
appendResults = false
|
||||
|
||||
// follow the next link to get all the pip resources for resource group
|
||||
if result.NextLink != nil {
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
result, retryErr = az.PublicIPAddressesClient.ListNextResults(az.ResourceGroup, result)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
|
||||
pipResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", pipResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return allPIPs, err
|
||||
}
|
||||
appendResults = (result.Value != nil && len(*result.Value) > 0)
|
||||
}
|
||||
}
|
||||
|
||||
return allPIPs, nil
|
||||
}
|
||||
|
||||
// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
|
||||
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
return processRetryResponse(resp, err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
done, err := processRetryResponse(resp, err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
|
||||
done, err := processHTTPRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after deleting
|
||||
az.lbCache.Delete(lbName)
|
||||
@ -309,48 +248,52 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error {
|
||||
// CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
return processRetryResponse(resp.Response, err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
|
||||
glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
|
||||
return processRetryResponse(resp, err)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
|
||||
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
|
||||
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters computepreview.VirtualMachineScaleSetVM) error {
|
||||
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
|
||||
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
|
||||
@ -361,11 +304,11 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st
|
||||
// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
|
||||
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
|
||||
if isSuccessHTTPResponse(resp) {
|
||||
glog.V(2).Infof("backoff: success, HTTP response=%d", resp.StatusCode)
|
||||
glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
|
||||
return true, nil
|
||||
}
|
||||
if shouldRetryAPIRequest(resp, err) {
|
||||
glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
@ -418,7 +361,7 @@ func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
|
||||
}
|
||||
|
||||
if shouldRetryHTTPRequest(resp, err) {
|
||||
glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
|
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
@ -27,7 +27,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
azstorage "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
@ -277,7 +277,10 @@ func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error)
|
||||
if account, exists := c.accounts[SAName]; exists && account.key != "" {
|
||||
return c.accounts[SAName].key, nil
|
||||
}
|
||||
listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName)
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(ctx, c.common.resourceGroup, SAName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -432,7 +435,9 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
|
||||
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(c.common.resourceGroup)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(ctx, c.common.resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -484,12 +489,12 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto
|
||||
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storageAccountType},
|
||||
Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")},
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
|
||||
Location: &location}
|
||||
cancel := make(chan struct{})
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
_, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel)
|
||||
err := <-errChan
|
||||
_, err := c.common.cloud.StorageAccountClient.Create(ctx, c.common.resourceGroup, storageAccountName, cp)
|
||||
if err != nil {
|
||||
return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err))
|
||||
}
|
||||
@ -584,7 +589,9 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
|
||||
|
||||
//Gets storage account exist, provisionStatus, Error if any
|
||||
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
|
||||
account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
account, err := c.common.cloud.StorageAccountClient.GetProperties(ctx, c.common.resourceGroup, storageAccountName)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
671
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go
generated
vendored
671
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go
generated
vendored
File diff suppressed because it is too large
Load Diff
230
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
230
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
@ -20,10 +20,12 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -56,119 +58,11 @@ type controllerCommon struct {
|
||||
cloud *Cloud
|
||||
}
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
// 1. vmType is standard, attach with availabilitySet.AttachDisk.
|
||||
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
|
||||
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) {
|
||||
// 1. vmType is standard, return cloud.vmSet directly.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then attach with availabilitySet.AttachDisk.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, attach with scaleSet.AttachDisk.
|
||||
return ss.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
|
||||
|
||||
// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
|
||||
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
// 1. vmType is standard, detach with availabilitySet.DetachDiskByName.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then detach with availabilitySet.DetachDiskByName.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, detach with scaleSet.DetachDiskByName.
|
||||
return ss.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
// 1. vmType is standard, get with availabilitySet.GetDiskLun.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then get with availabilitySet.GetDiskLun.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, get with scaleSet.GetDiskLun.
|
||||
return ss.GetDiskLun(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
|
||||
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
// 1. vmType is standard, get with availabilitySet.GetNextDiskLun.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.GetNextDiskLun(nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then get with availabilitySet.GetNextDiskLun.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetNextDiskLun(nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, get with scaleSet.GetNextDiskLun.
|
||||
return ss.GetNextDiskLun(nodeName)
|
||||
}
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
// 1. vmType is standard, check with availabilitySet.DisksAreAttached.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
|
||||
return c.cloud.vmSet, nil
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
@ -177,16 +71,118 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
	}

	// 3. If the node is managed by availability set, then check with availabilitySet.DisksAreAttached.
	// 3. If the node is managed by availability set, then return ss.availabilitySet.
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
	if err != nil {
		return nil, err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
		return ss.availabilitySet, nil
	}

	// 4. Node is managed by vmss, check with scaleSet.DisksAreAttached.
	return ss.DisksAreAttached(diskNames, nodeName)
	// 4. Node is managed by vmss
	return ss, nil
}
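The refactor above collapses the repeated standard-versus-vmss branching into a single getNodeVMSet dispatcher that every disk operation now goes through. A minimal self-contained sketch of that dispatch pattern, with a trimmed-down interface and hypothetical type names (not the vendored code itself):

package main

import "fmt"

// diskVMSet is a hypothetical, trimmed-down stand-in for the VMSet interface:
// both the availability-set and the scale-set implementation satisfy it.
type diskVMSet interface {
	AttachDisk(diskName, diskURI string) error
}

type availabilitySetImpl struct{}

func (availabilitySetImpl) AttachDisk(diskName, diskURI string) error {
	fmt.Println("attach via availability set:", diskName)
	return nil
}

type scaleSetImpl struct{}

func (scaleSetImpl) AttachDisk(diskName, diskURI string) error {
	fmt.Println("attach via scale set:", diskName)
	return nil
}

// getNodeVMSet mirrors the idea above: pick the implementation once,
// so callers stay oblivious to the node's VM type.
func getNodeVMSet(managedByAvailabilitySet bool) diskVMSet {
	if managedByAvailabilitySet {
		return availabilitySetImpl{}
	}
	return scaleSetImpl{}
}

func main() {
	vmset := getNodeVMSet(true)
	_ = vmset.AttachDisk("disk-1", "https://example/disk-1.vhd")
}

Once the implementation is chosen in one place, AttachDisk, DetachDiskByName and the other operations can stay type-agnostic, which is exactly what the thin wrappers below do.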
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vmset, err := c.getNodeVMSet(nodeName)
	if err != nil {
		return err
	}

	return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}

// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	vmset, err := c.getNodeVMSet(nodeName)
	if err != nil {
		return err
	}

	return vmset.DetachDiskByName(diskName, diskURI, nodeName)
}

// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
	vmset, err := c.getNodeVMSet(nodeName)
	if err != nil {
		return nil, err
	}

	return vmset.GetDataDisks(nodeName)
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	disks, err := c.getNodeDataDisks(nodeName)
	if err != nil {
		glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
		return -1, err
	}

	used := make([]bool, maxLUN)
	for _, disk := range disks {
		if disk.Lun != nil {
			used[*disk.Lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("all luns are used")
}
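GetNextDiskLun is a first-free-slot scan over the LUN table: mark every LUN reported by the data disks as used, then return the lowest unmarked index. A standalone sketch of the same scan (the maxLUN of 64 here is illustrative; the real constant lives in the azure package):

package main

import (
	"errors"
	"fmt"
)

const maxLUN = 64 // illustrative; the vendored code takes this from its own constants

// nextFreeLun returns the lowest LUN not present in usedLuns, mirroring the scan above.
func nextFreeLun(usedLuns []int32) (int32, error) {
	used := make([]bool, maxLUN)
	for _, lun := range usedLuns {
		if lun >= 0 && int(lun) < maxLUN {
			used[lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, errors.New("all luns are used")
}

func main() {
	lun, err := nextFreeLun([]int32{0, 1, 3})
	fmt.Println(lun, err) // prints: 2 <nil>
}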
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
nodeName, diskNames)
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
return attached, err
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
for _, diskName := range diskNames {
|
||||
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
|
||||
attached[diskName] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
}
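DisksAreAttached seeds the result map with false for every requested disk and only flips entries that match a data disk found on the node, so callers can range over the map without checking key existence. A small self-contained sketch of that map-building pattern (the helper name is hypothetical):

package main

import "fmt"

// disksAttachedMap mirrors the shape of the result above: every requested name
// gets an entry, defaulting to false, so callers can range over it safely.
func disksAttachedMap(requested []string, attachedOnNode []string) map[string]bool {
	attached := make(map[string]bool, len(requested))
	for _, name := range requested {
		attached[name] = false
	}
	for _, name := range attachedOnNode {
		if _, ok := attached[name]; ok {
			attached[name] = true
		}
	}
	return attached
}

func main() {
	fmt.Println(disksAttachedMap([]string{"disk-a", "disk-b"}, []string{"disk-b"}))
	// map[disk-a:false disk-b:true]
}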
|
||||
|
90
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go
generated
vendored
@ -20,11 +20,10 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// AttachDisk attaches a vhd to vm
|
||||
@ -70,10 +69,10 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
|
||||
respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
|
||||
resp := <-respChan
|
||||
err = <-errChan
|
||||
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
|
||||
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
@ -135,10 +134,10 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
|
||||
respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
|
||||
resp := <-respChan
|
||||
err = <-errChan
|
||||
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
|
||||
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
@ -156,71 +155,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
|
||||
return err
|
||||
}
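Both hunks above swap the old channel-based SDK call for a context-aware CreateOrUpdate followed by an explicit backoff retry when the response looks retryable. A self-contained sketch of that call-then-retry shape; shouldRetry, update and retryUpdate are generic stand-ins for shouldRetryHTTPRequest, the VirtualMachinesClient call and CreateOrUpdateVMWithRetry:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

// shouldRetry is a stand-in for shouldRetryHTTPRequest: retry on transient failures.
func shouldRetry(resp *http.Response, err error) bool {
	if err != nil {
		return true
	}
	return resp != nil && (resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode >= 500)
}

// update is a stand-in for the one-shot, context-aware CreateOrUpdate call.
func update(ctx context.Context) (*http.Response, error) {
	return &http.Response{StatusCode: http.StatusTooManyRequests}, errors.New("throttled")
}

// retryUpdate is a stand-in for the backoff-wrapped retry helper.
func retryUpdate(ctx context.Context) error {
	fmt.Println("falling back to backoff retry")
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	resp, err := update(ctx)
	if shouldRetry(resp, err) {
		if retryErr := retryUpdate(ctx); retryErr != nil {
			err = retryErr
		} else {
			err = nil
		}
	}
	fmt.Println("final error:", err)
}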
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
|
||||
func (as *availabilitySet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
// GetDataDisks gets a list of data disks attached to the node.
|
||||
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
return nil, err
|
||||
}
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
|
||||
if vm.StorageProfile.DataDisks == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun
|
||||
// return -1 if all luns are used
|
||||
func (as *availabilitySet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
used := make([]bool, maxLUN)
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil {
|
||||
used[*disk.Lun] = true
|
||||
}
|
||||
}
|
||||
for k, v := range used {
|
||||
if !v {
|
||||
return int32(k), nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("All Luns are used")
|
||||
}
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
|
||||
func (as *availabilitySet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
nodeName, diskNames)
|
||||
return attached, nil
|
||||
} else if err != nil {
|
||||
return attached, err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
for _, diskName := range diskNames {
|
||||
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
|
||||
attached[diskName] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
|
||||
return *vm.StorageProfile.DataDisks, nil
|
||||
}
|
||||
|
100
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
@ -20,12 +20,10 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// AttachDisk attaches a vhd to vm
|
||||
@ -36,27 +34,30 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
|
||||
return err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
disks := []compute.DataDisk{}
|
||||
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
|
||||
disks = *vm.StorageProfile.DataDisks
|
||||
}
|
||||
if isManagedDisk {
|
||||
disks = append(disks,
|
||||
computepreview.DataDisk{
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Lun: &lun,
|
||||
Caching: computepreview.CachingTypes(cachingMode),
|
||||
Caching: compute.CachingTypes(cachingMode),
|
||||
CreateOption: "attach",
|
||||
ManagedDisk: &computepreview.ManagedDiskParameters{
|
||||
ManagedDisk: &compute.ManagedDiskParameters{
|
||||
ID: &diskURI,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
disks = append(disks,
|
||||
computepreview.DataDisk{
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Vhd: &computepreview.VirtualHardDisk{
|
||||
Vhd: &compute.VirtualHardDisk{
|
||||
URI: &diskURI,
|
||||
},
|
||||
Lun: &lun,
|
||||
Caching: computepreview.CachingTypes(cachingMode),
|
||||
Caching: compute.CachingTypes(cachingMode),
|
||||
CreateOption: "attach",
|
||||
})
|
||||
}
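The scale-set path no longer dereferences vm.StorageProfile.DataDisks unconditionally; it starts from an empty slice and copies only when both pointers are non-nil. A small self-contained sketch of that nil-safe copy, using simplified struct shapes rather than the SDK types:

package main

import "fmt"

type dataDisk struct{ Name string }

type storageProfile struct{ DataDisks *[]dataDisk }

type virtualMachine struct{ StorageProfile *storageProfile }

// existingDisks mirrors the guard above: a nil profile or nil slice just means "no disks yet".
func existingDisks(vm virtualMachine) []dataDisk {
	disks := []dataDisk{}
	if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
		disks = *vm.StorageProfile.DataDisks
	}
	return disks
}

func main() {
	fmt.Println(len(existingDisks(virtualMachine{}))) // 0, no panic on the empty VM

	existing := []dataDisk{{Name: "disk-a"}}
	fmt.Println(len(existingDisks(virtualMachine{StorageProfile: &storageProfile{DataDisks: &existing}}))) // 1
}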
|
||||
@ -97,7 +98,10 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
return err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
disks := []compute.DataDisk{}
|
||||
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
|
||||
disks = *vm.StorageProfile.DataDisks
|
||||
}
|
||||
bFoundDisk := false
|
||||
for i, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
@ -139,76 +143,16 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
|
||||
func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
// GetDataDisks gets a list of data disks attached to the node.
|
||||
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
_, _, vm, err := ss.getVmssVM(string(nodeName))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun
|
||||
// return -1 if all luns are used
|
||||
func (ss *scaleSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
_, _, vm, err := ss.getVmssVM(string(nodeName))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
used := make([]bool, maxLUN)
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil {
|
||||
used[*disk.Lun] = true
|
||||
}
|
||||
}
|
||||
for k, v := range used {
|
||||
if !v {
|
||||
return int32(k), nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("All Luns are used")
|
||||
}
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
|
||||
func (ss *scaleSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
|
||||
_, _, vm, err := ss.getVmssVM(string(nodeName))
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
nodeName, diskNames)
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
return attached, err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
for _, diskName := range diskNames {
|
||||
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
|
||||
attached[diskName] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
|
||||
return *vm.StorageProfile.DataDisks, nil
|
||||
}
|
||||
|
571
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
File diff suppressed because it is too large
60
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
@ -18,7 +18,6 @@ package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
@ -31,9 +30,39 @@ import (
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
|
||||
ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: ip},
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}
|
||||
if len(publicIP) > 0 {
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: publicIP,
|
||||
})
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Not local instance, get addresses from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
return addressGetter(name)
|
||||
}
|
||||
|
||||
ipAddress := IPAddress{}
|
||||
err := az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
|
||||
err = az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -51,16 +80,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
ip, err := az.GetIPForMachineWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("NodeAddresses(%s) abort backoff", name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: ip},
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}, nil
|
||||
return addressGetter(name)
|
||||
}
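The addressGetter closure above always reports the internal IP and the hostname, and appends an external address only when a public IP exists. A self-contained sketch of that assembly, with plain structs standing in for the k8s.io/api types:

package main

import "fmt"

type nodeAddress struct{ Type, Address string }

// buildNodeAddresses mirrors the assembly done inside addressGetter above.
func buildNodeAddresses(nodeName, internalIP, publicIP string) []nodeAddress {
	addresses := []nodeAddress{
		{Type: "InternalIP", Address: internalIP},
		{Type: "Hostname", Address: nodeName},
	}
	if len(publicIP) > 0 {
		addresses = append(addresses, nodeAddress{Type: "ExternalIP", Address: publicIP})
	}
	return addresses
}

func main() {
	fmt.Println(buildNodeAddresses("k8s-agent-0", "10.240.0.4", ""))
	fmt.Println(buildNodeAddresses("k8s-agent-1", "10.240.0.5", "52.1.2.3"))
}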
|
||||
|
||||
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
|
||||
@ -75,11 +95,6 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
|
||||
return az.NodeAddresses(ctx, name)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (az *Cloud) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return az.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
@ -99,6 +114,11 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
|
||||
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
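InstanceShutdownByProviderID only returns cloudprovider.NotImplemented here, so callers have to treat that sentinel as "not supported by this provider" rather than as a failure. A hedged sketch of such a caller; isNodeShutdown is hypothetical and assumes the vendored cloudprovider package is importable:

package main

import (
	"context"
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider"
)

// isNodeShutdown is a hypothetical caller: an unsupported lookup is treated as "not shutdown".
func isNodeShutdown(ctx context.Context, instances cloudprovider.Instances, providerID string) (bool, error) {
	shutdown, err := instances.InstanceShutdownByProviderID(ctx, providerID)
	if err == cloudprovider.NotImplemented {
		return false, nil // provider does not expose shutdown state; fall back to default behaviour
	}
	if err != nil {
		return false, err
	}
	return shutdown, nil
}

func main() {
	fmt.Println("isNodeShutdown swallows NotImplemented and propagates every other error")
}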
|
||||
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
@ -146,6 +166,10 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
|
||||
}
|
||||
ssName, instanceID, err := extractVmssVMName(metadataName)
|
||||
if err != nil {
|
||||
if err == ErrorNotVmssInstance {
|
||||
// Compose machineID for standard Node.
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
// Compose instanceID based on ssName and instanceID for vmss instance.
|
||||
@ -191,7 +215,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
|
||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
|
||||
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
|
||||
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return fmt.Errorf("not supported")
|
||||
return cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
|
182
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -27,7 +28,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
@ -63,14 +64,24 @@ const (
|
||||
// to specify that the service should be exposed using an Azure security rule
|
||||
// that may be shared with other service, trading specificity of rules for an
|
||||
// increase in the number of services that can be exposed. This relies on the
|
||||
// Azure "augmented security rules" feature which at the time of writing is in
|
||||
// preview and available only in certain regions.
|
||||
// Azure "augmented security rules" feature.
|
||||
ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"
|
||||
|
||||
// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
|
||||
// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
|
||||
ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"
|
||||
|
||||
// ServiceAnnotationAllowedServiceTag is the annotation used on the service
|
||||
// to specify a list of allowed service tags separated by comma
|
||||
ServiceAnnotationAllowedServiceTag = "service.beta.kubernetes.io/azure-allowed-service-tags"
|
||||
)
|
||||
|
||||
// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
|
||||
// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
|
||||
const ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"
|
||||
var (
|
||||
// supportedServiceTags holds a list of supported service tags on Azure.
|
||||
// Refer https://docs.microsoft.com/en-us/azure/virtual-network/security-overview#service-tags for more information.
|
||||
supportedServiceTags = sets.NewString("VirtualNetwork", "VIRTUAL_NETWORK", "AzureLoadBalancer", "AZURE_LOADBALANCER",
|
||||
"Internet", "INTERNET", "AzureTrafficManager", "Storage", "Sql")
|
||||
)
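The new ServiceAnnotationAllowedServiceTag annotation lets a Service whitelist Azure service tags as network security group source prefixes, validated against supportedServiceTags. A hedged sketch of a Service carrying that annotation; the names and tag values are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative Service: traffic allowed from the VirtualNetwork and AzureLoadBalancer tags.
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "frontend",
			Annotations: map[string]string{
				"service.beta.kubernetes.io/azure-allowed-service-tags": "VirtualNetwork,AzureLoadBalancer",
			},
		},
		Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
	}
	fmt.Println(svc.Annotations["service.beta.kubernetes.io/azure-allowed-service-tags"])
}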
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and
|
||||
// if so, what its status is.
|
||||
@ -124,7 +135,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
|
||||
if lbStatus != nil && len(lbStatus.Ingress) > 0 {
|
||||
serviceIP = &lbStatus.Ingress[0].IP
|
||||
}
|
||||
glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancer for %s with IP %s, wantLb = true", service.Name, logSafe(serviceIP))
|
||||
glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
|
||||
if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -154,7 +165,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancerDeleted for %s with IP %s, wantLb = false", service.Name, serviceIPToCleanup)
|
||||
glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
|
||||
if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -189,7 +200,8 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
|
||||
// check if the service already has a load balancer
|
||||
if existingLBs != nil {
|
||||
for _, existingLB := range existingLBs {
|
||||
for i := range existingLBs {
|
||||
existingLB := existingLBs[i]
|
||||
if strings.EqualFold(*existingLB.Name, defaultLBName) {
|
||||
defaultLB = &existingLB
|
||||
}
|
||||
@ -209,8 +221,14 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
}
|
||||
}
|
||||
|
||||
// service does not have a load balancer, select one
|
||||
if wantLb {
|
||||
hasMode, _, _ := getServiceLoadBalancerMode(service)
|
||||
if az.useStandardLoadBalancer() && hasMode {
|
||||
return nil, nil, false, fmt.Errorf("standard load balancer doesn't work with annotation %q", ServiceAnnotationLoadBalancerMode)
|
||||
}
|
||||
|
||||
// service does not have a basic load balancer, select one.
|
||||
// Standard load balancer doesn't need this because all backends nodes should be added to same LB.
|
||||
if wantLb && !az.useStandardLoadBalancer() {
|
||||
// select new load balancer for service
|
||||
selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes)
|
||||
if err != nil {
|
||||
@ -227,6 +245,11 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
Location: &az.Location,
|
||||
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
|
||||
}
|
||||
if az.useStandardLoadBalancer() {
|
||||
defaultLB.Sku = &network.LoadBalancerSku{
|
||||
Name: network.LoadBalancerSkuNameStandard,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return defaultLB, nil, false, nil
|
||||
@ -239,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal)
|
||||
glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
|
||||
vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
|
||||
@ -295,10 +318,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
|
||||
|
||||
func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
|
||||
if lb == nil {
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus lb is nil")
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
|
||||
return nil, nil
|
||||
}
|
||||
if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
|
||||
return nil, nil
|
||||
}
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
@ -330,6 +354,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", *lbIP, lbFrontendIPConfigName, serviceName)
|
||||
return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil
|
||||
}
|
||||
}
|
||||
@ -416,8 +441,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
|
||||
DomainNameLabel: &domainNameLabel,
|
||||
}
|
||||
}
|
||||
pip.Tags = &map[string]*string{"service": &serviceName}
|
||||
glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
|
||||
pip.Tags = map[string]*string{"service": &serviceName}
|
||||
if az.useStandardLoadBalancer() {
|
||||
pip.Sku = &network.PublicIPAddressSku{
|
||||
Name: network.PublicIPAddressSkuNameStandard,
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
|
||||
err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
|
||||
if err != nil {
|
||||
@ -426,7 +457,9 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
|
||||
}
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name)
|
||||
|
||||
pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "")
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
pip, err = az.PublicIPAddressesClient.Get(ctx, pipResourceGroup, *pip.Name, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -441,13 +474,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
|
||||
func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(2).Infof("reconcileLoadBalancer(%s) - wantLb(%t): started", serviceName, wantLb)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
|
||||
lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb)
|
||||
if err != nil {
|
||||
glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
|
||||
return nil, err
|
||||
}
|
||||
lbName := *lb.Name
|
||||
glog.V(2).Infof("reconcileLoadBalancer(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
|
||||
lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
|
||||
lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
|
||||
lbBackendPoolName := getBackendPoolName(clusterName)
|
||||
@ -465,18 +499,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
foundBackendPool := false
|
||||
for _, bp := range newBackendPools {
|
||||
if strings.EqualFold(*bp.Name, lbBackendPoolName) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
|
||||
foundBackendPool = true
|
||||
break
|
||||
} else {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
|
||||
}
|
||||
}
|
||||
if !foundBackendPool {
|
||||
newBackendPools = append(newBackendPools, network.BackendAddressPool{
|
||||
Name: to.StringPtr(lbBackendPoolName),
|
||||
})
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
|
||||
|
||||
dirtyLb = true
|
||||
lb.BackendAddressPools = &newBackendPools
|
||||
@ -494,7 +528,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for i := len(newConfigs) - 1; i >= 0; i-- {
|
||||
config := newConfigs[i]
|
||||
if serviceOwnsFrontendIP(config, service) {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
@ -504,7 +538,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for i := len(newConfigs) - 1; i >= 0; i-- {
|
||||
config := newConfigs[i]
|
||||
if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
|
||||
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
@ -568,7 +602,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
Name: to.StringPtr(lbFrontendIPConfigName),
|
||||
FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
|
||||
})
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
}
|
||||
@ -669,15 +703,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for i := len(updatedProbes) - 1; i >= 0; i-- {
|
||||
existingProbe := updatedProbes[i]
|
||||
if serviceOwnsRule(service, *existingProbe.Name) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
|
||||
keepProbe := false
|
||||
if findProbe(expectedProbes, existingProbe) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
|
||||
keepProbe = true
|
||||
}
|
||||
if !keepProbe {
|
||||
updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
|
||||
dirtyProbes = true
|
||||
}
|
||||
}
|
||||
@ -686,11 +720,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for _, expectedProbe := range expectedProbes {
|
||||
foundProbe := false
|
||||
if findProbe(updatedProbes, expectedProbe) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
|
||||
foundProbe = true
|
||||
}
|
||||
if !foundProbe {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
|
||||
updatedProbes = append(updatedProbes, expectedProbe)
|
||||
dirtyProbes = true
|
||||
}
|
||||
@ -711,13 +745,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
existingRule := updatedRules[i]
|
||||
if serviceOwnsRule(service, *existingRule.Name) {
|
||||
keepRule := false
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
|
||||
if findRule(expectedRules, existingRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
|
||||
keepRule = true
|
||||
}
|
||||
if !keepRule {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
|
||||
updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
|
||||
dirtyRules = true
|
||||
}
|
||||
@ -727,11 +761,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
for _, expectedRule := range expectedRules {
|
||||
foundRule := false
|
||||
if findRule(updatedRules, expectedRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
|
||||
foundRule = true
|
||||
}
|
||||
if !foundRule {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
|
||||
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
|
||||
updatedRules = append(updatedRules, expectedRule)
|
||||
dirtyRules = true
|
||||
}
|
||||
@ -748,12 +782,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
|
||||
// When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
|
||||
// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
|
||||
glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
|
||||
|
||||
// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName, lb.BackendAddressPools)
|
||||
if err != nil {
|
||||
glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
|
||||
return nil, err
|
||||
@ -761,18 +795,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)
|
||||
|
||||
// Remove the LB.
|
||||
glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName)
|
||||
glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
|
||||
err = az.DeleteLBWithRetry(lbName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
|
||||
} else {
|
||||
glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
|
||||
err := az.CreateOrUpdateLBWithRetry(*lb)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -780,7 +814,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
// Refresh updated lb which will be used later in other places.
|
||||
newLB, exist, err := az.getAzureLoadBalancer(lbName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
|
||||
return nil, err
|
||||
}
|
||||
if !exist {
|
||||
@ -794,13 +828,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
if wantLb && nodes != nil {
|
||||
// Add the machines to the backend pool if they're not already
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName)
|
||||
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
|
||||
return lb, nil
|
||||
}
|
||||
|
||||
@ -839,8 +873,12 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceTags, err := getServiceTags(service)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var sourceAddressPrefixes []string
|
||||
if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
|
||||
if (sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 {
|
||||
if !requiresInternalLoadBalancer(service) {
|
||||
sourceAddressPrefixes = []string{"Internet"}
|
||||
}
|
||||
@ -848,6 +886,9 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
for _, ip := range sourceRanges {
|
||||
sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
|
||||
}
|
||||
for _, serviceTag := range serviceTags {
|
||||
sourceAddressPrefixes = append(sourceAddressPrefixes, serviceTag)
|
||||
}
|
||||
}
|
||||
expectedSecurityRules := []network.SecurityRule{}
|
||||
|
||||
@ -885,8 +926,8 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
// update security rules
|
||||
dirtySg := false
|
||||
var updatedRules []network.SecurityRule
|
||||
if sg.SecurityRules != nil {
|
||||
updatedRules = *sg.SecurityRules
|
||||
if sg.SecurityGroupPropertiesFormat != nil && sg.SecurityGroupPropertiesFormat.SecurityRules != nil {
|
||||
updatedRules = *sg.SecurityGroupPropertiesFormat.SecurityRules
|
||||
}
|
||||
|
||||
for _, r := range updatedRules {
|
||||
@ -987,7 +1028,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
|
||||
if dirtySg {
|
||||
sg.SecurityRules = &updatedRules
|
||||
glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
|
||||
glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
|
||||
glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
|
||||
err := az.CreateOrUpdateSGWithRetry(sg)
|
||||
if err != nil {
|
||||
@ -1167,15 +1208,15 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
|
||||
for _, pip := range pips {
|
||||
if pip.Tags != nil &&
|
||||
(*pip.Tags)["service"] != nil &&
|
||||
*(*pip.Tags)["service"] == serviceName {
|
||||
(pip.Tags)["service"] != nil &&
|
||||
*(pip.Tags)["service"] == serviceName {
|
||||
// We need to process for pips belong to this service
|
||||
pipName := *pip.Name
|
||||
if wantLb && !isInternal && pipName == desiredPipName {
|
||||
// This is the only case we should preserve the
|
||||
// Public ip resource with match service tag
|
||||
} else {
|
||||
glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName)
|
||||
glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
|
||||
glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
|
||||
err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
|
||||
if err != nil {
|
||||
@ -1189,7 +1230,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
glog.V(2).Infof("ensure(%s): pip(%s) - finished", serviceName, pipName)
|
||||
glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1218,13 +1259,30 @@ func findProbe(probes []network.Probe, probe network.Probe) bool {
|
||||
|
||||
func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool {
|
||||
for _, existingRule := range rules {
|
||||
if strings.EqualFold(*existingRule.Name, *rule.Name) {
|
||||
if strings.EqualFold(*existingRule.Name, *rule.Name) &&
|
||||
equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// equalLoadBalancingRulePropertiesFormat checks whether the provided LoadBalancingRulePropertiesFormat are equal.
|
||||
// Note: only fields used in reconcileLoadBalancer are considered.
|
||||
func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePropertiesFormat) bool {
|
||||
if s == nil || t == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return reflect.DeepEqual(s.Protocol, t.Protocol) &&
|
||||
reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) &&
|
||||
reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) &&
|
||||
reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) &&
|
||||
reflect.DeepEqual(s.FrontendPort, t.FrontendPort) &&
|
||||
reflect.DeepEqual(s.BackendPort, t.BackendPort) &&
|
||||
reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP)
|
||||
}
|
||||
|
||||
// This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction.
|
||||
// Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined.
|
||||
// We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal,
|
||||
@ -1320,3 +1378,23 @@ func useSharedSecurityRule(service *v1.Service) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getServiceTags(service *v1.Service) ([]string, error) {
|
||||
if serviceTags, found := service.Annotations[ServiceAnnotationAllowedServiceTag]; found {
|
||||
tags := strings.Split(strings.TrimSpace(serviceTags), ",")
|
||||
for _, tag := range tags {
|
||||
// Storage and Sql service tags support setting regions with suffix ".Region"
|
||||
if strings.HasPrefix(tag, "Storage.") || strings.HasPrefix(tag, "Sql.") {
|
||||
continue
|
||||
}
|
||||
|
||||
if !supportedServiceTags.Has(tag) {
|
||||
return nil, fmt.Errorf("only %q are allowed in service tags", supportedServiceTags.List())
|
||||
}
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
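getServiceTags splits the comma-separated annotation value, lets Storage.<Region> and Sql.<Region> through without an allow-list lookup, and rejects anything else not in supportedServiceTags. A self-contained sketch of the same parsing and validation, using a plain map in place of sets.String:

package main

import (
	"fmt"
	"strings"
)

// validateServiceTags mirrors the logic above with a plain allow-list map.
func validateServiceTags(annotation string, supported map[string]bool) ([]string, error) {
	if annotation == "" {
		return nil, nil
	}
	tags := strings.Split(strings.TrimSpace(annotation), ",")
	for _, tag := range tags {
		// Storage and Sql tags may carry a ".Region" suffix and skip the allow-list check.
		if strings.HasPrefix(tag, "Storage.") || strings.HasPrefix(tag, "Sql.") {
			continue
		}
		if !supported[tag] {
			return nil, fmt.Errorf("service tag %q is not allowed", tag)
		}
	}
	return tags, nil
}

func main() {
	supported := map[string]bool{"VirtualNetwork": true, "Internet": true, "Storage": true, "Sql": true}
	fmt.Println(validateServiceTags("VirtualNetwork,Storage.WestUS", supported)) // [VirtualNetwork Storage.WestUS] <nil>
	fmt.Println(validateServiceTags("Foo", supported))                           // nil slice plus an error
}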
|
||||
|
115
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@ -97,3 +97,116 @@ func TestFindProbe(t *testing.T) {
|
||||
		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
	}
}

func TestFindRule(t *testing.T) {
	tests := []struct {
		msg          string
		existingRule []network.LoadBalancingRule
		curRule      network.LoadBalancingRule
		expected     bool
	}{
		{
			msg:      "empty existing rules should return false",
			expected: false,
		},
		{
			msg: "rule names unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("httpProbe1"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						FrontendPort: to.Int32Ptr(1),
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("httpProbe2"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					FrontendPort: to.Int32Ptr(1),
				},
			},
			expected: false,
		},
		{
			msg: "rule names match while frontend ports unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("httpProbe"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						FrontendPort: to.Int32Ptr(1),
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("httpProbe"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					FrontendPort: to.Int32Ptr(2),
				},
			},
			expected: false,
		},
		{
			msg: "rule names match while backend ports unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("httpProbe"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						BackendPort: to.Int32Ptr(1),
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("httpProbe"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					BackendPort: to.Int32Ptr(2),
				},
			},
			expected: false,
		},
		{
			msg: "rule names match while LoadDistribution unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("probe1"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						LoadDistribution: network.Default,
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("probe2"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					LoadDistribution: network.SourceIP,
				},
			},
			expected: false,
		},
		{
			msg: "both rule names and LoadBalancingRulePropertiesFormats match should return true",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("matchName"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						BackendPort:      to.Int32Ptr(2),
						FrontendPort:     to.Int32Ptr(2),
						LoadDistribution: network.SourceIP,
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("matchName"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					BackendPort:      to.Int32Ptr(2),
					FrontendPort:     to.Int32Ptr(2),
					LoadDistribution: network.SourceIP,
				},
			},
			expected: true,
		},
	}

	for i, test := range tests {
		findResult := findRule(test.existingRule, test.curRule)
		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
	}
}

116
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
@ -17,13 +17,17 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/disk"
|
||||
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
//ManagedDiskController : managed disk controller struct
|
||||
@ -36,7 +40,8 @@ func newManagedDiskController(common *controllerCommon) (*ManagedDiskController,
|
||||
}
|
||||
|
||||
//CreateManagedDisk : create managed disk
|
||||
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
|
||||
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, resourceGroup string,
|
||||
sizeGB int, tags map[string]string) (string, error) {
|
||||
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
|
||||
|
||||
newTags := make(map[string]*string)
|
||||
@ -54,18 +59,24 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
}
|
||||
|
||||
diskSizeGB := int32(sizeGB)
|
||||
model := disk.Model{
|
||||
model := compute.Disk{
|
||||
Location: &c.common.location,
|
||||
Tags: &newTags,
|
||||
Properties: &disk.Properties{
|
||||
AccountType: disk.StorageAccountTypes(storageAccountType),
|
||||
Tags: newTags,
|
||||
Sku: &compute.DiskSku{
|
||||
Name: compute.StorageAccountTypes(storageAccountType),
|
||||
},
|
||||
DiskProperties: &compute.DiskProperties{
|
||||
DiskSizeGB: &diskSizeGB,
|
||||
CreationData: &disk.CreationData{CreateOption: disk.Empty},
|
||||
CreationData: &compute.CreationData{CreateOption: compute.Empty},
|
||||
}}
|
||||
cancel := make(chan struct{})
|
||||
respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel)
|
||||
<-respChan
|
||||
err := <-errChan
|
||||
|
||||
if resourceGroup == "" {
|
||||
resourceGroup = c.common.resourceGroup
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, model)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -73,7 +84,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
diskID := ""
|
||||
|
||||
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
|
||||
provisionState, id, err := c.getDisk(diskName)
|
||||
provisionState, id, err := c.getDisk(resourceGroup, diskName)
|
||||
diskID = id
|
||||
// We are waiting for provisioningState==Succeeded
|
||||
// We don't want to hand-off managed disks to k8s while they are
|
||||
@ -99,10 +110,15 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
//DeleteManagedDisk : delete managed disk
|
||||
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
|
||||
diskName := path.Base(diskURI)
|
||||
cancel := make(chan struct{})
|
||||
respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel)
|
||||
<-respChan
|
||||
err := <-errChan
|
||||
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
_, err = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -115,15 +131,73 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
|
||||
}
|
||||
|
||||
// return: disk provisionState, diskID, error
|
||||
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
|
||||
result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName)
|
||||
func (c *ManagedDiskController) getDisk(resourceGroup, diskName string) (string, string, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if result.Properties != nil && (*result.Properties).ProvisioningState != nil {
|
||||
return *(*result.Properties).ProvisioningState, *result.ID, nil
|
||||
if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil {
|
||||
return *(*result.DiskProperties).ProvisioningState, *result.ID, nil
|
||||
}
|
||||
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// ResizeDisk Expand the disk to new size
|
||||
func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
diskName := path.Base(diskURI)
|
||||
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
|
||||
if err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil {
|
||||
return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName)
|
||||
}
|
||||
|
||||
requestBytes := newSize.Value()
|
||||
// Azure resizes in chunks of GiB (not GB)
|
||||
requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024))
|
||||
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
|
||||
|
||||
glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
|
||||
// If disk already of greater or equal size than requested we return
|
||||
if *result.DiskProperties.DiskSizeGB >= requestGiB {
|
||||
return newSizeQuant, nil
|
||||
}
|
||||
|
||||
result.DiskProperties.DiskSizeGB = &requestGiB
|
||||
|
||||
ctx, cancel = getContextWithCancel()
|
||||
defer cancel()
|
||||
if _, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); err != nil {
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)
|
||||
|
||||
return newSizeQuant, nil
|
||||
}
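// Illustrative sketch, not part of the vendored change: ResizeDisk above rounds the requested
// quantity up to whole GiB (via util.RoundUpSize with a 1024^3 chunk) before calling the Disks
// API. A minimal, self-contained re-statement of that rounding, with a hypothetical helper name:

package main

import "fmt"

// roundUpToGiB rounds a byte count up to the next whole GiB, mirroring
// util.RoundUpSize(requestBytes, 1024*1024*1024) used in the resize path.
func roundUpToGiB(requestBytes int64) int32 {
	const giB = int64(1024 * 1024 * 1024)
	return int32((requestBytes + giB - 1) / giB)
}

func main() {
	// A 10.5 GiB request becomes 11 GiB.
	fmt.Println(roundUpToGiB(10*1024*1024*1024 + 512*1024*1024)) // prints 11
}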

// get resource group name from a managed disk URI, e.g. return {group-name} according to
// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}
// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get
func getResourceGroupFromDiskURI(diskURI string) (string, error) {
	fields := strings.Split(diskURI, "/")
	if len(fields) != 9 || fields[3] != "resourceGroups" {
		return "", fmt.Errorf("invalid disk URI: %s", diskURI)
	}
	return fields[4], nil
}
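// Illustrative sketch, not part of the vendored change: a standalone copy of the splitting
// logic above, showing how a managed-disk URI resolves to its resource group. The helper name,
// subscription, group, and disk names below are placeholders.

package main

import (
	"fmt"
	"strings"
)

// parseResourceGroup copies getResourceGroupFromDiskURI: field 4 of the
// slash-separated URI is the resource group name.
func parseResourceGroup(diskURI string) (string, error) {
	fields := strings.Split(diskURI, "/")
	if len(fields) != 9 || fields[3] != "resourceGroups" {
		return "", fmt.Errorf("invalid disk URI: %s", diskURI)
	}
	return fields[4], nil
}

func main() {
	uri := "/subscriptions/sub-id/resourceGroups/my-group/providers/Microsoft.Compute/disks/pvc-123"
	rg, err := parseResourceGroup(uri)
	fmt.Println(rg, err) // prints "my-group <nil>"
}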
|
||||
|
60
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -30,7 +30,7 @@ import (
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
|
||||
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
|
||||
glog.V(10).Infof("list: START clusterName=%q", clusterName)
|
||||
glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
return processRoutes(routeTable, existsRouteTable, err)
|
||||
}
|
||||
@ -50,7 +50,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
|
||||
for i, route := range *routeTable.Routes {
|
||||
instance := mapRouteNameToNodeName(*route.Name)
|
||||
cidr := *route.AddressPrefix
|
||||
glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
|
||||
glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
|
||||
|
||||
kubeRoutes[i] = &cloudprovider.Route{
|
||||
Name: *route.Name,
|
||||
@ -60,13 +60,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(10).Info("list: FINISH")
|
||||
glog.V(10).Info("ListRoutes: FINISH")
|
||||
return kubeRoutes, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
|
||||
glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return err
|
||||
} else if existsRouteTable {
|
||||
return nil
|
||||
@ -81,17 +81,17 @@ func (az *Cloud) createRouteTable() error {
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -107,11 +107,11 @@ func (az *Cloud) createRouteTable() error {
|
||||
// route.Name will be ignored, although the cloud-provider may use nameHint
|
||||
// to create a more user-meaningful name.
|
||||
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
|
||||
return err
|
||||
}
|
||||
targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
|
||||
targetIP, _, err := az.getIPForMachine(kubeRoute.TargetNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -126,51 +126,51 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
|
||||
},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
|
||||
resp := <-respChan
|
||||
err = <-errChan
|
||||
glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
retryErr := az.CreateOrUpdateRouteWithRetry(route)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("create abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRoute deletes the specified managed route
|
||||
// Route should be as returned by ListRoutes
|
||||
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
|
||||
respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
|
||||
glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)
|
||||
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) {
|
||||
glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
retryErr := az.DeleteRouteWithRetry(routeName)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("delete abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
@ -24,7 +24,7 @@ import (
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
|
||||
@ -94,10 +94,8 @@ func TestCreateRoute(t *testing.T) {
|
||||
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
|
||||
|
||||
nodeIP := "2.4.6.8"
|
||||
fakeVM.NodeToIP = map[string]map[string]string{
|
||||
"": {
|
||||
"node": nodeIP,
|
||||
},
|
||||
fakeVM.NodeToIP = map[string]string{
|
||||
"node": nodeIP,
|
||||
}
|
||||
|
||||
err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
|
||||
|
134
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
@ -28,8 +28,8 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -52,13 +52,15 @@ const (
|
||||
InternalLoadBalancerNameSuffix = "-internal"
|
||||
|
||||
// nodeLabelRole specifies the role of a node
|
||||
nodeLabelRole = "kubernetes.io/role"
|
||||
nodeLabelRole = "kubernetes.io/role"
|
||||
nicFailedState = "Failed"
|
||||
|
||||
storageAccountNameMaxLength = 24
|
||||
)
|
||||
|
||||
var errNotInVMSet = errors.New("vm is not in the vmset")
|
||||
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
|
||||
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
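// Illustrative sketch, not part of the vendored change: a self-contained example of how
// backendPoolIDRE is used further down (see ensureHostInPool) to recover the name of the load
// balancer that owns a backend address pool ID. Resource names below are placeholders.

package main

import (
	"fmt"
	"regexp"
)

var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)

func main() {
	id := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/myLB/backendAddressPools/pool1"
	if matches := backendPoolIDRE.FindStringSubmatch(id); len(matches) == 2 {
		fmt.Println(matches[1]) // prints "myLB"
	}
}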
|
||||
|
||||
// getStandardMachineID returns the full identifier of a virtual machine.
|
||||
func (az *Cloud) getStandardMachineID(machineName string) string {
|
||||
@ -123,7 +125,7 @@ func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (
|
||||
// This would be the name for Azure LoadBalancer resource.
|
||||
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
|
||||
lbNamePrefix := vmSetName
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) {
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
|
||||
lbNamePrefix = clusterName
|
||||
}
|
||||
if isInternal {
|
||||
@ -172,7 +174,7 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor
|
||||
securityProto = network.SecurityRuleProtocolUDP
|
||||
return &transportProto, &securityProto, nil, nil
|
||||
default:
|
||||
return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers")
|
||||
return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers")
|
||||
}
|
||||
|
||||
}
|
||||
@ -284,11 +286,11 @@ outer:
|
||||
return smallest, nil
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
|
||||
return -1, fmt.Errorf("securityGroup priorities are exhausted")
|
||||
}
|
||||
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
|
||||
return az.vmSet.GetIPByNodeName(string(nodeName), "")
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
|
||||
return az.vmSet.GetIPByNodeName(string(nodeName))
|
||||
}
|
||||
|
||||
var polyTable = crc32.MakeTable(crc32.Koopman)
|
||||
@ -366,12 +368,15 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
|
||||
var err error
|
||||
|
||||
machine, err = as.getVirtualMachine(types.NodeName(name))
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
if err != nil {
|
||||
if as.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
|
||||
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
@ -396,7 +401,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
|
||||
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
machine, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
@ -424,21 +429,37 @@ func (as *availabilitySet) GetPrimaryVMSetName() string {
|
||||
return as.Config.PrimaryAvailabilitySetName
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
func (as *availabilitySet) GetIPByNodeName(name, vmSetName string) (string, error) {
|
||||
nic, err := as.GetPrimaryInterface(name, vmSetName)
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) {
|
||||
nic, err := as.GetPrimaryInterface(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", err
|
||||
glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
privateIP := *ipConfig.PrivateIPAddress
|
||||
publicIP := ""
|
||||
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
|
||||
pipID := *ipConfig.PublicIPAddress.ID
|
||||
pipName, err := getLastSegment(pipID)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID)
|
||||
}
|
||||
pip, existsPip, err := as.getPublicIPAddress(as.ResourceGroup, pipName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if existsPip {
|
||||
publicIP = *pip.IPAddress
|
||||
}
|
||||
}
|
||||
|
||||
return privateIP, publicIP, nil
|
||||
}
|
||||
|
||||
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
|
||||
@ -534,8 +555,13 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
// GetPrimaryInterface gets machine primary network interface by node name.
|
||||
func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
return as.getPrimaryInterfaceWithVMSet(nodeName, "")
|
||||
}
|
||||
|
||||
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
|
||||
var machine compute.VirtualMachine
|
||||
|
||||
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
|
||||
@ -553,8 +579,14 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check availability set
|
||||
if vmSetName != "" {
|
||||
// Check availability set name. Note that vmSetName is empty string when getting
|
||||
// the Node's IP address. While vmSetName is not empty, it should be checked with
|
||||
// Node's real availability set name:
|
||||
// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
|
||||
// availability set is mismatched with vmSetName.
|
||||
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
|
||||
// don't check vmSet for it.
|
||||
if vmSetName != "" && !as.useStandardLoadBalancer() {
|
||||
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
|
||||
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
@ -563,7 +595,9 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw
|
||||
}
|
||||
}
|
||||
|
||||
nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "")
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
@ -573,9 +607,9 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (netw
|
||||
|
||||
// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string) error {
|
||||
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
nic, err := as.GetPrimaryInterface(vmName, vmSetName)
|
||||
nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
|
||||
if err != nil {
|
||||
if err == errNotInVMSet {
|
||||
glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
|
||||
@ -586,6 +620,11 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
return err
|
||||
}
|
||||
|
||||
if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState {
|
||||
glog.V(3).Infof("ensureHostInPool skips node %s because its primdary nic %s is in Failed state", nodeName, nic.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
var primaryIPConfig *network.InterfaceIPConfiguration
|
||||
primaryIPConfig, err = getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
@ -604,6 +643,24 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
if as.useStandardLoadBalancer() && len(newBackendPools) > 0 {
|
||||
// Although standard load balancer supports backends from multiple availability
|
||||
// sets, the same network interface couldn't be added to more than one load balancer of
|
||||
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
|
||||
// about this.
|
||||
for _, pool := range newBackendPools {
|
||||
backendPool := *pool.ID
|
||||
matches := backendPoolIDRE.FindStringSubmatch(backendPool)
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newBackendPools = append(newBackendPools,
|
||||
network.BackendAddressPool{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
@ -613,11 +670,11 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
|
||||
nicName := *nic.Name
|
||||
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic)
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
|
||||
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
|
||||
retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
|
||||
if retryErr != nil {
|
||||
@ -634,18 +691,23 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
hostUpdates := make([]func() error, len(nodes))
|
||||
for i, node := range nodes {
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
hostUpdates := make([]func() error, 0, len(nodes))
|
||||
for _, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
|
||||
continue
|
||||
}
|
||||
|
||||
f := func() error {
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName)
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
hostUpdates[i] = f
|
||||
hostUpdates = append(hostUpdates, f)
|
||||
}
|
||||
|
||||
errs := utilerrors.AggregateGoroutines(hostUpdates...)
|
||||
@ -657,7 +719,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
// Do nothing for availability set.
|
||||
return nil
|
||||
}
|
||||
|
87
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
@ -130,6 +130,7 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
cases := []struct {
|
||||
description string
|
||||
lbName string
|
||||
useStandardLB bool
|
||||
clusterName string
|
||||
expectedVMSet string
|
||||
}{
|
||||
@ -160,7 +161,93 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if c.useStandardLB {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuStandard
|
||||
} else {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuBasic
|
||||
}
|
||||
vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
|
||||
assert.Equal(t, c.expectedVMSet, vmset, c.description)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLoadBalancerName(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
az.PrimaryAvailabilitySetName = "primary"
|
||||
|
||||
cases := []struct {
|
||||
description string
|
||||
vmSet string
|
||||
isInternal bool
|
||||
useStandardLB bool
|
||||
clusterName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
description: "default external LB should get primary vmset",
|
||||
vmSet: "primary",
|
||||
clusterName: "azure",
|
||||
expected: "azure",
|
||||
},
|
||||
{
|
||||
description: "default internal LB should get primary vmset",
|
||||
vmSet: "primary",
|
||||
clusterName: "azure",
|
||||
isInternal: true,
|
||||
expected: "azure-internal",
|
||||
},
|
||||
{
|
||||
description: "non-default external LB should get its own vmset",
|
||||
vmSet: "as",
|
||||
clusterName: "azure",
|
||||
expected: "as",
|
||||
},
|
||||
{
|
||||
description: "non-default internal LB should get its own vmset",
|
||||
vmSet: "as",
|
||||
clusterName: "azure",
|
||||
isInternal: true,
|
||||
expected: "as-internal",
|
||||
},
|
||||
{
|
||||
description: "default standard external LB should get cluster name",
|
||||
vmSet: "primary",
|
||||
useStandardLB: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure",
|
||||
},
|
||||
{
|
||||
description: "default standard internal LB should get cluster name",
|
||||
vmSet: "primary",
|
||||
useStandardLB: true,
|
||||
isInternal: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure-internal",
|
||||
},
|
||||
{
|
||||
description: "non-default standard external LB should get cluster-name",
|
||||
vmSet: "as",
|
||||
useStandardLB: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure",
|
||||
},
|
||||
{
|
||||
description: "non-default standard internal LB should get cluster-name",
|
||||
vmSet: "as",
|
||||
useStandardLB: true,
|
||||
isInternal: true,
|
||||
clusterName: "azure",
|
||||
expected: "azure-internal",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if c.useStandardLB {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuStandard
|
||||
} else {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuBasic
|
||||
}
|
||||
loadbalancerName := az.getLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
|
||||
assert.Equal(t, c.expected, loadbalancerName, c.description)
|
||||
}
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
@ -19,7 +19,7 @@ package azure
import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
	"github.com/golang/glog"
)

2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
@ -19,7 +19,7 @@ package azure
import (
	"testing"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

func TestCreateFileShare(t *testing.T) {

24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
@ -31,7 +31,9 @@ type accountWithLocation struct {
|
||||
|
||||
// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation
|
||||
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -60,7 +62,10 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := az.StorageAccountClient.ListKeys(ctx, az.ResourceGroup, account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -107,13 +112,16 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
|
||||
glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
|
||||
accountName, az.ResourceGroup, location, accountType)
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
Tags: &map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
|
||||
Kind: storage.StorageV2,
|
||||
AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)},
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Location: &location}
|
||||
cancel := make(chan struct{})
|
||||
|
||||
_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
|
||||
err := <-errchan
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := az.StorageAccountClient.Create(ctx, az.ResourceGroup, accountName, cp)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
)
|
||||
|
||||
func TestGetStorageAccessKeys(t *testing.T) {
|
||||
|
179
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@ -34,13 +35,88 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var testClusterName = "testCluster"
|
||||
|
||||
func TestParseConfig(t *testing.T) {
|
||||
azureConfig := `{
|
||||
"aadClientCertPassword": "aadClientCertPassword",
|
||||
"aadClientCertPath": "aadClientCertPath",
|
||||
"aadClientId": "aadClientId",
|
||||
"aadClientSecret": "aadClientSecret",
|
||||
"cloud":"AzurePublicCloud",
|
||||
"cloudProviderBackoff": true,
|
||||
"cloudProviderBackoffDuration": 1,
|
||||
"cloudProviderBackoffExponent": 1,
|
||||
"cloudProviderBackoffJitter": 1,
|
||||
"cloudProviderBackoffRetries": 1,
|
||||
"cloudProviderRatelimit": true,
|
||||
"cloudProviderRateLimitBucket": 1,
|
||||
"CloudProviderRateLimitBucketWrite": 1,
|
||||
"cloudProviderRateLimitQPS": 1,
|
||||
"CloudProviderRateLimitQPSWrite": 1,
|
||||
"location": "location",
|
||||
"maximumLoadBalancerRuleCount": 1,
|
||||
"primaryAvailabilitySetName": "primaryAvailabilitySetName",
|
||||
"primaryScaleSetName": "primaryScaleSetName",
|
||||
"resourceGroup": "resourceGroup",
|
||||
"routeTableName": "routeTableName",
|
||||
"securityGroupName": "securityGroupName",
|
||||
"subnetName": "subnetName",
|
||||
"subscriptionId": "subscriptionId",
|
||||
"tenantId": "tenantId",
|
||||
"useInstanceMetadata": true,
|
||||
"useManagedIdentityExtension": true,
|
||||
"vnetName": "vnetName",
|
||||
"vnetResourceGroup": "vnetResourceGroup",
|
||||
vmType: "standard"
|
||||
}`
|
||||
expected := &Config{
|
||||
AzureAuthConfig: auth.AzureAuthConfig{
|
||||
AADClientCertPassword: "aadClientCertPassword",
|
||||
AADClientCertPath: "aadClientCertPath",
|
||||
AADClientID: "aadClientId",
|
||||
AADClientSecret: "aadClientSecret",
|
||||
Cloud: "AzurePublicCloud",
|
||||
SubscriptionID: "subscriptionId",
|
||||
TenantID: "tenantId",
|
||||
UseManagedIdentityExtension: true,
|
||||
},
|
||||
CloudProviderBackoff: true,
|
||||
CloudProviderBackoffDuration: 1,
|
||||
CloudProviderBackoffExponent: 1,
|
||||
CloudProviderBackoffJitter: 1,
|
||||
CloudProviderBackoffRetries: 1,
|
||||
CloudProviderRateLimit: true,
|
||||
CloudProviderRateLimitBucket: 1,
|
||||
CloudProviderRateLimitBucketWrite: 1,
|
||||
CloudProviderRateLimitQPS: 1,
|
||||
CloudProviderRateLimitQPSWrite: 1,
|
||||
Location: "location",
|
||||
MaximumLoadBalancerRuleCount: 1,
|
||||
PrimaryAvailabilitySetName: "primaryAvailabilitySetName",
|
||||
PrimaryScaleSetName: "primaryScaleSetName",
|
||||
ResourceGroup: "resourceGroup",
|
||||
RouteTableName: "routeTableName",
|
||||
SecurityGroupName: "securityGroupName",
|
||||
SubnetName: "subnetName",
|
||||
UseInstanceMetadata: true,
|
||||
VMType: "standard",
|
||||
VnetName: "vnetName",
|
||||
VnetResourceGroup: "vnetResourceGroup",
|
||||
}
|
||||
|
||||
buffer := bytes.NewBufferString(azureConfig)
|
||||
config, err := parseConfig(buffer)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, config)
|
||||
}
|
||||
|
||||
// Test flipServiceInternalAnnotation
|
||||
func TestFlipServiceInternalAnnotation(t *testing.T) {
|
||||
svc := getTestService("servicea", v1.ProtocolTCP, 80)
|
||||
@ -139,9 +215,11 @@ func testLoadBalancerServiceDefaultModeSelection(t *testing.T, isInternal bool)
|
||||
expectedLBName = testClusterName + "-internal"
|
||||
}
|
||||
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lb := (*result.Value)[0]
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lb := result[0]
|
||||
lbCount := len(result)
|
||||
expectedNumOfLB := 1
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
@ -189,15 +267,17 @@ func testLoadBalancerServiceAutoModeSelection(t *testing.T, isInternal bool) {
|
||||
|
||||
// expected is MIN(index, availabilitySetCount)
|
||||
expectedNumOfLB := int(math.Min(float64(index), float64(availabilitySetCount)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
|
||||
maxRules := 0
|
||||
minRules := serviceCount
|
||||
for _, lb := range *result.Value {
|
||||
for _, lb := range result {
|
||||
ruleCount := len(*lb.LoadBalancingRules)
|
||||
if ruleCount < minRules {
|
||||
minRules = ruleCount
|
||||
@ -252,8 +332,10 @@ func testLoadBalancerServicesSpecifiedSelection(t *testing.T, isInternal bool) {
|
||||
|
||||
// expected is MIN(index, 2)
|
||||
expectedNumOfLB := int(math.Min(float64(index), float64(2)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
@ -290,8 +372,10 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
|
||||
|
||||
// expected is MIN(index, az.Config.MaximumLoadBalancerRuleCount)
|
||||
expectedNumOfLBRules := int(math.Min(float64(index), float64(az.Config.MaximumLoadBalancerRuleCount)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLBRules {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLBRules, lbCount)
|
||||
}
|
||||
@ -360,8 +444,10 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo
|
||||
|
||||
// expected is MIN(index, availabilitySetCount)
|
||||
expectedNumOfLB := int(math.Min(float64(index), float64(availabilitySetCount)))
|
||||
result, _ := az.LoadBalancerClient.List(az.Config.ResourceGroup)
|
||||
lbCount := len(*result.Value)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, _ := az.LoadBalancerClient.List(ctx, az.Config.ResourceGroup)
|
||||
lbCount := len(result)
|
||||
if lbCount != expectedNumOfLB {
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
@ -864,6 +950,7 @@ func getTestCloud() (az *Cloud) {
|
||||
RouteTableName: "rt",
|
||||
PrimaryAvailabilitySetName: "as",
|
||||
MaximumLoadBalancerRuleCount: 250,
|
||||
VMType: vmTypeStandard,
|
||||
},
|
||||
}
|
||||
az.DisksClient = newFakeDisksClient()
|
||||
@ -969,7 +1056,9 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
|
||||
},
|
||||
},
|
||||
}
|
||||
az.InterfacesClient.CreateOrUpdate(az.Config.ResourceGroup, nicName, newNIC, nil)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
az.InterfacesClient.CreateOrUpdate(ctx, az.Config.ResourceGroup, nicName, newNIC)
|
||||
|
||||
// create vm
|
||||
asID := az.getAvailabilitySetID(asName)
|
||||
@ -990,8 +1079,10 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
|
||||
},
|
||||
}
|
||||
|
||||
_, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.Config.ResourceGroup, vmName, newVM, nil)
|
||||
if err := <-errChan; err != nil {
|
||||
vmCtx, vmCancel := getContextWithCancel()
|
||||
defer vmCancel()
|
||||
_, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM)
|
||||
if err != nil {
|
||||
}
|
||||
// add to kubernetes
|
||||
newNode := &v1.Node{
|
||||
@ -1087,11 +1178,13 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
az.SecurityGroupsClient.CreateOrUpdate(
|
||||
ctx,
|
||||
az.ResourceGroup,
|
||||
az.SecurityGroupName,
|
||||
sg,
|
||||
nil)
|
||||
sg)
|
||||
|
||||
return &sg
|
||||
}
|
||||
@ -1237,12 +1330,12 @@ func validatePublicIP(t *testing.T, publicIP *network.PublicIPAddress, service *
|
||||
t.Errorf("Expected publicIP resource exists, when it is not an internal service")
|
||||
}
|
||||
|
||||
if publicIP.Tags == nil || (*publicIP.Tags)["service"] == nil {
|
||||
if publicIP.Tags == nil || publicIP.Tags["service"] == nil {
|
||||
t.Errorf("Expected publicIP resource has tags[service]")
|
||||
}
|
||||
|
||||
serviceName := getServiceName(service)
|
||||
if serviceName != *(*publicIP.Tags)["service"] {
|
||||
if serviceName != *(publicIP.Tags["service"]) {
|
||||
t.Errorf("Expected publicIP resource has matching tags[service]")
|
||||
}
|
||||
// We cannot use service.Spec.LoadBalancerIP to compare with
|
||||
@ -1765,13 +1858,15 @@ func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) {
|
||||
az.VnetName,
|
||||
subName)
|
||||
|
||||
_, errChan := az.SubnetsClient.CreateOrUpdate(az.VnetResourceGroup, az.VnetName, subName,
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := az.SubnetsClient.CreateOrUpdate(ctx, az.VnetResourceGroup, az.VnetName, subName,
|
||||
network.Subnet{
|
||||
ID: &subnetID,
|
||||
Name: &subName,
|
||||
}, nil)
|
||||
})
|
||||
|
||||
if err := <-errChan; err != nil {
|
||||
if err != nil {
|
||||
t.Errorf("Subnet cannot be created or update, %v", err)
|
||||
}
|
||||
svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = subName
|
||||
@ -2604,3 +2699,39 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
|
||||
// func TestIfServiceIsEditedFromSharedRuleToOwnRuleThenItIsRemovedFromSharedRuleAndOwnRuleIsCreated(t *testing.T) {
|
||||
// t.Error()
|
||||
// }
|
||||
|
||||
func TestGetResourceGroupFromDiskURI(t *testing.T) {
|
||||
tests := []struct {
|
||||
diskURL string
|
||||
expectedResult string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
diskURL: "/subscriptions/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
|
||||
expectedResult: "azure-k8s1102",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
diskURL: "/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
|
||||
expectedResult: "",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
diskURL: "",
|
||||
expectedResult: "",
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
result, err := getResourceGroupFromDiskURI(test.diskURL)
|
||||
assert.Equal(t, result, test.expectedResult, "Expect result not equal with getResourceGroupFromDiskURI(%s) return: %q, expected: %q",
|
||||
test.diskURL, result, test.expectedResult)
|
||||
|
||||
if test.expectError {
|
||||
assert.NotNil(t, err, "Expect error during getResourceGroupFromDiskURI(%s)", test.diskURL)
|
||||
} else {
|
||||
assert.Nil(t, err, "Expect error is nil during getResourceGroupFromDiskURI(%s)", test.diskURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
@ -17,8 +17,8 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@ -34,10 +34,10 @@ type VMSet interface {
|
||||
GetInstanceIDByNodeName(name string) (string, error)
|
||||
// GetInstanceTypeByNodeName gets the instance type by node name.
|
||||
GetInstanceTypeByNodeName(name string) (string, error)
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
GetIPByNodeName(name, vmSetName string) (string, error)
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error)
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
GetIPByNodeName(name string) (string, string, error)
|
||||
// GetPrimaryInterface gets machine primary network interface by node name.
|
||||
GetPrimaryInterface(nodeName string) (network.Interface, error)
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
GetNodeNameByProviderID(providerID string) (types.NodeName, error)
|
||||
|
||||
@ -54,18 +54,14 @@ type VMSet interface {
|
||||
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
|
||||
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
EnsureBackendPoolDeleted(poolID, vmSetName string) error
|
||||
EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
|
||||
// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
|
||||
DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error)
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
|
||||
GetNextDiskLun(nodeName types.NodeName) (int32, error)
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
|
||||
// GetDataDisks gets a list of data disks attached to the node.
|
||||
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
|
||||
}
|
||||
|
294
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
@ -24,13 +24,14 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
@ -90,7 +91,7 @@ func newScaleSet(az *Cloud) (VMSet, error) {
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
|
||||
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
|
||||
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
instanceID, err = getScaleSetVMInstanceID(nodeName)
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
@ -116,12 +117,12 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co
|
||||
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return ssName, instanceID, *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
|
||||
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
|
||||
// The node must belong to one of scale sets.
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
|
||||
cachedVM, err := ss.vmssVMCache.Get(vmName)
|
||||
if err != nil {
|
||||
@ -133,7 +134,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm c
|
||||
return vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
|
||||
return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
|
||||
@ -161,7 +162,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
// NodeName is not part of providerID for vmss instances.
|
||||
scaleSetName, err := extractScaleSetNameByExternalID(providerID)
|
||||
scaleSetName, err := extractScaleSetNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
|
||||
@ -243,26 +244,28 @@ func (ss *scaleSet) GetPrimaryVMSetName() string {
|
||||
return ss.Config.PrimaryScaleSetName
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName, vmSetName string) (string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName, vmSetName)
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet,
|
||||
// fix this after it is supported.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q, %q), err=%v", nodeName, nodeName, vmSetName, err)
|
||||
return "", err
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
return "", err
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
return targetIP, "", nil
|
||||
}
|
||||
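For callers, the reshaped GetIPByNodeName now yields both a private and a public address, with the public one still returned as "" for vmss nodes (see the TODO above). A minimal, self-contained sketch of the new calling convention; the ipGetter interface and fakeVMSet type are invented for the example and are not part of the upstream code:

package main

import "fmt"

// ipGetter models only the reshaped method signature.
type ipGetter interface {
	GetIPByNodeName(name string) (string, string, error)
}

// fakeVMSet is a stand-in implementation for illustration.
type fakeVMSet struct{}

func (fakeVMSet) GetIPByNodeName(name string) (string, string, error) {
	// vmss instances currently report no per-VM public IP, so the second value is "".
	return "10.240.0.4", "", nil
}

func main() {
	var vmSet ipGetter = fakeVMSet{}
	privateIP, publicIP, err := vmSet.GetIPByNodeName("k8s-agentpool-vmss000001")
	if err != nil {
		panic(err)
	}
	fmt.Printf("private=%s public=%q\n", privateIP, publicIP)
}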
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine computepreview.VirtualMachineScaleSetVM) (string, error) {
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
@ -293,9 +296,9 @@ func getScaleSetVMInstanceID(machineName string) (string, error) {
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
|
||||
|
||||
// extractScaleSetNameByExternalID extracts the scaleset name by node's externalID.
|
||||
func extractScaleSetNameByExternalID(externalID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(externalID)
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by node's ProviderID.
|
||||
func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
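The renamed extractScaleSetNameByProviderID only depends on the package-level scaleSetNameRE and the ErrorNotVmssInstance sentinel, neither of which is shown in this hunk. A minimal, self-contained sketch of the same idea; the regular expression below is an assumption that matches the usual VMSS providerID layout, not the upstream pattern:

package main

import (
	"errors"
	"fmt"
	"regexp"
)

// Assumed pattern: the upstream scaleSetNameRE is defined elsewhere in azure_vmss.go.
var scaleSetNameRE = regexp.MustCompile(`/virtualMachineScaleSets/(.+)/virtualMachines/\d+$`)

var errNotVmssInstance = errors.New("not a vmss instance")

func extractScaleSetNameByProviderID(providerID string) (string, error) {
	matches := scaleSetNameRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		return "", errNotVmssInstance
	}
	return matches[1], nil
}

func main() {
	id := "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/k8s-agents/virtualMachines/3"
	name, err := extractScaleSetNameByProviderID(id)
	fmt.Println(name, err) // k8s-agents <nil>
}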
@ -324,12 +327,12 @@ func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
}
|
||||
|
||||
// listScaleSetVMs lists VMs belonging to the specified scale set.
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]computepreview.VirtualMachineScaleSetVM, error) {
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(computepreview.InstanceView))
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView))
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
@ -415,7 +418,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
@ -423,7 +426,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName, "")
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName)
|
||||
}
|
||||
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
|
||||
@ -432,11 +435,6 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check scale set name.
|
||||
if vmSetName != "" && !strings.EqualFold(ssName, vmSetName) {
|
||||
return network.Interface{}, errNotInVMSet
|
||||
}
|
||||
|
||||
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
|
||||
@ -449,7 +447,9 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
|
||||
return network.Interface{}, err
|
||||
@ -465,8 +465,8 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
|
||||
}
|
||||
|
||||
// getScaleSetWithRetry gets scale set with exponential backoff retry
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMachineScaleSet, bool, error) {
|
||||
var result computepreview.VirtualMachineScaleSet
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
var result compute.VirtualMachineScaleSet
|
||||
var exists bool
|
||||
|
||||
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
@ -479,7 +479,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMac
|
||||
|
||||
if cached != nil {
|
||||
exists = true
|
||||
result = *(cached.(*computepreview.VirtualMachineScaleSet))
|
||||
result = *(cached.(*compute.VirtualMachineScaleSet))
|
||||
}
|
||||
|
||||
return true, nil
|
||||
@ -489,7 +489,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMac
|
||||
}
|
||||
|
||||
// getPrimaryNetworkConfiguration gets primary network interface configuration for scale sets.
|
||||
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetNetworkConfiguration, error) {
|
||||
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
|
||||
networkConfigurations := *networkConfigurationList
|
||||
if len(networkConfigurations) == 1 {
|
||||
return &networkConfigurations[0], nil
|
||||
@ -505,7 +505,7 @@ func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]c
|
||||
return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName)
|
||||
}
|
||||
|
||||
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetIPConfiguration, error) {
|
||||
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
|
||||
ipConfigurations := *config.IPConfigurations
|
||||
if len(ipConfigurations) == 1 {
|
||||
return &ipConfigurations[0], nil
|
||||
@ -522,7 +522,7 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *computepreview.Virtual
|
||||
}
|
||||
|
||||
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet computepreview.VirtualMachineScaleSet) error {
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
@ -533,7 +533,7 @@ func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet computepr
|
||||
}
|
||||
|
||||
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
@ -543,9 +543,44 @@ func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstance
|
||||
})
|
||||
}
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
// getNodesScaleSets returns scalesets with instanceIDs and standard node names for given nodes.
|
||||
func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, []*v1.Node, error) {
|
||||
scalesets := make(map[string]sets.String)
|
||||
standardNodes := []*v1.Node{}
|
||||
|
||||
for _, curNode := range nodes {
|
||||
if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
|
||||
standardNodes = append(standardNodes, curNode)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := scalesets[curScaleSetName]; !ok {
|
||||
scalesets[curScaleSetName] = sets.NewString()
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(curNode.Spec.ProviderID)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
scalesets[curScaleSetName].Insert(instanceID)
|
||||
}
|
||||
|
||||
return scalesets, standardNodes, nil
|
||||
}
|
||||
|
||||
// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are
|
||||
// participating in the vmSet's LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
|
||||
glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
|
||||
@ -572,7 +607,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
|
||||
// Update primary IP configuration's LoadBalancerBackendAddressPools.
|
||||
foundPool := false
|
||||
newBackendPools := []computepreview.SubResource{}
|
||||
newBackendPools := []compute.SubResource{}
|
||||
if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
}
|
||||
@ -583,8 +618,26 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 {
|
||||
// Although standard load balancer supports backends from multiple vmss,
|
||||
// the same network interface couldn't be added to more than one load balancer of
|
||||
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
|
||||
// about this.
|
||||
for _, pool := range newBackendPools {
|
||||
backendPool := *pool.ID
|
||||
matches := backendPoolIDRE.FindStringSubmatch(backendPool)
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newBackendPools = append(newBackendPools,
|
||||
computepreview.SubResource{
|
||||
compute.SubResource{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
@ -607,30 +660,8 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
}
|
||||
}
|
||||
|
||||
// Construct instanceIDs from nodes.
|
||||
instanceIDs := []string{}
|
||||
for _, curNode := range nodes {
|
||||
curScaleSetName, err := extractScaleSetNameByExternalID(curNode.Spec.ExternalID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Node %q is not belonging to any scale sets, omitting it", curNode.Name)
|
||||
continue
|
||||
}
|
||||
if curScaleSetName != vmSetName {
|
||||
glog.V(4).Infof("Node %q is not belonging to scale set %q, omitting it", curNode.Name, vmSetName)
|
||||
continue
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(curNode.Spec.ExternalID)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get last segment from %q: %v", curNode.Spec.ExternalID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
instanceIDs = append(instanceIDs, instanceID)
|
||||
}
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
ctx, cancel := getContextWithCancel()
|
||||
@ -652,27 +683,68 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
scalesets, standardNodes, err := ss.getNodesScaleSets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.EnsureBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, vmSetName, vmSetName, err)
|
||||
glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
for ssName, instanceIDs := range scalesets {
|
||||
// Only add nodes belonging to specified vmSet for basic SKU LB.
|
||||
if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) {
|
||||
continue
|
||||
}
|
||||
|
||||
if instanceIDs.Len() == 0 {
|
||||
// This may happen when scaling a vmss capacity to 0.
|
||||
glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
|
||||
// InstanceIDs is required to update vmss, use * instead here since there are no nodes actually.
|
||||
instanceIDs.Insert("*")
|
||||
}
|
||||
|
||||
err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal)
|
||||
if err != nil {
|
||||
glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if ss.useStandardLoadBalancer() && len(standardNodes) > 0 {
|
||||
err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal)
|
||||
if err != nil {
|
||||
glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset.
|
||||
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
|
||||
glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
glog.V(2).Infof("ss.EnsureBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, vmSetName, vmSetName)
|
||||
glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find primary network interface configuration.
|
||||
networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
|
||||
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
|
||||
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, ssName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find primary IP configuration.
|
||||
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
|
||||
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, ssName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -682,12 +754,12 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
return nil
|
||||
}
|
||||
existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
newBackendPools := []computepreview.SubResource{}
|
||||
newBackendPools := []compute.SubResource{}
|
||||
foundPool := false
|
||||
for i := len(existingBackendPools) - 1; i >= 0; i-- {
|
||||
curPool := existingBackendPools[i]
|
||||
if strings.EqualFold(poolID, *curPool.ID) {
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, vmSetName)
|
||||
glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
|
||||
foundPool = true
|
||||
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
|
||||
}
|
||||
@ -699,17 +771,17 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
|
||||
// Update scale set with backoff.
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName)
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -718,17 +790,19 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
instanceIDs := []string{"*"}
|
||||
vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName)
|
||||
instanceCtx, instanceCancel := getContextWithCancel()
|
||||
defer instanceCancel()
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", vmSetName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", vmSetName)
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -738,14 +812,16 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
// Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB.
|
||||
// TODO: remove this workaround when figuring out the root cause.
|
||||
if len(newBackendPools) == 0 {
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName)
|
||||
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
updateCtx, updateCancel := getContextWithCancel()
|
||||
defer updateCancel()
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
|
||||
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -753,6 +829,48 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
if backendAddressPools == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
scalesets := sets.NewString()
|
||||
for _, backendPool := range *backendAddressPools {
|
||||
if strings.EqualFold(*backendPool.ID, poolID) && backendPool.BackendIPConfigurations != nil {
|
||||
for _, ipConfigurations := range *backendPool.BackendIPConfigurations {
|
||||
if ipConfigurations.ID == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
|
||||
continue
|
||||
}
|
||||
|
||||
scalesets.Insert(ssName)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for ssName := range scalesets {
|
||||
// Only remove nodes belonging to specified vmSet to basic LB backends.
|
||||
if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) {
|
||||
continue
|
||||
}
|
||||
|
||||
err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVmssMachineID returns the full identifier of a vmss virtual machine.
|
||||
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
|
||||
return fmt.Sprintf(
|
||||
|
18
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
@ -47,13 +47,19 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
|
||||
}
|
||||
|
||||
func extractVmssVMName(name string) (string, string, error) {
|
||||
ret := strings.Split(name, vmssNameSeparator)
|
||||
if len(ret) != 2 {
|
||||
split := strings.SplitAfter(name, vmssNameSeparator)
|
||||
if len(split) < 2 {
|
||||
glog.Errorf("Failed to extract vmssVMName %q", name)
|
||||
return "", "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return ret[0], ret[1], nil
|
||||
ssName := strings.Join(split[0:len(split)-1], "")
|
||||
// removing the trailing `vmssNameSeparator` since we used SplitAfter
|
||||
ssName = ssName[:len(ssName)-1]
|
||||
|
||||
instanceID := split[len(split)-1]
|
||||
|
||||
return ssName, instanceID, nil
|
||||
}
|
||||
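The rewritten splitter keeps extra separators inside the scale set name, which the new test case further down ("vm_test_1234") exercises. A minimal, self-contained sketch of the same logic, assuming the separator is "_" (vmssNameSeparator itself is defined elsewhere in the package):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// Assumption: "_" matches the vmName format used by the tests ("vm_test_1234").
const vmssNameSeparator = "_"

var errNotVmssInstance = errors.New("not a vmss instance")

func extractVmssVMName(name string) (string, string, error) {
	split := strings.SplitAfter(name, vmssNameSeparator)
	if len(split) < 2 {
		return "", "", errNotVmssInstance
	}
	// Everything but the last segment is the scale set name; drop the trailing
	// separator that SplitAfter keeps on each segment.
	ssName := strings.Join(split[:len(split)-1], "")
	ssName = ssName[:len(ssName)-1]
	return ssName, split[len(split)-1], nil
}

func main() {
	ssName, instanceID, err := extractVmssVMName("vm_test_1234")
	fmt.Println(ssName, instanceID, err) // vm_test 1234 <nil>
}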
|
||||
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
|
||||
@ -61,12 +67,13 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -141,12 +148,13 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go
generated
vendored
@ -46,6 +46,12 @@ func TestExtractVmssVMName(t *testing.T) {
|
||||
expectedScaleSet: "vm",
|
||||
expectedInstanceID: "1234",
|
||||
},
|
||||
{
|
||||
description: "correct vmss VM name with Extra Separator should return correct scaleSet and instanceID",
|
||||
vmName: "vm_test_1234",
|
||||
expectedScaleSet: "vm_test",
|
||||
expectedInstanceID: "1234",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
|
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@ -37,8 +37,8 @@ func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
|
||||
|
||||
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
|
||||
virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
|
||||
scaleSets := make(map[string]map[string]computepreview.VirtualMachineScaleSet)
|
||||
scaleSets["rg"] = map[string]computepreview.VirtualMachineScaleSet{
|
||||
scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
|
||||
scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
|
||||
scaleSetName: {
|
||||
Name: &scaleSetName,
|
||||
},
|
||||
@ -46,24 +46,24 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
|
||||
virtualMachineScaleSetsClient.setFakeStore(scaleSets)
|
||||
|
||||
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
|
||||
ssVMs := make(map[string]map[string]computepreview.VirtualMachineScaleSetVM)
|
||||
ssVMs["rg"] = make(map[string]computepreview.VirtualMachineScaleSetVM)
|
||||
ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
|
||||
ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
|
||||
for i := range vmList {
|
||||
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
|
||||
nodeName := vmList[i]
|
||||
instanceID := fmt.Sprintf("%d", i)
|
||||
vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
|
||||
networkInterfaces := []computepreview.NetworkInterfaceReference{
|
||||
networkInterfaces := []compute.NetworkInterfaceReference{
|
||||
{
|
||||
ID: &nodeName,
|
||||
},
|
||||
}
|
||||
ssVMs["rg"][vmName] = computepreview.VirtualMachineScaleSetVM{
|
||||
VirtualMachineScaleSetVMProperties: &computepreview.VirtualMachineScaleSetVMProperties{
|
||||
OsProfile: &computepreview.OSProfile{
|
||||
ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
|
||||
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
|
||||
OsProfile: &compute.OSProfile{
|
||||
ComputerName: &nodeName,
|
||||
},
|
||||
NetworkProfile: &computepreview.NetworkProfile{
|
||||
NetworkProfile: &compute.NetworkProfile{
|
||||
NetworkInterfaces: &networkInterfaces,
|
||||
},
|
||||
},
|
||||
|
73
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
@ -19,11 +19,13 @@ package azure
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
@ -39,18 +41,18 @@ var (
|
||||
// checkExistsFromError inspects an error and returns a true if err is nil,
|
||||
// false if error is an autorest.Error with StatusCode=404 and will return the
|
||||
// error back if error is another status code or another type of error.
|
||||
func checkResourceExistsFromError(err error) (bool, error) {
|
||||
func checkResourceExistsFromError(err error) (bool, string, error) {
|
||||
if err == nil {
|
||||
return true, nil
|
||||
return true, "", nil
|
||||
}
|
||||
v, ok := err.(autorest.DetailedError)
|
||||
if !ok {
|
||||
return false, err
|
||||
return false, "", err
|
||||
}
|
||||
if v.StatusCode == http.StatusNotFound {
|
||||
return false, nil
|
||||
return false, err.Error(), nil
|
||||
}
|
||||
return false, v
|
||||
return false, "", v
|
||||
}
|
||||
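checkResourceExistsFromError now reports three values; the middle string carries the not-found message that the rewritten cache getters log. A self-contained sketch of that contract, using a local stand-in for autorest.DetailedError (the real type's StatusCode field is an interface value, simplified to an int here):

package main

import (
	"fmt"
	"net/http"
)

// detailedError stands in for autorest.DetailedError so the sketch compiles on its own.
type detailedError struct {
	StatusCode int
	Message    string
}

func (e detailedError) Error() string { return e.Message }

// checkResourceExistsFromError mirrors the new three-value contract shown above.
func checkResourceExistsFromError(err error) (bool, string, error) {
	if err == nil {
		return true, "", nil
	}
	v, ok := err.(detailedError)
	if !ok {
		return false, "", err
	}
	if v.StatusCode == http.StatusNotFound {
		// Not found is not an error for the caller, but the message is surfaced for logging.
		return false, err.Error(), nil
	}
	return false, "", v
}

func main() {
	exists, message, err := checkResourceExistsFromError(detailedError{StatusCode: 404, Message: "ResourceNotFound"})
	fmt.Println(exists, message, err) // false ResourceNotFound <nil>
}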
|
||||
// If it is StatusNotFound return nil,
|
||||
@ -103,13 +105,17 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi
|
||||
}
|
||||
|
||||
var realErr error
|
||||
pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "")
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
var message string
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
pip, err = az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "")
|
||||
exists, message, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return pip, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
|
||||
return pip, false, nil
|
||||
}
|
||||
|
||||
@ -118,6 +124,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi
|
||||
|
||||
func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) {
|
||||
var realErr error
|
||||
var message string
|
||||
var rg string
|
||||
|
||||
if len(az.VnetResourceGroup) > 0 {
|
||||
@ -126,13 +133,16 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
|
||||
rg = az.ResourceGroup
|
||||
}
|
||||
|
||||
subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "")
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
subnet, err = az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "")
|
||||
exists, message, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return subnet, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
|
||||
return subnet, false, nil
|
||||
}
|
||||
|
||||
@ -153,6 +163,10 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi
|
||||
}
|
||||
|
||||
func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
|
||||
if az.SecurityGroupName == "" {
|
||||
return nsg, fmt.Errorf("securityGroupName is not configured")
|
||||
}
|
||||
|
||||
securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
|
||||
if err != nil {
|
||||
return nsg, err
|
||||
@ -173,13 +187,16 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
|
||||
// case we do get instance view every time to fulfill the azure_zones requirement without hitting
|
||||
// throttling.
|
||||
// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
|
||||
vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, key, compute.InstanceView)
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -191,13 +208,17 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
|
||||
|
||||
func (az *Cloud) newLBCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
lb, err := az.LoadBalancerClient.Get(az.ResourceGroup, key, "")
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
lb, err := az.LoadBalancerClient.Get(ctx, az.ResourceGroup, key, "")
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -209,13 +230,16 @@ func (az *Cloud) newLBCache() (*timedCache, error) {
|
||||
|
||||
func (az *Cloud) newNSGCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
nsg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, key, "")
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nsg, err := az.SecurityGroupsClient.Get(ctx, az.ResourceGroup, key, "")
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Security group %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -227,13 +251,16 @@ func (az *Cloud) newNSGCache() (*timedCache, error) {
|
||||
|
||||
func (az *Cloud) newRouteTableCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
rt, err := az.RouteTablesClient.Get(az.ResourceGroup, key, "")
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
rt, err := az.RouteTablesClient.Get(ctx, az.ResourceGroup, key, "")
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Route table %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -242,3 +269,11 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) {
|
||||
|
||||
return newTimedcache(rtCacheTTL, getter)
|
||||
}
|
||||
|
||||
func (az *Cloud) useStandardLoadBalancer() bool {
|
||||
return strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard)
|
||||
}
|
||||
|
||||
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
|
||||
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
@ -42,7 +42,7 @@ func TestExtractNotFound(t *testing.T) {
	}

	for _, test := range tests {
		exists, err := checkResourceExistsFromError(test.err)
		exists, _, err := checkResourceExistsFromError(test.err)
		if test.exists != exists {
			t.Errorf("expected: %v, saw: %v", test.exists, exists)
		}
12
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go
generated
vendored
@ -80,11 +80,6 @@ func (cs *CSCloud) nodeAddresses(instance *cloudstack.VirtualMachine) ([]v1.Node
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (cs *CSCloud) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return cs.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (cs *CSCloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
|
||||
@ -135,7 +130,7 @@ func (cs *CSCloud) InstanceTypeByProviderID(ctx context.Context, providerID stri
|
||||
|
||||
// AddSSHKeyToAllInstances is currently not implemented.
|
||||
func (cs *CSCloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return errors.New("AddSSHKeyToAllInstances not implemented")
|
||||
return cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
@ -158,3 +153,8 @@ func (cs *CSCloud) InstanceExistsByProviderID(ctx context.Context, providerID st
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in a safe state to detach volumes
|
||||
func (cs *CSCloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go
generated
vendored
@ -69,11 +69,6 @@ func (m *metadata) NodeAddressesByProviderID(ctx context.Context, providerID str
|
||||
return nil, errors.New("NodeAddressesByProviderID not implemented")
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (m *metadata) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return m.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (m *metadata) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
instanceID, err := m.get(metadataTypeInstanceID)
|
||||
@ -106,7 +101,7 @@ func (m *metadata) InstanceTypeByProviderID(ctx context.Context, providerID stri
|
||||
|
||||
// AddSSHKeyToAllInstances is currently not implemented.
|
||||
func (m *metadata) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return errors.New("AddSSHKeyToAllInstances not implemented")
|
||||
return cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
@ -119,6 +114,11 @@ func (m *metadata) InstanceExistsByProviderID(ctx context.Context, providerID st
|
||||
return false, errors.New("InstanceExistsByProviderID not implemented")
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns whether the instance is shut down.
|
||||
func (m *metadata) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
// GetZone returns the Zone containing the region that the program is running in.
|
||||
func (m *metadata) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/fake.go
generated
vendored
@ -50,8 +50,10 @@ type FakeCloud struct {
|
||||
Exists bool
|
||||
Err error
|
||||
|
||||
ExistsByProviderID bool
|
||||
ErrByProviderID error
|
||||
ExistsByProviderID bool
|
||||
ErrByProviderID error
|
||||
NodeShutdown bool
|
||||
ErrShutdownByProviderID error
|
||||
|
||||
Calls []string
|
||||
Addresses []v1.NodeAddress
|
||||
@ -208,14 +210,6 @@ func (f *FakeCloud) NodeAddressesByProviderID(ctx context.Context, providerID st
|
||||
return f.Addresses, f.Err
|
||||
}
|
||||
|
||||
// ExternalID is a test-spy implementation of Instances.ExternalID.
|
||||
// It adds an entry "external-id" into the internal method call record.
|
||||
// It returns an external id to the mapped instance name, if not found, it will return "ext-{instance}"
|
||||
func (f *FakeCloud) ExternalID(ctx context.Context, nodeName types.NodeName) (string, error) {
|
||||
f.addCall("external-id")
|
||||
return f.ExtID[nodeName], f.Err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified Name.
|
||||
func (f *FakeCloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
|
||||
f.addCall("instance-id")
|
||||
@ -241,6 +235,12 @@ func (f *FakeCloud) InstanceExistsByProviderID(ctx context.Context, providerID s
|
||||
return f.ExistsByProviderID, f.ErrByProviderID
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in a safe state to detach volumes
|
||||
func (f *FakeCloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
f.addCall("instance-shutdown-by-provider-id")
|
||||
return f.NodeShutdown, f.ErrShutdownByProviderID
|
||||
}
|
||||
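With the new NodeShutdown and ErrShutdownByProviderID fields, a test can preset the answer and then assert on the recorded call. A pared-down, illustrative stand-in for the spy; only the pieces this method touches are modelled:

package main

import (
	"context"
	"fmt"
)

// fakeCloud is a minimal stand-in for the FakeCloud test spy above.
type fakeCloud struct {
	NodeShutdown            bool
	ErrShutdownByProviderID error
	Calls                   []string
}

func (f *fakeCloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	f.Calls = append(f.Calls, "instance-shutdown-by-provider-id")
	return f.NodeShutdown, f.ErrShutdownByProviderID
}

func main() {
	f := &fakeCloud{NodeShutdown: true}
	down, err := f.InstanceShutdownByProviderID(context.Background(), "fake://instance-1")
	fmt.Println(down, err, f.Calls) // true <nil> [instance-shutdown-by-provider-id]
}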
|
||||
// List is a test-spy implementation of Instances.List.
|
||||
// It adds an entry "list" into the internal method call record.
|
||||
func (f *FakeCloud) List(filter string) ([]types.NodeName, error) {
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
@ -13,7 +13,6 @@ go_library(
|
||||
"gce.go",
|
||||
"gce_address_manager.go",
|
||||
"gce_addresses.go",
|
||||
"gce_addresses_fakes.go",
|
||||
"gce_alpha.go",
|
||||
"gce_annotations.go",
|
||||
"gce_backendservice.go",
|
||||
@ -32,8 +31,8 @@ go_library(
|
||||
"gce_loadbalancer_internal.go",
|
||||
"gce_loadbalancer_naming.go",
|
||||
"gce_networkendpointgroup.go",
|
||||
"gce_op.go",
|
||||
"gce_routes.go",
|
||||
"gce_securitypolicy.go",
|
||||
"gce_targetpool.go",
|
||||
"gce_targetproxy.go",
|
||||
"gce_tpu.go",
|
||||
@ -70,7 +69,7 @@ go_library(
|
||||
"//vendor/google.golang.org/api/compute/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/container/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
"//vendor/google.golang.org/api/tpu/v1alpha1:go_default_library",
|
||||
"//vendor/google.golang.org/api/tpu/v1:go_default_library",
|
||||
"//vendor/gopkg.in/gcfg.v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
@ -101,16 +100,22 @@ go_test(
|
||||
"gce_disks_test.go",
|
||||
"gce_healthchecks_test.go",
|
||||
"gce_loadbalancer_external_test.go",
|
||||
"gce_loadbalancer_internal_test.go",
|
||||
"gce_loadbalancer_test.go",
|
||||
"gce_loadbalancer_utils_test.go",
|
||||
"gce_test.go",
|
||||
"gce_util_test.go",
|
||||
"metrics_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/util/net/sets:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
@ -122,6 +127,8 @@ go_test(
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD
generated
vendored
@ -4,7 +4,9 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"constants.go",
|
||||
"context.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"gce_projects.go",
|
||||
"gen.go",
|
||||
"op.go",
|
||||
@ -31,6 +33,8 @@ go_test(
|
||||
srcs = [
|
||||
"gen_test.go",
|
||||
"mock_test.go",
|
||||
"ratelimit_test.go",
|
||||
"service_test.go",
|
||||
"utils_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
31
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/context.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloud

import (
	"context"
	"time"
)

const (
	defaultCallTimeout = 1 * time.Hour
)

// ContextWithCallTimeout returns a context with a default timeout, used for generated client calls.
func ContextWithCallTimeout() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), defaultCallTimeout)
}
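A hypothetical godoc-style example for the helper above; it is not part of the upstream file and only illustrates the intended call pattern:

package cloud

import (
	"fmt"
	"time"
)

// ExampleContextWithCallTimeout shows the usual call-site shape: derive a bounded
// context for one generated client call and cancel it when the call returns.
func ExampleContextWithCallTimeout() {
	ctx, cancel := ContextWithCallTimeout()
	defer cancel()

	deadline, ok := ctx.Deadline()
	fmt.Println(ok, time.Until(deadline) <= defaultCallTimeout)
	// Output: true true
}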
5
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/doc.go
generated
vendored
@ -109,4 +109,9 @@ limitations under the License.
// func (gce *GCEInstanceGroups) MyMethod() {
//   // Custom implementation.
// }
//
// Update generated code
//
// Run hack/update-cloudprovider-gce.sh to update the generated code.
//
package cloud
|
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/errors.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import "fmt"
|
||||
|
||||
// OperationPollingError occurs when the GCE Operation cannot be retrieved for a prolonged period.
|
||||
type OperationPollingError struct {
|
||||
LastPollError error
|
||||
}
|
||||
|
||||
// Error returns a string representation including the last poll error encountered.
|
||||
func (e *OperationPollingError) Error() string {
|
||||
return fmt.Sprintf("GCE operation polling error: %v", e.LastPollError)
|
||||
}
|
||||
|
||||
// GCEOperationError occurs when the GCE Operation finishes with an error.
|
||||
type GCEOperationError struct {
|
||||
// HTTPStatusCode is the HTTP status code of the final error.
|
||||
// For example, a failed operation may have 400 - BadRequest.
|
||||
HTTPStatusCode int
|
||||
// Code is GCE's code of what went wrong.
|
||||
// For example, RESOURCE_IN_USE_BY_ANOTHER_RESOURCE
|
||||
Code string
|
||||
// Message is a human readable message.
|
||||
// For example, "The network resource 'xxx' is already being used by 'xxx'"
|
||||
Message string
|
||||
}
|
||||
|
||||
// Error returns a string representation including the HTTP Status code, GCE's error code
|
||||
// and a human readable message.
|
||||
func (e *GCEOperationError) Error() string {
|
||||
return fmt.Sprintf("GCE %v - %v: %v", e.HTTPStatusCode, e.Code, e.Message)
|
||||
}
|
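Callers can tell a transient polling failure apart from a terminal operation failure with a plain type switch. An illustrative sketch; classify and its messages are invented, and the two error types are copied from the new file so the snippet compiles on its own:

package main

import "fmt"

// OperationPollingError and GCEOperationError mirror the types defined above.
type OperationPollingError struct{ LastPollError error }

func (e *OperationPollingError) Error() string {
	return fmt.Sprintf("GCE operation polling error: %v", e.LastPollError)
}

type GCEOperationError struct {
	HTTPStatusCode int
	Code           string
	Message        string
}

func (e *GCEOperationError) Error() string {
	return fmt.Sprintf("GCE %v - %v: %v", e.HTTPStatusCode, e.Code, e.Message)
}

// classify is a hypothetical caller that branches on the concrete error type.
func classify(err error) string {
	switch e := err.(type) {
	case *OperationPollingError:
		return fmt.Sprintf("polling failed, worth retrying: %v", e.LastPollError)
	case *GCEOperationError:
		return fmt.Sprintf("operation failed with %s (HTTP %d)", e.Code, e.HTTPStatusCode)
	default:
		return "unknown error"
	}
}

func main() {
	err := &GCEOperationError{HTTPStatusCode: 400, Code: "RESOURCE_IN_USE_BY_ANOTHER_RESOURCE", Message: "network in use"}
	fmt.Println(classify(err))
}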
1881
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go
generated
vendored
File diff suppressed because it is too large
Load Diff
119
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen/main.go
generated
vendored
119
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen/main.go
generated
vendored
@ -28,7 +28,6 @@ import (
	"log"
	"os"
	"os/exec"
	"sort"
	"text/template"
	"time"

@ -986,6 +985,38 @@ func (g *{{.GCEWrapType}}) {{.FcnArgs}} {
	}
}

// genResourceIDs generates the New<Service>ResourceID constructor functions.
func genResourceIDs(wr io.Writer) {
	const text = `
// New{{.Service}}ResourceID creates a ResourceID for the {{.Service}} resource.
{{- if .KeyIsProject}}
func New{{.Service}}ResourceID(project string) *ResourceID {
	var key *meta.Key
{{- else}}
{{- if .KeyIsGlobal}}
func New{{.Service}}ResourceID(project, name string) *ResourceID {
	key := meta.GlobalKey(name)
{{- end}}
{{- if .KeyIsRegional}}
func New{{.Service}}ResourceID(project, region, name string) *ResourceID {
	key := meta.RegionalKey(name, region)
{{- end}}
{{- if .KeyIsZonal}}
func New{{.Service}}ResourceID(project, zone, name string) *ResourceID {
	key := meta.ZonalKey(name, zone)
{{- end -}}
{{end}}
	return &ResourceID{project, "{{.Resource}}", key}
}
`
	tmpl := template.Must(template.New("resourceIDs").Parse(text))
	for _, sg := range meta.SortedServicesGroups {
		if err := tmpl.Execute(wr, sg.ServiceInfo()); err != nil {
			panic(err)
		}
	}
}

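For a regional resource such as ForwardingRules, the template above expands into a constructor along these lines (a sketch of the generated output, not the literal contents of gen.go):

// NewForwardingRulesResourceID creates a ResourceID for the ForwardingRules resource.
func NewForwardingRulesResourceID(project, region, name string) *ResourceID {
	key := meta.RegionalKey(name, region)
	return &ResourceID{project, "forwardingRules", key}
}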
func genUnitTestHeader(wr io.Writer) {
|
||||
const text = `/*
|
||||
Copyright {{.Year}} The Kubernetes Authors.
|
||||
@ -1238,20 +1269,86 @@ func Test{{.Service}}Group(t *testing.T) {
|
||||
}
|
||||
`
|
||||
tmpl := template.Must(template.New("unittest").Parse(text))
|
||||
// Sort keys so the output will be stable.
|
||||
var keys []string
|
||||
for k := range meta.AllServicesByGroup {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
s := meta.AllServicesByGroup[k]
|
||||
if err := tmpl.Execute(wr, s); err != nil {
|
||||
for _, sg := range meta.SortedServicesGroups {
|
||||
if err := tmpl.Execute(wr, sg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genUnitTestResourceIDConversion(wr io.Writer) {
|
||||
const text = `
|
||||
func TestResourceIDConversion(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, id := range []*ResourceID{
|
||||
{{- range .Groups}}
|
||||
{{- with .ServiceInfo}}
|
||||
{{- if .KeyIsProject}}
|
||||
New{{.Service}}ResourceID("my-{{.Resource}}-resource"),
|
||||
{{- else}}
|
||||
{{- if .KeyIsGlobal}}
|
||||
New{{.Service}}ResourceID("some-project", "my-{{.Resource}}-resource"),
|
||||
{{- end}}
|
||||
{{- if .KeyIsRegional}}
|
||||
New{{.Service}}ResourceID("some-project", "us-central1", "my-{{.Resource}}-resource"),
|
||||
{{- end}}
|
||||
{{- if .KeyIsZonal}}
|
||||
New{{.Service}}ResourceID("some-project", "us-east1-b", "my-{{.Resource}}-resource"),
|
||||
{{- end -}}
|
||||
{{end -}}
|
||||
{{end -}}
|
||||
{{end}}
|
||||
} {
|
||||
t.Run(id.Resource, func(t *testing.T) {
|
||||
// Test conversion to and from full URL.
|
||||
fullURL := id.SelfLink(meta.VersionGA)
|
||||
parsedID, err := ParseResourceURL(fullURL)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%s) = _, %v, want nil", fullURL, err)
|
||||
}
|
||||
if !reflect.DeepEqual(id, parsedID) {
|
||||
t.Errorf("SelfLink(%+v) -> ParseResourceURL(%s) = %+v, want original ID", id, fullURL, parsedID)
|
||||
}
|
||||
|
||||
// Test conversion to and from relative resource name.
|
||||
relativeName := id.RelativeResourceName()
|
||||
parsedID, err = ParseResourceURL(relativeName)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%s) = _, %v, want nil", relativeName, err)
|
||||
}
|
||||
if !reflect.DeepEqual(id, parsedID) {
|
||||
t.Errorf("RelativeResourceName(%+v) -> ParseResourceURL(%s) = %+v, want original ID", id, relativeName, parsedID)
|
||||
}
|
||||
|
||||
// Do not test ResourcePath for projects.
|
||||
if id.Resource == "projects" {
|
||||
return
|
||||
}
|
||||
|
||||
// Test conversion to and from resource path.
|
||||
resourcePath := id.ResourcePath()
|
||||
parsedID, err = ParseResourceURL(resourcePath)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%s) = _, %v, want nil", resourcePath, err)
|
||||
}
|
||||
id.ProjectID = ""
|
||||
if !reflect.DeepEqual(id, parsedID) {
|
||||
t.Errorf("ResourcePath(%+v) -> ParseResourceURL(%s) = %+v, want %+v", id, resourcePath, parsedID, id)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
`
|
||||
data := struct {
|
||||
Groups []*meta.ServiceGroup
|
||||
}{meta.SortedServicesGroups}
|
||||
tmpl := template.Must(template.New("unittest-resourceIDs").Parse(text))
|
||||
if err := tmpl.Execute(wr, data); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
@ -1262,9 +1359,11 @@ func main() {
|
||||
genHeader(out)
|
||||
genStubs(out)
|
||||
genTypes(out)
|
||||
genResourceIDs(out)
|
||||
case "test":
|
||||
genUnitTestHeader(out)
|
||||
genUnitTestServices(out)
|
||||
genUnitTestResourceIDConversion(out)
|
||||
default:
|
||||
log.Fatalf("Invalid -mode: %q", flags.mode)
|
||||
}
|
||||
|
241
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
241
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
@ -179,6 +179,8 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
var key *meta.Key
|
||||
keyAlpha := meta.GlobalKey("key-alpha")
|
||||
key = keyAlpha
|
||||
keyBeta := meta.GlobalKey("key-beta")
|
||||
key = keyBeta
|
||||
keyGA := meta.GlobalKey("key-ga")
|
||||
key = keyGA
|
||||
// Ignore unused variables.
|
||||
@ -188,6 +190,9 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
if _, err := mock.AlphaBackendServices().Get(ctx, key); err == nil {
|
||||
t.Errorf("AlphaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.BetaBackendServices().Get(ctx, key); err == nil {
|
||||
t.Errorf("BetaBackendServices().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.BackendServices().Get(ctx, key); err == nil {
|
||||
t.Errorf("BackendServices().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
@ -199,6 +204,12 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
t.Errorf("AlphaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &beta.BackendService{}
|
||||
if err := mock.BetaBackendServices().Insert(ctx, keyBeta, obj); err != nil {
|
||||
t.Errorf("BetaBackendServices().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &ga.BackendService{}
|
||||
if err := mock.BackendServices().Insert(ctx, keyGA, obj); err != nil {
|
||||
@ -210,15 +221,20 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
if obj, err := mock.AlphaBackendServices().Get(ctx, key); err != nil {
|
||||
t.Errorf("AlphaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.BetaBackendServices().Get(ctx, key); err != nil {
|
||||
t.Errorf("BetaBackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.BackendServices().Get(ctx, key); err != nil {
|
||||
t.Errorf("BackendServices().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
|
||||
// List.
|
||||
mock.MockAlphaBackendServices.Objects[*keyAlpha] = mock.MockAlphaBackendServices.Obj(&alpha.BackendService{Name: keyAlpha.Name})
|
||||
mock.MockBetaBackendServices.Objects[*keyBeta] = mock.MockBetaBackendServices.Obj(&beta.BackendService{Name: keyBeta.Name})
|
||||
mock.MockBackendServices.Objects[*keyGA] = mock.MockBackendServices.Obj(&ga.BackendService{Name: keyGA.Name})
|
||||
want := map[string]bool{
|
||||
"key-alpha": true,
|
||||
"key-beta": true,
|
||||
"key-ga": true,
|
||||
}
|
||||
_ = want // ignore unused variables.
|
||||
@ -236,6 +252,20 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.BetaBackendServices().List(ctx, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("BetaBackendServices().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.BackendServices().List(ctx, filter.None)
|
||||
if err != nil {
|
||||
@ -255,6 +285,9 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
if err := mock.AlphaBackendServices().Delete(ctx, keyAlpha); err != nil {
|
||||
t.Errorf("AlphaBackendServices().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
|
||||
}
|
||||
if err := mock.BetaBackendServices().Delete(ctx, keyBeta); err != nil {
|
||||
t.Errorf("BetaBackendServices().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
|
||||
}
|
||||
if err := mock.BackendServices().Delete(ctx, keyGA); err != nil {
|
||||
t.Errorf("BackendServices().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
|
||||
}
|
||||
@ -263,6 +296,9 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
if err := mock.AlphaBackendServices().Delete(ctx, keyAlpha); err == nil {
|
||||
t.Errorf("AlphaBackendServices().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
|
||||
}
|
||||
if err := mock.BetaBackendServices().Delete(ctx, keyBeta); err == nil {
|
||||
t.Errorf("BetaBackendServices().Delete(%v, %v) = nil; want error", ctx, keyBeta)
|
||||
}
|
||||
if err := mock.BackendServices().Delete(ctx, keyGA); err == nil {
|
||||
t.Errorf("BackendServices().Delete(%v, %v) = nil; want error", ctx, keyGA)
|
||||
}
|
||||
@ -276,28 +312,17 @@ func TestDisksGroup(t *testing.T) {
|
||||
mock := NewMockGCE(pr)
|
||||
|
||||
var key *meta.Key
|
||||
keyAlpha := meta.ZonalKey("key-alpha", "location")
|
||||
key = keyAlpha
|
||||
keyGA := meta.ZonalKey("key-ga", "location")
|
||||
key = keyGA
|
||||
// Ignore unused variables.
|
||||
_, _, _ = ctx, mock, key
|
||||
|
||||
// Get not found.
|
||||
if _, err := mock.AlphaDisks().Get(ctx, key); err == nil {
|
||||
t.Errorf("AlphaDisks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.Disks().Get(ctx, key); err == nil {
|
||||
t.Errorf("Disks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
|
||||
// Insert.
|
||||
{
|
||||
obj := &alpha.Disk{}
|
||||
if err := mock.AlphaDisks().Insert(ctx, keyAlpha, obj); err != nil {
|
||||
t.Errorf("AlphaDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &ga.Disk{}
|
||||
if err := mock.Disks().Insert(ctx, keyGA, obj); err != nil {
|
||||
@ -306,35 +331,16 @@ func TestDisksGroup(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get across versions.
|
||||
if obj, err := mock.AlphaDisks().Get(ctx, key); err != nil {
|
||||
t.Errorf("AlphaDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.Disks().Get(ctx, key); err != nil {
|
||||
t.Errorf("Disks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
|
||||
// List.
|
||||
mock.MockAlphaDisks.Objects[*keyAlpha] = mock.MockAlphaDisks.Obj(&alpha.Disk{Name: keyAlpha.Name})
|
||||
mock.MockDisks.Objects[*keyGA] = mock.MockDisks.Obj(&ga.Disk{Name: keyGA.Name})
|
||||
want := map[string]bool{
|
||||
"key-alpha": true,
|
||||
"key-ga": true,
|
||||
"key-ga": true,
|
||||
}
|
||||
_ = want // ignore unused variables.
|
||||
{
|
||||
objs, err := mock.AlphaDisks().List(ctx, location, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("AlphaDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.Disks().List(ctx, location, filter.None)
|
||||
if err != nil {
|
||||
@ -351,17 +357,11 @@ func TestDisksGroup(t *testing.T) {
|
||||
}
|
||||
|
||||
// Delete across versions.
|
||||
if err := mock.AlphaDisks().Delete(ctx, keyAlpha); err != nil {
|
||||
t.Errorf("AlphaDisks().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
|
||||
}
|
||||
if err := mock.Disks().Delete(ctx, keyGA); err != nil {
|
||||
t.Errorf("Disks().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
|
||||
}
|
||||
|
||||
// Delete not found.
|
||||
if err := mock.AlphaDisks().Delete(ctx, keyAlpha); err == nil {
|
||||
t.Errorf("AlphaDisks().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
|
||||
}
|
||||
if err := mock.Disks().Delete(ctx, keyGA); err == nil {
|
||||
t.Errorf("Disks().Delete(%v, %v) = nil; want error", ctx, keyGA)
|
||||
}
|
||||
@ -1279,39 +1279,39 @@ func TestRegionDisksGroup(t *testing.T) {
|
||||
mock := NewMockGCE(pr)
|
||||
|
||||
var key *meta.Key
|
||||
keyAlpha := meta.RegionalKey("key-alpha", "location")
|
||||
key = keyAlpha
|
||||
keyBeta := meta.RegionalKey("key-beta", "location")
|
||||
key = keyBeta
|
||||
// Ignore unused variables.
|
||||
_, _, _ = ctx, mock, key
|
||||
|
||||
// Get not found.
|
||||
if _, err := mock.AlphaRegionDisks().Get(ctx, key); err == nil {
|
||||
t.Errorf("AlphaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
if _, err := mock.BetaRegionDisks().Get(ctx, key); err == nil {
|
||||
t.Errorf("BetaRegionDisks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
|
||||
// Insert.
|
||||
{
|
||||
obj := &alpha.Disk{}
|
||||
if err := mock.AlphaRegionDisks().Insert(ctx, keyAlpha, obj); err != nil {
|
||||
t.Errorf("AlphaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
|
||||
obj := &beta.Disk{}
|
||||
if err := mock.BetaRegionDisks().Insert(ctx, keyBeta, obj); err != nil {
|
||||
t.Errorf("BetaRegionDisks().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get across versions.
|
||||
if obj, err := mock.AlphaRegionDisks().Get(ctx, key); err != nil {
|
||||
t.Errorf("AlphaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
if obj, err := mock.BetaRegionDisks().Get(ctx, key); err != nil {
|
||||
t.Errorf("BetaRegionDisks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
|
||||
// List.
|
||||
mock.MockAlphaRegionDisks.Objects[*keyAlpha] = mock.MockAlphaRegionDisks.Obj(&alpha.Disk{Name: keyAlpha.Name})
|
||||
mock.MockBetaRegionDisks.Objects[*keyBeta] = mock.MockBetaRegionDisks.Obj(&beta.Disk{Name: keyBeta.Name})
|
||||
want := map[string]bool{
|
||||
"key-alpha": true,
|
||||
"key-beta": true,
|
||||
}
|
||||
_ = want // ignore unused variables.
|
||||
{
|
||||
objs, err := mock.AlphaRegionDisks().List(ctx, location, filter.None)
|
||||
objs, err := mock.BetaRegionDisks().List(ctx, location, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("AlphaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
t.Errorf("BetaRegionDisks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
@ -1324,13 +1324,13 @@ func TestRegionDisksGroup(t *testing.T) {
|
||||
}
|
||||
|
||||
// Delete across versions.
|
||||
if err := mock.AlphaRegionDisks().Delete(ctx, keyAlpha); err != nil {
|
||||
t.Errorf("AlphaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
|
||||
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err != nil {
|
||||
t.Errorf("BetaRegionDisks().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
|
||||
}
|
||||
|
||||
// Delete not found.
|
||||
if err := mock.AlphaRegionDisks().Delete(ctx, keyAlpha); err == nil {
|
||||
t.Errorf("AlphaRegionDisks().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
|
||||
if err := mock.BetaRegionDisks().Delete(ctx, keyBeta); err == nil {
|
||||
t.Errorf("BetaRegionDisks().Delete(%v, %v) = nil; want error", ctx, keyBeta)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1445,6 +1445,69 @@ func TestRoutesGroup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSecurityPoliciesGroup(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
pr := &SingleProjectRouter{"mock-project"}
|
||||
mock := NewMockGCE(pr)
|
||||
|
||||
var key *meta.Key
|
||||
keyBeta := meta.GlobalKey("key-beta")
|
||||
key = keyBeta
|
||||
// Ignore unused variables.
|
||||
_, _, _ = ctx, mock, key
|
||||
|
||||
// Get not found.
|
||||
if _, err := mock.BetaSecurityPolicies().Get(ctx, key); err == nil {
|
||||
t.Errorf("BetaSecurityPolicies().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
|
||||
// Insert.
|
||||
{
|
||||
obj := &beta.SecurityPolicy{}
|
||||
if err := mock.BetaSecurityPolicies().Insert(ctx, keyBeta, obj); err != nil {
|
||||
t.Errorf("BetaSecurityPolicies().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get across versions.
|
||||
if obj, err := mock.BetaSecurityPolicies().Get(ctx, key); err != nil {
|
||||
t.Errorf("BetaSecurityPolicies().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
|
||||
// List.
|
||||
mock.MockBetaSecurityPolicies.Objects[*keyBeta] = mock.MockBetaSecurityPolicies.Obj(&beta.SecurityPolicy{Name: keyBeta.Name})
|
||||
want := map[string]bool{
|
||||
"key-beta": true,
|
||||
}
|
||||
_ = want // ignore unused variables.
|
||||
{
|
||||
objs, err := mock.BetaSecurityPolicies().List(ctx, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("BetaSecurityPolicies().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaSecurityPolicies().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete across versions.
|
||||
if err := mock.BetaSecurityPolicies().Delete(ctx, keyBeta); err != nil {
|
||||
t.Errorf("BetaSecurityPolicies().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
|
||||
}
|
||||
|
||||
// Delete not found.
|
||||
if err := mock.BetaSecurityPolicies().Delete(ctx, keyBeta); err == nil {
|
||||
t.Errorf("BetaSecurityPolicies().Delete(%v, %v) = nil; want error", ctx, keyBeta)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSslCertificatesGroup(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -1807,3 +1870,73 @@ func TestZonesGroup(t *testing.T) {
|
||||
|
||||
// Delete not found.
|
||||
}
|
||||
|
||||
func TestResourceIDConversion(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, id := range []*ResourceID{
|
||||
NewAddressesResourceID("some-project", "us-central1", "my-addresses-resource"),
|
||||
NewBackendServicesResourceID("some-project", "my-backendServices-resource"),
|
||||
NewDisksResourceID("some-project", "us-east1-b", "my-disks-resource"),
|
||||
NewFirewallsResourceID("some-project", "my-firewalls-resource"),
|
||||
NewForwardingRulesResourceID("some-project", "us-central1", "my-forwardingRules-resource"),
|
||||
NewGlobalAddressesResourceID("some-project", "my-addresses-resource"),
|
||||
NewGlobalForwardingRulesResourceID("some-project", "my-forwardingRules-resource"),
|
||||
NewHealthChecksResourceID("some-project", "my-healthChecks-resource"),
|
||||
NewHttpHealthChecksResourceID("some-project", "my-httpHealthChecks-resource"),
|
||||
NewHttpsHealthChecksResourceID("some-project", "my-httpsHealthChecks-resource"),
|
||||
NewInstanceGroupsResourceID("some-project", "us-east1-b", "my-instanceGroups-resource"),
|
||||
NewInstancesResourceID("some-project", "us-east1-b", "my-instances-resource"),
|
||||
NewNetworkEndpointGroupsResourceID("some-project", "us-east1-b", "my-networkEndpointGroups-resource"),
|
||||
NewProjectsResourceID("my-projects-resource"),
|
||||
NewRegionBackendServicesResourceID("some-project", "us-central1", "my-backendServices-resource"),
|
||||
NewRegionDisksResourceID("some-project", "us-central1", "my-disks-resource"),
|
||||
NewRegionsResourceID("some-project", "my-regions-resource"),
|
||||
NewRoutesResourceID("some-project", "my-routes-resource"),
|
||||
NewSecurityPoliciesResourceID("some-project", "my-securityPolicies-resource"),
|
||||
NewSslCertificatesResourceID("some-project", "my-sslCertificates-resource"),
|
||||
NewTargetHttpProxiesResourceID("some-project", "my-targetHttpProxies-resource"),
|
||||
NewTargetHttpsProxiesResourceID("some-project", "my-targetHttpsProxies-resource"),
|
||||
NewTargetPoolsResourceID("some-project", "us-central1", "my-targetPools-resource"),
|
||||
NewUrlMapsResourceID("some-project", "my-urlMaps-resource"),
|
||||
NewZonesResourceID("some-project", "my-zones-resource"),
|
||||
} {
|
||||
t.Run(id.Resource, func(t *testing.T) {
|
||||
// Test conversion to and from full URL.
|
||||
fullURL := id.SelfLink(meta.VersionGA)
|
||||
parsedID, err := ParseResourceURL(fullURL)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%s) = _, %v, want nil", fullURL, err)
|
||||
}
|
||||
if !reflect.DeepEqual(id, parsedID) {
|
||||
t.Errorf("SelfLink(%+v) -> ParseResourceURL(%s) = %+v, want original ID", id, fullURL, parsedID)
|
||||
}
|
||||
|
||||
// Test conversion to and from relative resource name.
|
||||
relativeName := id.RelativeResourceName()
|
||||
parsedID, err = ParseResourceURL(relativeName)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%s) = _, %v, want nil", relativeName, err)
|
||||
}
|
||||
if !reflect.DeepEqual(id, parsedID) {
|
||||
t.Errorf("RelativeResourceName(%+v) -> ParseResourceURL(%s) = %+v, want original ID", id, relativeName, parsedID)
|
||||
}
|
||||
|
||||
// Do not test ResourcePath for projects.
|
||||
if id.Resource == "projects" {
|
||||
return
|
||||
}
|
||||
|
||||
// Test conversion to and from resource path.
|
||||
resourcePath := id.ResourcePath()
|
||||
parsedID, err = ParseResourceURL(resourcePath)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%s) = _, %v, want nil", resourcePath, err)
|
||||
}
|
||||
id.ProjectID = ""
|
||||
if !reflect.DeepEqual(id, parsedID) {
|
||||
t.Errorf("ResourcePath(%+v) -> ParseResourceURL(%s) = %+v, want %+v", id, resourcePath, parsedID, id)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
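The round-trip property exercised by the generated test above can be summarized with a short sketch using the constructors and parser from this package:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	id := cloud.NewRegionDisksResourceID("some-project", "us-central1", "my-disk")

	// Full URL and relative resource name both parse back to an equivalent ResourceID.
	link := id.SelfLink(meta.VersionGA)
	parsed, err := cloud.ParseResourceURL(link)
	fmt.Println(link, parsed, err)

	rel := id.RelativeResourceName()
	parsed, err = cloud.ParseResourceURL(rel)
	fmt.Println(rel, parsed, err)
}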
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go
generated
vendored
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go
generated
vendored
@ -103,17 +103,32 @@ var AllServices = []*ServiceInfo{
|
||||
serviceType: reflect.TypeOf(&ga.BackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"GetHealth",
|
||||
"Patch",
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
Service: "BackendServices",
|
||||
Resource: "backendServices",
|
||||
version: VersionAlpha,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&alpha.BackendServicesService{}),
|
||||
additionalMethods: []string{"Update"},
|
||||
Object: "BackendService",
|
||||
Service: "BackendServices",
|
||||
Resource: "backendServices",
|
||||
version: VersionBeta,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&beta.BackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"SetSecurityPolicy",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
Service: "BackendServices",
|
||||
Resource: "backendServices",
|
||||
version: VersionAlpha,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&alpha.BackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
"SetSecurityPolicy",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
@ -145,22 +160,20 @@ var AllServices = []*ServiceInfo{
|
||||
Resource: "disks",
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&ga.DisksService{}),
|
||||
},
|
||||
{
|
||||
Object: "Disk",
|
||||
Service: "Disks",
|
||||
Resource: "disks",
|
||||
version: VersionAlpha,
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&alpha.DisksService{}),
|
||||
additionalMethods: []string{
|
||||
"Resize",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Disk",
|
||||
Service: "RegionDisks",
|
||||
Resource: "disks",
|
||||
version: VersionAlpha,
|
||||
version: VersionBeta,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&alpha.DisksService{}),
|
||||
serviceType: reflect.TypeOf(&beta.RegionDisksService{}),
|
||||
additionalMethods: []string{
|
||||
"Resize",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Firewall",
|
||||
@ -326,6 +339,21 @@ var AllServices = []*ServiceInfo{
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.RoutesService{}),
|
||||
},
|
||||
{
|
||||
Object: "SecurityPolicy",
|
||||
Service: "SecurityPolicies",
|
||||
Resource: "securityPolicies",
|
||||
version: VersionBeta,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&beta.SecurityPoliciesService{}),
|
||||
additionalMethods: []string{
|
||||
"AddRule",
|
||||
"GetRule",
|
||||
"Patch",
|
||||
"PatchRule",
|
||||
"RemoveRule",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "SslCertificate",
|
||||
Service: "SslCertificates",
|
||||
|
31
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/service.go
generated
vendored
31
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/service.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// ServiceInfo defines the entry for a Service that code will be generated for.
|
||||
@ -159,6 +160,13 @@ func (i *ServiceInfo) KeyIsZonal() bool {
|
||||
return i.keyType == Zonal
|
||||
}
|
||||
|
||||
// KeyIsProject is true if the key represents the project resource.
|
||||
func (i *ServiceInfo) KeyIsProject() bool {
|
||||
// Projects are a special resource for ResourceId because there is no 'key' value. This func
|
||||
// is used by the generator to not accept a key parameter.
|
||||
return i.Service == "Projects"
|
||||
}
|
||||
|
||||
// MakeKey returns the call used to create the appropriate key type.
|
||||
func (i *ServiceInfo) MakeKey(name, location string) string {
|
||||
switch i.keyType {
|
||||
@ -220,15 +228,20 @@ type ServiceGroup struct {
|
||||
GA *ServiceInfo
|
||||
}
|
||||
|
||||
// Service returns any ServiceInfo object belonging to the ServiceGroup.
|
||||
// Service returns any ServiceInfo string belonging to the ServiceGroup.
|
||||
func (sg *ServiceGroup) Service() string {
|
||||
return sg.ServiceInfo().Service
|
||||
}
|
||||
|
||||
// ServiceInfo returns any ServiceInfo object belonging to the ServiceGroup.
|
||||
func (sg *ServiceGroup) ServiceInfo() *ServiceInfo {
|
||||
switch {
|
||||
case sg.GA != nil:
|
||||
return sg.GA.Service
|
||||
return sg.GA
|
||||
case sg.Alpha != nil:
|
||||
return sg.Alpha.Service
|
||||
return sg.Alpha
|
||||
case sg.Beta != nil:
|
||||
return sg.Beta.Service
|
||||
return sg.Beta
|
||||
default:
|
||||
panic(errors.New("service group is empty"))
|
||||
}
|
||||
@ -272,6 +285,16 @@ func groupServices(services []*ServiceInfo) map[string]*ServiceGroup {
|
||||
// AllServicesByGroup is a map of service name to ServicesGroup.
|
||||
var AllServicesByGroup map[string]*ServiceGroup
|
||||
|
||||
// SortedServicesGroups is a slice of ServiceGroup sorted by Service name.
|
||||
var SortedServicesGroups []*ServiceGroup
|
||||
|
||||
func init() {
|
||||
AllServicesByGroup = groupServices(AllServices)
|
||||
|
||||
for _, sg := range AllServicesByGroup {
|
||||
SortedServicesGroups = append(SortedServicesGroups, sg)
|
||||
}
|
||||
sort.Slice(SortedServicesGroups, func(i, j int) bool {
|
||||
return SortedServicesGroups[i].Service() < SortedServicesGroups[j].Service()
|
||||
})
|
||||
}
|
||||
|
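The generator relies on the sorted grouping set up in init() above; a small sketch of walking it, roughly as gen/main.go does:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	// SortedServicesGroups is ordered by service name, so generated output is stable.
	for _, sg := range meta.SortedServicesGroups {
		info := sg.ServiceInfo() // any non-nil version: GA, then Alpha, then Beta
		fmt.Printf("%s (resource %q, project-keyed: %v)\n", sg.Service(), info.Resource, info.KeyIsProject())
	}
}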
1
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD
generated
vendored
@ -7,6 +7,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
|
360
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go
generated
vendored
360
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go
generated
vendored
@ -28,15 +28,24 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
var (
|
||||
// InUseError is a shared variable with error code StatusBadRequest for error verification.
|
||||
InUseError = &googleapi.Error{Code: http.StatusBadRequest, Message: "It's being used by god."}
|
||||
// InternalServerError is a shared variable with error code StatusInternalServerError for error verification.
|
||||
InternalServerError = &googleapi.Error{Code: http.StatusInternalServerError}
|
||||
)
|
||||
|
||||
// gceObject is an abstraction of all GCE API objects in the Go client
|
||||
type gceObject interface {
|
||||
MarshalJSON() ([]byte, error)
|
||||
@ -85,7 +94,7 @@ func RemoveInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsR
|
||||
|
||||
func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules map[meta.Key]*cloud.MockForwardingRulesObj, version meta.Version, projectID string) (bool, error) {
|
||||
if !key.Valid() {
|
||||
return false, fmt.Errorf("invalid GCE key (%+v)", key)
|
||||
return true, fmt.Errorf("invalid GCE key (%+v)", key)
|
||||
}
|
||||
|
||||
if _, ok := mRules[*key]; ok {
|
||||
@ -93,16 +102,16 @@ func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules ma
|
||||
Code: http.StatusConflict,
|
||||
Message: fmt.Sprintf("MockForwardingRule %v exists", key),
|
||||
}
|
||||
return false, err
|
||||
return true, err
|
||||
}
|
||||
|
||||
enc, err := obj.MarshalJSON()
|
||||
if err != nil {
|
||||
return false, err
|
||||
return true, err
|
||||
}
|
||||
var fwdRule alpha.ForwardingRule
|
||||
if err := json.Unmarshal(enc, &fwdRule); err != nil {
|
||||
return false, err
|
||||
return true, err
|
||||
}
|
||||
// Set the default values for the Alpha fields.
|
||||
if fwdRule.NetworkTier == "" {
|
||||
@ -121,28 +130,39 @@ func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules ma
|
||||
// InsertFwdRuleHook mocks inserting a ForwardingRule. ForwardingRules are
|
||||
// expected to default to Premium tier if no NetworkTier is specified.
|
||||
func InsertFwdRuleHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
m.Lock.Lock()
|
||||
defer m.Lock.Unlock()
|
||||
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "forwardingRules")
|
||||
return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionGA, projectID)
|
||||
}
|
||||
|
||||
// InsertBetaFwdRuleHook mocks inserting a BetaForwardingRule.
|
||||
func InsertBetaFwdRuleHook(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
m.Lock.Lock()
|
||||
defer m.Lock.Unlock()
|
||||
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "forwardingRules")
|
||||
return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionBeta, projectID)
|
||||
}
|
||||
|
||||
// InsertAlphaFwdRuleHook mocks inserting an AlphaForwardingRule.
|
||||
func InsertAlphaFwdRuleHook(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
m.Lock.Lock()
|
||||
defer m.Lock.Unlock()
|
||||
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionAlpha, "forwardingRules")
|
||||
return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionAlpha, projectID)
|
||||
}
|
||||
|
||||
// Used to assign Addresses with no IP a unique IP address
|
||||
var ipCounter = 1
|
||||
// AddressAttributes holds the shared attributes used by the mock Addresses, such as the IP counter.
|
||||
type AddressAttributes struct {
|
||||
IPCounter int // Used to assign Addresses with no IP a unique IP address
|
||||
}
|
||||
|
||||
func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.Key]*cloud.MockAddressesObj, version meta.Version, projectID string) (bool, error) {
|
||||
func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.Key]*cloud.MockAddressesObj, version meta.Version, projectID string, addressAttrs AddressAttributes) (bool, error) {
|
||||
if !key.Valid() {
|
||||
return false, fmt.Errorf("invalid GCE key (%+v)", key)
|
||||
return true, fmt.Errorf("invalid GCE key (%+v)", key)
|
||||
}
|
||||
|
||||
if _, ok := mAddrs[*key]; ok {
|
||||
@ -150,16 +170,16 @@ func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.
|
||||
Code: http.StatusConflict,
|
||||
Message: fmt.Sprintf("MockAddresses %v exists", key),
|
||||
}
|
||||
return false, err
|
||||
return true, err
|
||||
}
|
||||
|
||||
enc, err := obj.MarshalJSON()
|
||||
if err != nil {
|
||||
return false, err
|
||||
return true, err
|
||||
}
|
||||
var addr alpha.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
return false, err
|
||||
return true, err
|
||||
}
|
||||
|
||||
// Set default address type if not present.
|
||||
@ -184,7 +204,7 @@ func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.
|
||||
errorCode = http.StatusBadRequest
|
||||
}
|
||||
|
||||
return false, &googleapi.Error{Code: errorCode, Message: msg}
|
||||
return true, &googleapi.Error{Code: errorCode, Message: msg}
|
||||
}
|
||||
}
|
||||
|
||||
@ -195,8 +215,8 @@ func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.
|
||||
}
|
||||
|
||||
if addr.Address == "" {
|
||||
addr.Address = fmt.Sprintf("1.2.3.%d", ipCounter)
|
||||
ipCounter++
|
||||
addr.Address = fmt.Sprintf("1.2.3.%d", addressAttrs.IPCounter)
|
||||
addressAttrs.IPCounter++
|
||||
}
|
||||
|
||||
// Set the default values for the Alpha fields.
|
||||
@ -210,19 +230,323 @@ func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.
|
||||
|
||||
// InsertAddressHook mocks inserting an Address.
|
||||
func InsertAddressHook(ctx context.Context, key *meta.Key, obj *ga.Address, m *cloud.MockAddresses) (bool, error) {
|
||||
m.Lock.Lock()
|
||||
defer m.Lock.Unlock()
|
||||
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "addresses")
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionGA, projectID)
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionGA, projectID, m.X.(AddressAttributes))
|
||||
}
|
||||
|
||||
// InsertBetaAddressHook mocks inserting a BetaAddress.
|
||||
func InsertBetaAddressHook(ctx context.Context, key *meta.Key, obj *beta.Address, m *cloud.MockAddresses) (bool, error) {
|
||||
m.Lock.Lock()
|
||||
defer m.Lock.Unlock()
|
||||
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses")
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionBeta, projectID)
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionBeta, projectID, m.X.(AddressAttributes))
|
||||
}
|
||||
|
||||
// InsertAlphaAddressHook mocks inserting an Address. Addresses are expected to
|
||||
// default to Premium tier if no NetworkTier is specified.
|
||||
func InsertAlphaAddressHook(ctx context.Context, key *meta.Key, obj *alpha.Address, m *cloud.MockAlphaAddresses) (bool, error) {
|
||||
m.Lock.Lock()
|
||||
defer m.Lock.Unlock()
|
||||
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses")
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionAlpha, projectID)
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionAlpha, projectID, m.X.(AddressAttributes))
|
||||
}
|
||||
|
||||
// InstanceGroupAttributes maps from InstanceGroup key to a map of Instances
|
||||
type InstanceGroupAttributes struct {
|
||||
InstanceMap map[meta.Key]map[string]*ga.InstanceWithNamedPorts
|
||||
Lock *sync.Mutex
|
||||
}
|
||||
|
||||
// AddInstances adds a list of Instances passed by InstanceReference
|
||||
func (igAttrs *InstanceGroupAttributes) AddInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error {
|
||||
igAttrs.Lock.Lock()
|
||||
defer igAttrs.Lock.Unlock()
|
||||
|
||||
instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key]
|
||||
if !ok {
|
||||
instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts)
|
||||
}
|
||||
|
||||
for _, instance := range instanceRefs {
|
||||
iWithPort := &ga.InstanceWithNamedPorts{
|
||||
Instance: instance.Instance,
|
||||
}
|
||||
|
||||
instancesWithNamedPorts[instance.Instance] = iWithPort
|
||||
}
|
||||
|
||||
igAttrs.InstanceMap[*key] = instancesWithNamedPorts
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveInstances removes a list of Instances passed by InstanceReference
|
||||
func (igAttrs *InstanceGroupAttributes) RemoveInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error {
|
||||
igAttrs.Lock.Lock()
|
||||
defer igAttrs.Lock.Unlock()
|
||||
|
||||
instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key]
|
||||
if !ok {
|
||||
instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts)
|
||||
}
|
||||
|
||||
for _, instanceToRemove := range instanceRefs {
|
||||
if _, ok := instancesWithNamedPorts[instanceToRemove.Instance]; ok {
|
||||
delete(instancesWithNamedPorts, instanceToRemove.Instance)
|
||||
} else {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusBadRequest,
|
||||
Message: fmt.Sprintf("%s is not a member of %s", instanceToRemove.Instance, key.String()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
igAttrs.InstanceMap[*key] = instancesWithNamedPorts
|
||||
return nil
|
||||
}
|
||||
|
||||
// List gets a list of InstanceWithNamedPorts
|
||||
func (igAttrs *InstanceGroupAttributes) List(key *meta.Key) []*ga.InstanceWithNamedPorts {
|
||||
igAttrs.Lock.Lock()
|
||||
defer igAttrs.Lock.Unlock()
|
||||
|
||||
instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key]
|
||||
if !ok {
|
||||
instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts)
|
||||
}
|
||||
|
||||
var instanceList []*ga.InstanceWithNamedPorts
|
||||
for _, val := range instancesWithNamedPorts {
|
||||
instanceList = append(instanceList, val)
|
||||
}
|
||||
|
||||
return instanceList
|
||||
}
|
||||
|
||||
// AddInstancesHook mocks adding instances to an InstanceGroup
|
||||
func AddInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsAddInstancesRequest, m *cloud.MockInstanceGroups) error {
|
||||
_, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
var attrs InstanceGroupAttributes
|
||||
attrs = m.X.(InstanceGroupAttributes)
|
||||
attrs.AddInstances(key, req.Instances)
|
||||
m.X = attrs
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListInstancesHook mocks listing instances from an InstanceGroup
|
||||
func ListInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsListInstancesRequest, filter *filter.F, m *cloud.MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) {
|
||||
_, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return nil, &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
var attrs InstanceGroupAttributes
|
||||
attrs = m.X.(InstanceGroupAttributes)
|
||||
instances := attrs.List(key)
|
||||
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
// RemoveInstancesHook mocks removing instances from an InstanceGroup
|
||||
func RemoveInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsRemoveInstancesRequest, m *cloud.MockInstanceGroups) error {
|
||||
_, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
var attrs InstanceGroupAttributes
|
||||
attrs = m.X.(InstanceGroupAttributes)
|
||||
attrs.RemoveInstances(key, req.Instances)
|
||||
m.X = attrs
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateFirewallHook defines the hook for updating a Firewall. It replaces the
|
||||
// object with the same key in the mock with the updated object.
|
||||
func UpdateFirewallHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error {
|
||||
_, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in Firewalls", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
obj.Name = key.Name
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, "ga", "firewalls")
|
||||
obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "firewalls", key)
|
||||
|
||||
m.Objects[*key] = &cloud.MockFirewallsObj{Obj: obj}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateHealthCheckHook defines the hook for updating a HealthCheck. It
|
||||
// replaces the object with the same key in the mock with the updated object.
|
||||
func UpdateHealthCheckHook(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *cloud.MockHealthChecks) error {
|
||||
_, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in HealthChecks", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
obj.Name = key.Name
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, "ga", "healthChecks")
|
||||
obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "healthChecks", key)
|
||||
|
||||
m.Objects[*key] = &cloud.MockHealthChecksObj{Obj: obj}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateRegionBackendServiceHook defines the hook for updating a Region
|
||||
// BackendsService. It replaces the object with the same key in the mock with
|
||||
// the updated object.
|
||||
func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockRegionBackendServices) error {
|
||||
_, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in RegionBackendServices", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
obj.Name = key.Name
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices")
|
||||
obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key)
|
||||
|
||||
m.Objects[*key] = &cloud.MockRegionBackendServicesObj{Obj: obj}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertFirewallsUnauthorizedErrHook mocks firewall insertion and returns a forbidden error.
|
||||
func InsertFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) (bool, error) {
|
||||
return true, &googleapi.Error{Code: http.StatusForbidden}
|
||||
}
|
||||
|
||||
// UpdateFirewallsUnauthorizedErrHook mocks firewall updating and returns a forbidden error.
|
||||
func UpdateFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error {
|
||||
return &googleapi.Error{Code: http.StatusForbidden}
|
||||
}
|
||||
|
||||
// DeleteFirewallsUnauthorizedErrHook mocks firewall deletion and returns a forbidden error.
|
||||
func DeleteFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, m *cloud.MockFirewalls) (bool, error) {
|
||||
return true, &googleapi.Error{Code: http.StatusForbidden}
|
||||
}
|
||||
|
||||
// GetFirewallsUnauthorizedErrHook mocks firewall information retrieval and returns a forbidden error.
|
||||
func GetFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, m *cloud.MockFirewalls) (bool, *ga.Firewall, error) {
|
||||
return true, nil, &googleapi.Error{Code: http.StatusForbidden}
|
||||
}
|
||||
|
||||
// GetTargetPoolInternalErrHook mocks getting a target pool. It returns an internal server error.
|
||||
func GetTargetPoolInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockTargetPools) (bool, *ga.TargetPool, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// GetForwardingRulesInternalErrHook mocks getting forwarding rules and returns an internal server error.
|
||||
func GetForwardingRulesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockForwardingRules) (bool, *ga.ForwardingRule, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// GetAddressesInternalErrHook mocks getting network address and returns an internal server error.
|
||||
func GetAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, *ga.Address, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// GetHTTPHealthChecksInternalErrHook mocks getting http health check and returns an internal server error.
|
||||
func GetHTTPHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// InsertTargetPoolsInternalErrHook mocks getting target pool and returns an internal server error.
|
||||
func InsertTargetPoolsInternalErrHook(ctx context.Context, key *meta.Key, obj *ga.TargetPool, m *cloud.MockTargetPools) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
||||
// InsertForwardingRulesInternalErrHook mocks getting forwarding rule and returns an internal server error.
|
||||
func InsertForwardingRulesInternalErrHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
||||
// DeleteAddressesNotFoundErrHook mocks deleting network address and returns a not found error.
|
||||
func DeleteAddressesNotFoundErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, error) {
|
||||
return true, &googleapi.Error{Code: http.StatusNotFound}
|
||||
}
|
||||
|
||||
// DeleteAddressesInternalErrHook mocks deleting address and returns an internal server error.
|
||||
func DeleteAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
||||
// GetRegionBackendServicesErrHook mocks getting region backend service and returns an internal server error.
|
||||
func GetRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, *ga.BackendService, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// UpdateRegionBackendServicesErrHook mocks updating a region backend service and returns an internal server error.
|
||||
func UpdateRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, svc *ga.BackendService, m *cloud.MockRegionBackendServices) error {
|
||||
return InternalServerError
|
||||
}
|
||||
|
||||
// DeleteRegionBackendServicesErrHook mocks deleting region backend service and returns an internal server error.
|
||||
func DeleteRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
||||
// DeleteRegionBackendServicesInUseErrHook mocks deleting region backend service and returns an InUseError.
|
||||
func DeleteRegionBackendServicesInUseErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, error) {
|
||||
return true, InUseError
|
||||
}
|
||||
|
||||
// GetInstanceGroupInternalErrHook mocks getting instance group and returns an internal server error.
|
||||
func GetInstanceGroupInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockInstanceGroups) (bool, *ga.InstanceGroup, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// GetHealthChecksInternalErrHook mocks getting health check and returns an internal server error.
|
||||
func GetHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, *ga.HealthCheck, error) {
|
||||
return true, nil, InternalServerError
|
||||
}
|
||||
|
||||
// DeleteHealthChecksInternalErrHook mocks deleting health check and returns an internal server error.
|
||||
func DeleteHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
||||
// DeleteHealthChecksInuseErrHook mocks deleting health check and returns an in use error.
|
||||
func DeleteHealthChecksInuseErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, error) {
|
||||
return true, InUseError
|
||||
}
|
||||
|
||||
// DeleteForwardingRuleErrHook mocks deleting forwarding rule and returns an internal server error.
|
||||
func DeleteForwardingRuleErrHook(ctx context.Context, key *meta.Key, m *cloud.MockForwardingRules) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
||||
// ListZonesInternalErrHook mocks listing zone and returns an internal server error.
|
||||
func ListZonesInternalErrHook(ctx context.Context, fl *filter.F, m *cloud.MockZones) (bool, []*ga.Zone, error) {
|
||||
return true, []*ga.Zone{}, InternalServerError
|
||||
}
|
||||
|
||||
// DeleteInstanceGroupInternalErrHook mocks deleting instance group and returns an internal server error.
|
||||
func DeleteInstanceGroupInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockInstanceGroups) (bool, error) {
|
||||
return true, InternalServerError
|
||||
}
|
||||
|
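To round out the mock helpers in this file, a short sketch of driving InstanceGroupAttributes directly, outside of any generated mock wiring (the key and instance names are placeholders):

package main

import (
	"fmt"
	"sync"

	ga "google.golang.org/api/compute/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
)

func main() {
	attrs := mock.InstanceGroupAttributes{
		InstanceMap: map[meta.Key]map[string]*ga.InstanceWithNamedPorts{},
		Lock:        &sync.Mutex{},
	}
	key := meta.ZonalKey("my-ig", "us-central1-b")

	// Add two instances, then list and remove one of them.
	attrs.AddInstances(key, []*ga.InstanceReference{{Instance: "inst-1"}, {Instance: "inst-2"}})
	fmt.Println(len(attrs.List(key))) // 2
	attrs.RemoveInstances(key, []*ga.InstanceReference{{Instance: "inst-1"}})
	fmt.Println(len(attrs.List(key))) // 1
}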
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go
generated
vendored
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go
generated
vendored
@ -29,10 +29,17 @@ import (
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

const (
	operationStatusDone = "DONE"
)

// operation is a GCE operation that can be waited on.
type operation interface {
	// isDone queries GCE for the done status. This call can block.
	isDone(ctx context.Context) (bool, error)
	// error returns the resulting error of the operation. This may be nil if the operation
	// was successful.
	error() error
	// rateLimitKey returns the rate limit key to use for the given operation.
	// This rate limit will govern how fast the server will be polled for
	// operation completion status.
@ -43,6 +50,7 @@ type gaOperation struct {
|
||||
s *Service
|
||||
projectID string
|
||||
key *meta.Key
|
||||
err error
|
||||
}
|
||||
|
||||
func (o *gaOperation) String() string {
|
||||
@ -71,7 +79,15 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return op != nil && op.Status == "DONE", nil
|
||||
if op == nil || op.Status != operationStatusDone {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
|
||||
e := op.Error.Errors[0]
|
||||
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o *gaOperation) rateLimitKey() *RateLimitKey {
|
||||
@ -83,10 +99,15 @@ func (o *gaOperation) rateLimitKey() *RateLimitKey {
|
||||
}
|
||||
}
|
||||
|
||||
func (o *gaOperation) error() error {
|
||||
return o.err
|
||||
}
|
||||
|
||||
type alphaOperation struct {
|
||||
s *Service
|
||||
projectID string
|
||||
key *meta.Key
|
||||
err error
|
||||
}
|
||||
|
||||
func (o *alphaOperation) String() string {
|
||||
@ -115,7 +136,15 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return op != nil && op.Status == "DONE", nil
|
||||
if op == nil || op.Status != operationStatusDone {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
|
||||
e := op.Error.Errors[0]
|
||||
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o *alphaOperation) rateLimitKey() *RateLimitKey {
|
||||
@ -127,10 +156,15 @@ func (o *alphaOperation) rateLimitKey() *RateLimitKey {
|
||||
}
|
||||
}
|
||||
|
||||
func (o *alphaOperation) error() error {
|
||||
return o.err
|
||||
}
|
||||
|
||||
type betaOperation struct {
|
||||
s *Service
|
||||
projectID string
|
||||
key *meta.Key
|
||||
err error
|
||||
}
|
||||
|
||||
func (o *betaOperation) String() string {
|
||||
@ -159,7 +193,15 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return op != nil && op.Status == "DONE", nil
|
||||
if op == nil || op.Status != operationStatusDone {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
|
||||
e := op.Error.Errors[0]
|
||||
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o *betaOperation) rateLimitKey() *RateLimitKey {
|
||||
@ -170,3 +212,7 @@ func (o *betaOperation) rateLimitKey() *RateLimitKey {
|
||||
Version: meta.VersionBeta,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *betaOperation) error() error {
|
||||
return o.err
|
||||
}
|
||||
|
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
generated
vendored
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
generated
vendored
@ -47,22 +47,60 @@ type RateLimiter interface {
Accept(ctx context.Context, key *RateLimitKey) error
}

// acceptor is an object which blocks within Accept until a call is allowed to run.
// Accept is a behavior of the flowcontrol.RateLimiter interface.
type acceptor interface {
// Accept blocks until a call is allowed to run.
Accept()
}

// AcceptRateLimiter wraps an Acceptor with RateLimiter parameters.
type AcceptRateLimiter struct {
// Acceptor is the underlying rate limiter.
Acceptor acceptor
}

// Accept wraps an Acceptor and blocks on Accept or context.Done(). Key is ignored.
func (rl *AcceptRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
ch := make(chan struct{})
go func() {
rl.Acceptor.Accept()
close(ch)
}()
select {
case <-ch:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}

// NopRateLimiter is a rate limiter that performs no rate limiting.
type NopRateLimiter struct {
}

// Accept the operation to be rate limited.
// Accept everything immediately.
func (*NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
// Rate limit polling of the Operation status to avoid hammering GCE
// for the status of an operation.
const pollTime = time.Duration(1) * time.Second
if key.Operation == "Get" && key.Service == "Operations" {
select {
case <-time.NewTimer(pollTime).C:
break
case <-ctx.Done():
return ctx.Err()
}
}
return nil
}

// MinimumRateLimiter wraps a RateLimiter and will only call its Accept until the minimum
// duration has been met or the context is cancelled.
type MinimumRateLimiter struct {
// RateLimiter is the underlying ratelimiter which is called after the minimum time is reached.
RateLimiter RateLimiter
// Minimum is the minimum wait time before the underlying ratelimiter is called.
Minimum time.Duration
}

// Accept blocks on the minimum duration and context. Once the minimum duration is met,
// the func is blocked on the underlying ratelimiter.
func (m *MinimumRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
select {
case <-time.After(m.Minimum):
return m.RateLimiter.Accept(ctx, key)
case <-ctx.Done():
return ctx.Err()
}
}
|
||||
|
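AcceptRateLimiter turns a blocking, context-unaware Accept() into a cancellable call by running it in a goroutine and racing the result against ctx.Done(), while MinimumRateLimiter just enforces a floor on the spacing before delegating. A self-contained sketch of that bridging trick; the names below are illustrative and only the shape follows the vendored types:

package main

import (
	"context"
	"fmt"
	"time"
)

// blockingAcceptor stands in for the flowcontrol-style acceptor: Accept()
// blocks and knows nothing about contexts.
type blockingAcceptor struct{ delay time.Duration }

func (a blockingAcceptor) Accept() { time.Sleep(a.delay) }

// acceptWithContext races the blocking Accept against ctx, the same way
// AcceptRateLimiter.Accept does above.
func acceptWithContext(ctx context.Context, a blockingAcceptor) error {
	ch := make(chan struct{})
	go func() {
		a.Accept()
		close(ch)
	}()
	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	// The acceptor would block for a second; the context deadline wins first.
	err := acceptWithContext(ctx, blockingAcceptor{delay: time.Second})
	fmt.Println("accept returned:", err)
}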
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/ratelimit_test.go
generated
vendored
Normal file
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/ratelimit_test.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type FakeAcceptor struct{ accept func() }
|
||||
|
||||
func (f *FakeAcceptor) Accept() {
|
||||
f.accept()
|
||||
}
|
||||
|
||||
func TestAcceptRateLimiter(t *testing.T) {
|
||||
fa := &FakeAcceptor{accept: func() {}}
|
||||
arl := &AcceptRateLimiter{fa}
|
||||
err := arl.Accept(context.Background(), nil)
|
||||
if err != nil {
|
||||
t.Errorf("AcceptRateLimiter.Accept() = %v, want nil", err)
|
||||
}
|
||||
|
||||
// Use context that has been cancelled and expect a context error returned.
|
||||
ctxCancelled, cancelled := context.WithCancel(context.Background())
|
||||
cancelled()
|
||||
// Verify context is cancelled by now.
|
||||
<-ctxCancelled.Done()
|
||||
|
||||
fa.accept = func() { time.Sleep(1 * time.Second) }
|
||||
err = arl.Accept(ctxCancelled, nil)
|
||||
if err != ctxCancelled.Err() {
|
||||
t.Errorf("AcceptRateLimiter.Accept() = %v, want %v", err, ctxCancelled.Err())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMinimumRateLimiter(t *testing.T) {
|
||||
fa := &FakeAcceptor{accept: func() {}}
|
||||
arl := &AcceptRateLimiter{fa}
|
||||
var called bool
|
||||
fa.accept = func() { called = true }
|
||||
m := &MinimumRateLimiter{RateLimiter: arl, Minimum: 10 * time.Millisecond}
|
||||
|
||||
err := m.Accept(context.Background(), nil)
|
||||
if err != nil {
|
||||
t.Errorf("MinimumRateLimiter.Accept = %v, want nil", err)
|
||||
}
|
||||
if !called {
|
||||
t.Errorf("`called` = false, want true")
|
||||
}
|
||||
|
||||
// Use context that has been cancelled and expect a context error returned.
|
||||
ctxCancelled, cancelled := context.WithCancel(context.Background())
|
||||
cancelled()
|
||||
// Verify context is cancelled by now.
|
||||
<-ctxCancelled.Done()
|
||||
called = false
|
||||
err = m.Accept(ctxCancelled, nil)
|
||||
if err != ctxCancelled.Err() {
|
||||
t.Errorf("AcceptRateLimiter.Accept() = %v, want %v", err, ctxCancelled.Err())
|
||||
}
|
||||
if called {
|
||||
t.Errorf("`called` = true, want false")
|
||||
}
|
||||
}
|
51
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go
generated
vendored
51
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go
generated
vendored
@ -45,19 +45,19 @@ func (s *Service) wrapOperation(anyOp interface{}) (operation, error) {
if err != nil {
return nil, err
}
return &gaOperation{s, r.ProjectID, r.Key}, nil
return &gaOperation{s: s, projectID: r.ProjectID, key: r.Key}, nil
case *alpha.Operation:
r, err := ParseResourceURL(o.SelfLink)
if err != nil {
return nil, err
}
return &alphaOperation{s, r.ProjectID, r.Key}, nil
return &alphaOperation{s: s, projectID: r.ProjectID, key: r.Key}, nil
case *beta.Operation:
r, err := ParseResourceURL(o.SelfLink)
if err != nil {
return nil, err
}
return &betaOperation{s, r.ProjectID, r.Key}, nil
return &betaOperation{s: s, projectID: r.ProjectID, key: r.Key}, nil
default:
return nil, fmt.Errorf("invalid type %T", anyOp)
}
@ -72,14 +72,39 @@ func (s *Service) WaitForCompletion(ctx context.Context, genericOp interface{})
glog.Errorf("wrapOperation(%+v) error: %v", genericOp, err)
return err
}
for done, err := op.isDone(ctx); !done; done, err = op.isDone(ctx) {
if err != nil {
glog.V(4).Infof("op.isDone(%v) error; op = %v, err = %v", ctx, op, err)
return err
}
glog.V(5).Infof("op.isDone(%v) waiting; op = %v", ctx, op)
s.RateLimiter.Accept(ctx, op.rateLimitKey())
}
glog.V(5).Infof("op.isDone(%v) complete; op = %v", ctx, op)
return nil

return s.pollOperation(ctx, op)
}

// pollOperation calls operations.isDone until the function comes back true or context is Done.
// If an error occurs retrieving the operation, the loop will continue until the context is done.
// This is to prevent a transient error from bubbling up to controller-level logic.
func (s *Service) pollOperation(ctx context.Context, op operation) error {
var pollCount int
for {
// Check if context has been cancelled. Note that ctx.Done() must be checked before
// returning ctx.Err().
select {
case <-ctx.Done():
glog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err())
return ctx.Err()
default:
// ctx is not canceled, continue immediately
}

pollCount++
glog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount)
s.RateLimiter.Accept(ctx, op.rateLimitKey())
done, err := op.isDone(ctx)
if err != nil {
glog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err)
}

if done {
break
}
}

glog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error())
return op.error()
}
|
||||
|
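pollOperation above deliberately treats isDone errors as transient and keeps looping, relying on the context (plus the rate limiter between polls) to bound the wait. A standalone sketch of that poll-until-done-or-cancelled loop with hypothetical names; the fixed sleep below stands in for RateLimiter.Accept:

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntilDone retries checkDone until it reports true or ctx expires;
// errors from checkDone are swallowed as transient, mirroring pollOperation.
func pollUntilDone(ctx context.Context, interval time.Duration, checkDone func() (bool, error)) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if done, err := checkDone(); err == nil && done {
			return nil
		}
		time.Sleep(interval) // stand-in for s.RateLimiter.Accept(ctx, key)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	polls := 0
	err := pollUntilDone(ctx, 10*time.Millisecond, func() (bool, error) {
		polls++
		return polls >= 5, nil
	})
	fmt.Println("polls:", polls, "err:", err)
}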
84
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service_test.go
generated
vendored
Normal file
84
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service_test.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPollOperation(t *testing.T) {
|
||||
const totalAttempts = 10
|
||||
var attempts int
|
||||
fo := &fakeOperation{isDoneFunc: func(ctx context.Context) (bool, error) {
|
||||
attempts++
|
||||
if attempts < totalAttempts {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}}
|
||||
s := Service{RateLimiter: &NopRateLimiter{}}
|
||||
// Check that pollOperation will retry the operation multiple times.
|
||||
err := s.pollOperation(context.Background(), fo)
|
||||
if err != nil {
|
||||
t.Errorf("pollOperation() = %v, want nil", err)
|
||||
}
|
||||
if attempts != totalAttempts {
|
||||
t.Errorf("`attempts` = %d, want %d", attempts, totalAttempts)
|
||||
}
|
||||
|
||||
// Check that the operation's error is returned.
|
||||
fo.err = fmt.Errorf("test operation failed")
|
||||
err = s.pollOperation(context.Background(), fo)
|
||||
if err != fo.err {
|
||||
t.Errorf("pollOperation() = %v, want %v", err, fo.err)
|
||||
}
|
||||
fo.err = nil
|
||||
|
||||
fo.isDoneFunc = func(ctx context.Context) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
// Use context that has been cancelled and expect a context error returned.
|
||||
ctxCancelled, cancelled := context.WithCancel(context.Background())
|
||||
cancelled()
|
||||
// Verify context is cancelled by now.
|
||||
<-ctxCancelled.Done()
|
||||
// Check that pollOperation returns because the context is cancelled.
|
||||
err = s.pollOperation(ctxCancelled, fo)
|
||||
if err == nil {
|
||||
t.Errorf("pollOperation() = nil, want: %v", ctxCancelled.Err())
|
||||
}
|
||||
}
|
||||
|
||||
type fakeOperation struct {
|
||||
isDoneFunc func(ctx context.Context) (bool, error)
|
||||
err error
|
||||
rateKey *RateLimitKey
|
||||
}
|
||||
|
||||
func (f *fakeOperation) isDone(ctx context.Context) (bool, error) {
|
||||
return f.isDoneFunc(ctx)
|
||||
}
|
||||
|
||||
func (f *fakeOperation) error() error {
|
||||
return f.err
|
||||
}
|
||||
|
||||
func (f *fakeOperation) rateLimitKey() *RateLimitKey {
|
||||
return f.rateKey
|
||||
}
|
145
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils.go
generated
vendored
145
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils.go
generated
vendored
@ -25,9 +25,9 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
gaPrefix = "https://www.googleapis.com/compute/v1/"
|
||||
alphaPrefix = "https://www.googleapis.com/compute/alpha/"
|
||||
betaPrefix = "https://www.googleapis.com/compute/beta/"
|
||||
gaPrefix = "https://www.googleapis.com/compute/v1"
|
||||
alphaPrefix = "https://www.googleapis.com/compute/alpha"
|
||||
betaPrefix = "https://www.googleapis.com/compute/beta"
|
||||
)
|
||||
|
||||
// ResourceID identifies a GCE resource as parsed from compute resource URL.
|
||||
@ -51,8 +51,27 @@ func (r *ResourceID) Equal(other *ResourceID) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// RelativeResourceName returns the relative resource name string
|
||||
// representing this ResourceID.
|
||||
func (r *ResourceID) RelativeResourceName() string {
|
||||
return RelativeResourceName(r.ProjectID, r.Resource, r.Key)
|
||||
}
|
||||
|
||||
// ResourcePath returns the resource path representing this ResourceID.
|
||||
func (r *ResourceID) ResourcePath() string {
|
||||
return ResourcePath(r.Resource, r.Key)
|
||||
}
|
||||
|
||||
func (r *ResourceID) SelfLink(ver meta.Version) string {
|
||||
return SelfLink(ver, r.ProjectID, r.Resource, r.Key)
|
||||
}
|
||||
|
||||
// ParseResourceURL parses resource URLs of the following formats:
|
||||
//
|
||||
// global/<res>/<name>
|
||||
// regions/<region>/<res>/<name>
|
||||
// zones/<zone>/<res>/<name>
|
||||
// projects/<proj>
|
||||
// projects/<proj>/global/<res>/<name>
|
||||
// projects/<proj>/regions/<region>/<res>/<name>
|
||||
// projects/<proj>/zones/<zone>/<res>/<name>
|
||||
@ -62,64 +81,63 @@ func (r *ResourceID) Equal(other *ResourceID) bool {
|
||||
func ParseResourceURL(url string) (*ResourceID, error) {
|
||||
errNotValid := fmt.Errorf("%q is not a valid resource URL", url)
|
||||
|
||||
// Remove the prefix up to ...projects/
|
||||
// Trim prefix off URL leaving "projects/..."
|
||||
projectsIndex := strings.Index(url, "/projects/")
|
||||
if projectsIndex >= 0 {
|
||||
url = url[projectsIndex+1:]
|
||||
}
|
||||
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 2 || parts[0] != "projects" {
|
||||
if len(parts) < 2 || len(parts) > 6 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
|
||||
ret := &ResourceID{ProjectID: parts[1]}
|
||||
if len(parts) == 2 {
|
||||
ret := &ResourceID{}
|
||||
scopedName := parts
|
||||
if parts[0] == "projects" {
|
||||
ret.Resource = "projects"
|
||||
return ret, nil
|
||||
}
|
||||
ret.ProjectID = parts[1]
|
||||
scopedName = parts[2:]
|
||||
|
||||
if len(parts) < 4 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
|
||||
if len(parts) == 4 {
|
||||
switch parts[2] {
|
||||
case "regions":
|
||||
ret.Resource = "regions"
|
||||
ret.Key = meta.GlobalKey(parts[3])
|
||||
if len(scopedName) == 0 {
|
||||
return ret, nil
|
||||
case "zones":
|
||||
ret.Resource = "zones"
|
||||
ret.Key = meta.GlobalKey(parts[3])
|
||||
}
|
||||
}
|
||||
|
||||
switch scopedName[0] {
|
||||
case "global":
|
||||
if len(scopedName) != 3 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = scopedName[1]
|
||||
ret.Key = meta.GlobalKey(scopedName[2])
|
||||
return ret, nil
|
||||
case "regions":
|
||||
switch len(scopedName) {
|
||||
case 2:
|
||||
ret.Resource = "regions"
|
||||
ret.Key = meta.GlobalKey(scopedName[1])
|
||||
return ret, nil
|
||||
case 4:
|
||||
ret.Resource = scopedName[2]
|
||||
ret.Key = meta.RegionalKey(scopedName[3], scopedName[1])
|
||||
return ret, nil
|
||||
default:
|
||||
return nil, errNotValid
|
||||
}
|
||||
}
|
||||
|
||||
switch parts[2] {
|
||||
case "global":
|
||||
if len(parts) != 5 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = parts[3]
|
||||
ret.Key = meta.GlobalKey(parts[4])
|
||||
return ret, nil
|
||||
case "regions":
|
||||
if len(parts) != 6 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = parts[4]
|
||||
ret.Key = meta.RegionalKey(parts[5], parts[3])
|
||||
return ret, nil
|
||||
case "zones":
|
||||
if len(parts) != 6 {
|
||||
switch len(scopedName) {
|
||||
case 2:
|
||||
ret.Resource = "zones"
|
||||
ret.Key = meta.GlobalKey(scopedName[1])
|
||||
return ret, nil
|
||||
case 4:
|
||||
ret.Resource = scopedName[2]
|
||||
ret.Key = meta.ZonalKey(scopedName[3], scopedName[1])
|
||||
return ret, nil
|
||||
default:
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = parts[4]
|
||||
ret.Key = meta.ZonalKey(parts[5], parts[3])
|
||||
return ret, nil
|
||||
}
|
||||
return nil, errNotValid
|
||||
}
|
||||
@ -132,6 +150,38 @@ func copyViaJSON(dest, src interface{}) error {
return json.Unmarshal(bytes, dest)
}

// ResourcePath returns the path starting from the location.
// Example: regions/us-central1/subnetworks/my-subnet
func ResourcePath(resource string, key *meta.Key) string {
switch resource {
case "zones", "regions":
return fmt.Sprintf("%s/%s", resource, key.Name)
case "projects":
return "invalid-resource"
}

switch key.Type() {
case meta.Zonal:
return fmt.Sprintf("zones/%s/%s/%s", key.Zone, resource, key.Name)
case meta.Regional:
return fmt.Sprintf("regions/%s/%s/%s", key.Region, resource, key.Name)
case meta.Global:
return fmt.Sprintf("global/%s/%s", resource, key.Name)
}
return "invalid-key-type"
}

// RelativeResourceName returns the path starting from project.
// Example: projects/my-project/regions/us-central1/subnetworks/my-subnet
func RelativeResourceName(project, resource string, key *meta.Key) string {
switch resource {
case "projects":
return fmt.Sprintf("projects/%s", project)
default:
return fmt.Sprintf("projects/%s/%s", project, ResourcePath(resource, key))
}
}

// SelfLink returns the self link URL for the given object.
func SelfLink(ver meta.Version, project, resource string, key *meta.Key) string {
var prefix string
@ -146,13 +196,6 @@ func SelfLink(ver meta.Version, project, resource string, key *meta.Key) string
prefix = "invalid-prefix"
}

switch key.Type() {
case meta.Zonal:
return fmt.Sprintf("%sprojects/%s/zones/%s/%s/%s", prefix, project, key.Zone, resource, key.Name)
case meta.Regional:
return fmt.Sprintf("%sprojects/%s/regions/%s/%s/%s", prefix, project, key.Region, resource, key.Name)
case meta.Global:
return fmt.Sprintf("%sprojects/%s/%s/%s", prefix, project, resource, key.Name)
}
return "invalid-self-link"
return fmt.Sprintf("%s/%s", prefix, RelativeResourceName(project, resource, key))

}
|
||||
|
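The new ResourcePath/RelativeResourceName/SelfLink helpers layer GCE URL construction (scope path, then project-relative name, then versioned prefix) instead of hand-formatting each case. A reduced standalone sketch of the same layering; the key type and prefix constant here are simplified stand-ins, not the vendored meta.Key:

package main

import "fmt"

// key is a simplified stand-in for meta.Key: Zone set means zonal, Region set
// means regional, neither means global.
type key struct{ Name, Zone, Region string }

func resourcePath(resource string, k key) string {
	switch {
	case k.Zone != "":
		return fmt.Sprintf("zones/%s/%s/%s", k.Zone, resource, k.Name)
	case k.Region != "":
		return fmt.Sprintf("regions/%s/%s/%s", k.Region, resource, k.Name)
	default:
		return fmt.Sprintf("global/%s/%s", resource, k.Name)
	}
}

func relativeResourceName(project, resource string, k key) string {
	return fmt.Sprintf("projects/%s/%s", project, resourcePath(resource, k))
}

func main() {
	// GA prefix without a trailing slash, matching the constant change in this diff.
	const prefix = "https://www.googleapis.com/compute/v1"
	k := key{Name: "my-subnet", Region: "us-central1"}
	fmt.Println(relativeResourceName("my-project", "subnetworks", k))
	fmt.Println(prefix + "/" + relativeResourceName("my-project", "subnetworks", k))
}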
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils_test.go
generated
vendored
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils_test.go
generated
vendored
@ -23,6 +23,54 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
func TestEqualResourceID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
a *ResourceID
|
||||
b *ResourceID
|
||||
}{
|
||||
{
|
||||
a: &ResourceID{"some-gce-project", "projects", nil},
|
||||
b: &ResourceID{"some-gce-project", "projects", nil},
|
||||
},
|
||||
{
|
||||
a: &ResourceID{"", "networks", meta.GlobalKey("my-net")},
|
||||
b: &ResourceID{"", "networks", meta.GlobalKey("my-net")},
|
||||
},
|
||||
{
|
||||
a: &ResourceID{"some-gce-project", "projects", meta.GlobalKey("us-central1")},
|
||||
b: &ResourceID{"some-gce-project", "projects", meta.GlobalKey("us-central1")},
|
||||
},
|
||||
} {
|
||||
if !tc.a.Equal(tc.b) {
|
||||
t.Errorf("%v.Equal(%v) = false, want true", tc.a, tc.b)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
a *ResourceID
|
||||
b *ResourceID
|
||||
}{
|
||||
{
|
||||
a: &ResourceID{"some-gce-project", "projects", nil},
|
||||
b: &ResourceID{"some-other-project", "projects", nil},
|
||||
},
|
||||
{
|
||||
a: &ResourceID{"some-gce-project", "projects", nil},
|
||||
b: &ResourceID{"some-gce-project", "projects", meta.GlobalKey("us-central1")},
|
||||
},
|
||||
{
|
||||
a: &ResourceID{"some-gce-project", "networks", meta.GlobalKey("us-central1")},
|
||||
b: &ResourceID{"some-gce-project", "projects", meta.GlobalKey("us-central1")},
|
||||
},
|
||||
} {
|
||||
if tc.a.Equal(tc.b) {
|
||||
t.Errorf("%v.Equal(%v) = true, want false", tc.a, tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseResourceURL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -90,6 +138,18 @@ func TestParseResourceURL(t *testing.T) {
|
||||
"projects/some-gce-project/zones/us-central1-c/instances/instance-1",
|
||||
&ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")},
|
||||
},
|
||||
{
|
||||
"global/networks/my-network",
|
||||
&ResourceID{"", "networks", meta.GlobalKey("my-network")},
|
||||
},
|
||||
{
|
||||
"regions/us-central1/subnetworks/my-subnet",
|
||||
&ResourceID{"", "subnetworks", meta.RegionalKey("my-subnet", "us-central1")},
|
||||
},
|
||||
{
|
||||
"zones/us-central1-c/instances/instance-1",
|
||||
&ResourceID{"", "instances", meta.ZonalKey("instance-1", "us-central1-c")},
|
||||
},
|
||||
} {
|
||||
r, err := ParseResourceURL(tc.in)
|
||||
if err != nil {
|
||||
@ -112,7 +172,9 @@ func TestParseResourceURL(t *testing.T) {
|
||||
"/a/b/c/d/e/f",
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project/global",
|
||||
"projects/some-gce-project/global",
|
||||
"projects/some-gce-project/global/foo",
|
||||
"projects/some-gce-project/global/foo/bar/baz",
|
||||
"projects/some-gce-project/regions/us-central1/res",
|
||||
"projects/some-gce-project/zones/us-central1-c/res",
|
||||
"projects/some-gce-project/zones/us-central1-c/res/name/extra",
|
||||
} {
|
||||
@ -198,7 +260,28 @@ func TestSelfLink(t *testing.T) {
|
||||
"proj4",
|
||||
"urlMaps",
|
||||
meta.GlobalKey("key3"),
|
||||
"https://www.googleapis.com/compute/v1/projects/proj4/urlMaps/key3",
|
||||
"https://www.googleapis.com/compute/v1/projects/proj4/global/urlMaps/key3",
|
||||
},
|
||||
{
|
||||
meta.VersionGA,
|
||||
"proj4",
|
||||
"projects",
|
||||
nil,
|
||||
"https://www.googleapis.com/compute/v1/projects/proj4",
|
||||
},
|
||||
{
|
||||
meta.VersionGA,
|
||||
"proj4",
|
||||
"regions",
|
||||
meta.GlobalKey("us-central1"),
|
||||
"https://www.googleapis.com/compute/v1/projects/proj4/regions/us-central1",
|
||||
},
|
||||
{
|
||||
meta.VersionGA,
|
||||
"proj4",
|
||||
"zones",
|
||||
meta.GlobalKey("us-central1-a"),
|
||||
"https://www.googleapis.com/compute/v1/projects/proj4/zones/us-central1-a",
|
||||
},
|
||||
} {
|
||||
if link := SelfLink(tc.ver, tc.project, tc.resource, tc.key); link != tc.want {
|
||||
|
51
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
51
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
@ -68,7 +68,7 @@ const (
|
||||
// AffinityTypeClientIPProto - affinity based on Client IP and port.
|
||||
gceAffinityTypeClientIPProto = "CLIENT_IP_PROTO"
|
||||
|
||||
operationPollInterval = 3 * time.Second
|
||||
operationPollInterval = time.Second
|
||||
// Creating Route in very large clusters, may take more than half an hour.
|
||||
operationPollTimeoutDuration = time.Hour
|
||||
|
||||
@ -88,8 +88,8 @@ const (
|
||||
// Defaults to 5 * 2 = 10 seconds before the LB will steer traffic away
|
||||
gceHcUnhealthyThreshold = int64(5)
|
||||
|
||||
gceComputeAPIEndpoint = "https://www.googleapis.com/compute/v1/"
|
||||
gceComputeAPIEndpointAlpha = "https://www.googleapis.com/compute/alpha/"
|
||||
gceComputeAPIEndpoint = "https://www.googleapis.com/compute/v1/"
|
||||
gceComputeAPIEndpointBeta = "https://www.googleapis.com/compute/beta/"
|
||||
)
|
||||
|
||||
// gceObject is an abstraction of all GCE API object in go client
|
||||
@ -152,6 +152,9 @@ type GCECloud struct {
|
||||
|
||||
// New code generated interface to the GCE compute library.
|
||||
c cloud.Cloud
|
||||
|
||||
// Keep a reference of this around so we can inject a new cloud.RateLimiter implementation.
|
||||
s *cloud.Service
|
||||
}
|
||||
|
||||
// TODO: replace gcfg with json
|
||||
@ -269,9 +272,7 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
|
||||
// By default, fetch token from GCE metadata server
|
||||
cloudConfig.TokenSource = google.ComputeTokenSource("")
|
||||
cloudConfig.UseMetadataServer = true
|
||||
|
||||
featureMap := make(map[string]bool)
|
||||
cloudConfig.AlphaFeatureGate = &AlphaFeatureGate{featureMap}
|
||||
cloudConfig.AlphaFeatureGate = NewAlphaFeatureGate([]string{})
|
||||
if configFile != nil {
|
||||
if configFile.Global.ApiEndpoint != "" {
|
||||
cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint
|
||||
@ -289,19 +290,7 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
|
||||
|
||||
cloudConfig.NodeTags = configFile.Global.NodeTags
|
||||
cloudConfig.NodeInstancePrefix = configFile.Global.NodeInstancePrefix
|
||||
|
||||
alphaFeatureGate, err := NewAlphaFeatureGate(configFile.Global.AlphaFeatures)
|
||||
if err != nil {
|
||||
glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
|
||||
}
|
||||
cloudConfig.AlphaFeatureGate = alphaFeatureGate
|
||||
} else {
|
||||
// initialize AlphaFeatureGate when no AlphaFeatures are configured.
|
||||
alphaFeatureGate, err := NewAlphaFeatureGate([]string{})
|
||||
if err != nil {
|
||||
glog.Errorf("Encountered error for initializing alpha feature gate: %v", err)
|
||||
}
|
||||
cloudConfig.AlphaFeatureGate = alphaFeatureGate
|
||||
cloudConfig.AlphaFeatureGate = NewAlphaFeatureGate(configFile.Global.AlphaFeatures)
|
||||
}
|
||||
|
||||
// retrieve projectID and zone
|
||||
@ -495,7 +484,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
glog.Infof("managing multiple zones: %v", config.ManagedZones)
|
||||
}
|
||||
|
||||
operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(10, 100) // 10 qps, 100 bucket size.
|
||||
operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(5, 5) // 5 qps, 5 burst.
|
||||
|
||||
gce := &GCECloud{
|
||||
service: service,
|
||||
@ -522,17 +511,27 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
}

gce.manager = &gceServiceManager{gce}
gce.c = cloud.NewGCE(&cloud.Service{
gce.s = &cloud.Service{
GA: service,
Alpha: serviceAlpha,
Beta: serviceBeta,
ProjectRouter: &gceProjectRouter{gce},
RateLimiter: &gceRateLimiter{gce},
})
}
gce.c = cloud.NewGCE(gce.s)

return gce, nil
}

// SetRateLimiter adds a custom cloud.RateLimiter implementation.
// WARNING: Calling this could have unexpected behavior if you have in-flight
// requests. It is best to use this immediately after creating a GCECloud.
func (g *GCECloud) SetRateLimiter(rl cloud.RateLimiter) {
if rl != nil {
g.s.RateLimiter = rl
}
}

// determineSubnetURL queries for all subnetworks in a region for a given network and returns
// the URL of the subnetwork which exists in the auto-subnet range.
func determineSubnetURL(service *compute.Service, networkProjectID, networkName, region string) (string, error) {
@ -592,7 +591,7 @@ func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder

if gce.OnXPN() {
gce.eventBroadcaster = record.NewBroadcaster()
gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.CoreV1().RESTClient()).Events("")})
gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gce.client.CoreV1().Events("")})
gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"})
}

@ -878,10 +877,10 @@ func (manager *gceServiceManager) getProjectsAPIEndpoint() string {
return projectsApiEndpoint
}

func (manager *gceServiceManager) getProjectsAPIEndpointAlpha() string {
projectsApiEndpoint := gceComputeAPIEndpointAlpha + "projects/"
func (manager *gceServiceManager) getProjectsAPIEndpointBeta() string {
projectsApiEndpoint := gceComputeAPIEndpointBeta + "projects/"
if manager.gce.service != nil {
projectsApiEndpoint = manager.gce.serviceAlpha.BasePath
projectsApiEndpoint = manager.gce.serviceBeta.BasePath
}

return projectsApiEndpoint
|
||||
|
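Keeping the cloud.Service on the GCECloud struct is what makes SetRateLimiter useful: a caller can replace the default gceRateLimiter with any cloud.RateLimiter implementation right after CreateGCECloud. A hedged sketch of such an implementation, assuming the import path and interface shown in this diff; the spacingRateLimiter type is illustrative:

package main

import (
	"context"
	"time"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

// spacingRateLimiter is a toy cloud.RateLimiter that spaces every call by a
// fixed interval and ignores the RateLimitKey.
type spacingRateLimiter struct{ interval time.Duration }

func (s *spacingRateLimiter) Accept(ctx context.Context, _ *cloud.RateLimitKey) error {
	select {
	case <-time.After(s.interval):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Compile-time check that the toy type satisfies the interface.
var _ cloud.RateLimiter = &spacingRateLimiter{}

func main() {
	// With a *GCECloud in hand (e.g. from CreateGCECloud), the limiter would be
	// injected as: gceCloud.SetRateLimiter(&spacingRateLimiter{interval: 100 * time.Millisecond})
	_ = &spacingRateLimiter{interval: 100 * time.Millisecond}
}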
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager_test.go
generated
vendored
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager_test.go
generated
vendored
@ -26,95 +26,104 @@ import (
|
||||
)
|
||||
|
||||
const testSvcName = "my-service"
|
||||
const testRegion = "us-central1"
|
||||
const testSubnet = "/projects/x/testRegions/us-central1/testSubnetworks/customsub"
|
||||
const testLBName = "a111111111111111"
|
||||
|
||||
var vals = DefaultTestClusterValues()
|
||||
|
||||
// TestAddressManagerNoRequestedIP tests the typical case of passing in no requested IP
|
||||
func TestAddressManagerNoRequestedIP(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
svc, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
targetIP := ""
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, vals.Region)
|
||||
}
|
||||
|
||||
// TestAddressManagerBasic tests the typical case of reserving and unreserving an address.
|
||||
func TestAddressManagerBasic(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
svc, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, vals.Region)
|
||||
}
|
||||
|
||||
// TestAddressManagerOrphaned tests the case where the address exists with the IP being equal
|
||||
// to the requested address (forwarding rule or loadbalancer IP).
|
||||
func TestAddressManagerOrphaned(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
svc, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &compute.Address{Name: testLBName, Address: targetIP, AddressType: string(cloud.SchemeInternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
err = svc.ReserveRegionAddress(addr, vals.Region)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, vals.Region)
|
||||
}
|
||||
|
||||
// TestAddressManagerOutdatedOrphan tests the case where an address exists but points to
|
||||
// an IP other than the forwarding rule or loadbalancer IP.
|
||||
func TestAddressManagerOutdatedOrphan(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
svc, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
previousAddress := "1.1.0.0"
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &compute.Address{Name: testLBName, Address: previousAddress, AddressType: string(cloud.SchemeExternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
err = svc.ReserveRegionAddress(addr, vals.Region)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, vals.Region)
|
||||
}
|
||||
|
||||
// TestAddressManagerExternallyOwned tests the case where the address exists but isn't
|
||||
// owned by the controller.
|
||||
func TestAddressManagerExternallyOwned(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
svc, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(cloud.SchemeInternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
err = svc.ReserveRegionAddress(addr, vals.Region)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
ipToUse, err := mgr.HoldAddress()
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, ipToUse)
|
||||
|
||||
_, err = svc.GetRegionAddress(testLBName, testRegion)
|
||||
ad, err := svc.GetRegionAddress(testLBName, vals.Region)
|
||||
assert.True(t, isNotFound(err))
|
||||
require.Nil(t, ad)
|
||||
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
testReleaseAddress(t, mgr, svc, testLBName, vals.Region)
|
||||
}
|
||||
|
||||
// TestAddressManagerExternallyOwned tests the case where the address exists but isn't
|
||||
// owned by the controller. However, this address has the wrong type.
|
||||
func TestAddressManagerBadExternallyOwned(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
svc, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(cloud.SchemeExternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
err = svc.ReserveRegionAddress(addr, vals.Region)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
_, err = mgr.HoldAddress()
|
||||
assert.NotNil(t, err)
|
||||
mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
ad, err := mgr.HoldAddress()
|
||||
assert.NotNil(t, err) // FIXME
|
||||
require.Equal(t, ad, "")
|
||||
}
|
||||
|
||||
func testHoldAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, name, region, targetIP, scheme string) {
|
||||
|
61
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go
generated
vendored
61
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go
generated
vendored
@ -17,7 +17,6 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
@ -44,72 +43,105 @@ func newAddressMetricContextWithVersion(request, region, version string) *metric
|
||||
// ipAddress is specified, it must belong to the current project, eg: an
|
||||
// ephemeral IP associated with a global forwarding rule.
|
||||
func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("reserve", "")
|
||||
return mc.Observe(gce.c.GlobalAddresses().Insert(context.Background(), meta.GlobalKey(addr.Name), addr))
|
||||
return mc.Observe(gce.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr))
|
||||
}
|
||||
|
||||
// DeleteGlobalAddress deletes a global address by name.
|
||||
func (gce *GCECloud) DeleteGlobalAddress(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("delete", "")
|
||||
return mc.Observe(gce.c.GlobalAddresses().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// GetGlobalAddress returns the global address by name.
|
||||
func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("get", "")
|
||||
v, err := gce.c.GlobalAddresses().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ReserveRegionAddress creates a region address
|
||||
func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("reserve", region)
|
||||
return mc.Observe(gce.c.Addresses().Insert(context.Background(), meta.RegionalKey(addr.Name, region), addr))
|
||||
return mc.Observe(gce.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
|
||||
}
|
||||
|
||||
// ReserveAlphaRegionAddress creates an Alpha, regional address.
|
||||
func (gce *GCECloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("reserve", region)
|
||||
return mc.Observe(gce.c.AlphaAddresses().Insert(context.Background(), meta.RegionalKey(addr.Name, region), addr))
|
||||
return mc.Observe(gce.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
|
||||
}
|
||||
|
||||
// ReserveBetaRegionAddress creates a beta region address
|
||||
func (gce *GCECloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("reserve", region)
|
||||
return mc.Observe(gce.c.BetaAddresses().Insert(context.Background(), meta.RegionalKey(addr.Name, region), addr))
|
||||
return mc.Observe(gce.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr))
|
||||
}
|
||||
|
||||
// DeleteRegionAddress deletes a region address by name.
|
||||
func (gce *GCECloud) DeleteRegionAddress(name, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("delete", region)
|
||||
return mc.Observe(gce.c.Addresses().Delete(context.Background(), meta.RegionalKey(name, region)))
|
||||
return mc.Observe(gce.c.Addresses().Delete(ctx, meta.RegionalKey(name, region)))
|
||||
}
|
||||
|
||||
// GetRegionAddress returns the region address by name
|
||||
func (gce *GCECloud) GetRegionAddress(name, region string) (*compute.Address, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("get", region)
|
||||
v, err := gce.c.Addresses().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.Addresses().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaRegionAddress returns the Alpha, regional address by name.
|
||||
func (gce *GCECloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("get", region)
|
||||
v, err := gce.c.AlphaAddresses().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetBetaRegionAddress returns the beta region address by name
|
||||
func (gce *GCECloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newAddressMetricContext("get", region)
|
||||
v, err := gce.c.BetaAddresses().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetRegionAddressByIP returns the regional address matching the given IP address.
func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newAddressMetricContext("list", region)
addrs, err := gce.c.Addresses().List(context.Background(), region, filter.Regexp("address", ipAddress))
addrs, err := gce.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress))

mc.Observe(err)
if err != nil {
@ -129,8 +161,11 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad

// GetBetaRegionAddressByIP returns the beta regional address matching the given IP address.
func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newAddressMetricContext("list", region)
addrs, err := gce.c.BetaAddresses().List(context.Background(), region, filter.Regexp("address", ipAddress))
addrs, err := gce.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress))

mc.Observe(err)
if err != nil {
|
||||
|
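Every address call in this file now derives a per-call context via cloud.ContextWithCallTimeout() and defers cancel(), rather than passing context.Background() straight to the generated client. A standalone sketch of that pattern; the helper name and the timeout value below are assumptions for illustration, not the vendored values:

package main

import (
	"context"
	"fmt"
	"time"
)

// contextWithCallTimeout mirrors the idea of the helper used above: every
// outgoing cloud call gets its own deadline so a stuck request cannot hang
// the caller indefinitely.
func contextWithCallTimeout() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), time.Hour) // illustrative duration
}

func getAddress(name string) error {
	ctx, cancel := contextWithCallTimeout()
	defer cancel() // always release the timer, even on the success path

	// stand-in for gce.c.Addresses().Get(ctx, ...)
	select {
	case <-time.After(10 * time.Millisecond):
		fmt.Println("got address", name)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	if err := getAddress("my-lb-ip"); err != nil {
		fmt.Println("error:", err)
	}
}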
239
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go
generated
vendored
239
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go
generated
vendored
@ -1,239 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
)
|
||||
|
||||
// test
|
||||
|
||||
type FakeCloudAddressService struct {
|
||||
count int
|
||||
// reservedAddrs tracks usage of IP addresses
|
||||
// Key is the IP address as a string
|
||||
reservedAddrs map[string]bool
|
||||
// addrsByRegionAndName
|
||||
// Outer key is for region string; inner key is for address name.
|
||||
addrsByRegionAndName map[string]map[string]*computealpha.Address
|
||||
}
|
||||
|
||||
// FakeCloudAddressService Implements CloudAddressService
|
||||
var _ CloudAddressService = &FakeCloudAddressService{}
|
||||
|
||||
func NewFakeCloudAddressService() *FakeCloudAddressService {
|
||||
return &FakeCloudAddressService{
|
||||
reservedAddrs: make(map[string]bool),
|
||||
addrsByRegionAndName: make(map[string]map[string]*computealpha.Address),
|
||||
}
|
||||
}
|
||||
|
||||
// SetRegionalAddresses sets the addresses of there region. This is used for
|
||||
// setting the test environment.
|
||||
func (cas *FakeCloudAddressService) SetRegionalAddresses(region string, addrs []*computealpha.Address) {
|
||||
// Reset addresses in the region.
|
||||
cas.addrsByRegionAndName[region] = make(map[string]*computealpha.Address)
|
||||
|
||||
for _, addr := range addrs {
|
||||
cas.reservedAddrs[addr.Address] = true
|
||||
cas.addrsByRegionAndName[region][addr.Name] = addr
|
||||
}
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
|
||||
if addr.Address == "" {
|
||||
addr.Address = fmt.Sprintf("1.2.3.%d", cas.count)
|
||||
cas.count++
|
||||
}
|
||||
|
||||
if addr.AddressType == "" {
|
||||
addr.AddressType = string(cloud.SchemeExternal)
|
||||
}
|
||||
|
||||
if cas.reservedAddrs[addr.Address] {
|
||||
msg := "IP in use"
|
||||
// When the IP is already in use, this call returns an error code based
|
||||
// on the type (internal vs external) of the address. This is to be
|
||||
// consistent with actual GCE API.
|
||||
switch cloud.LbScheme(addr.AddressType) {
|
||||
case cloud.SchemeExternal:
|
||||
return makeGoogleAPIError(http.StatusBadRequest, msg)
|
||||
default:
|
||||
return makeGoogleAPIError(http.StatusConflict, msg)
|
||||
}
|
||||
}
|
||||
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
cas.addrsByRegionAndName[region] = make(map[string]*computealpha.Address)
|
||||
}
|
||||
|
||||
if _, exists := cas.addrsByRegionAndName[region][addr.Name]; exists {
|
||||
return makeGoogleAPIError(http.StatusConflict, "name in use")
|
||||
}
|
||||
|
||||
cas.addrsByRegionAndName[region][addr.Name] = addr
|
||||
cas.reservedAddrs[addr.Address] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
|
||||
alphaAddr := convertToAlphaAddress(addr)
|
||||
return cas.ReserveAlphaRegionAddress(alphaAddr, region)
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) ReserveRegionAddress(addr *compute.Address, region string) error {
|
||||
alphaAddr := convertToAlphaAddress(addr)
|
||||
return cas.ReserveAlphaRegionAddress(alphaAddr, region)
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
if addr, exists := cas.addrsByRegionAndName[region][name]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
} else {
|
||||
return addr, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddress(name, region)
|
||||
if addr != nil {
|
||||
return convertToBetaAddress(addr), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetRegionAddress(name, region string) (*compute.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddress(name, region)
|
||||
if addr != nil {
|
||||
return convertToV1Address(addr), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) DeleteRegionAddress(name, region string) error {
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
return makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
addr, exists := cas.addrsByRegionAndName[region][name]
|
||||
if !exists {
|
||||
return makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
delete(cas.reservedAddrs, addr.Address)
|
||||
delete(cas.addrsByRegionAndName[region], name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetAlphaRegionAddressByIP(region, ipAddress string) (*computealpha.Address, error) {
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
for _, addr := range cas.addrsByRegionAndName[region] {
|
||||
if addr.Address == ipAddress {
|
||||
return addr, nil
|
||||
}
|
||||
}
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetBetaRegionAddressByIP(name, region string) (*computebeta.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddressByIP(name, region)
|
||||
if addr != nil {
|
||||
return convertToBetaAddress(addr), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetRegionAddressByIP(name, region string) (*compute.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddressByIP(name, region)
|
||||
if addr != nil {
|
||||
return convertToV1Address(addr), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) getNetworkTierFromAddress(name, region string) (string, error) {
|
||||
addr, err := cas.GetAlphaRegionAddress(name, region)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return addr.NetworkTier, nil
|
||||
}
|
||||
|
||||
func convertToV1Address(object gceObject) *compute.Address {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var addr compute.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 address: %v", object, err))
|
||||
}
|
||||
return &addr
|
||||
}
|
||||
|
||||
func convertToAlphaAddress(object gceObject) *computealpha.Address {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var addr computealpha.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to alpha address: %v", object, err))
|
||||
}
|
||||
// Set the default values for the Alpha fields.
|
||||
addr.NetworkTier = cloud.NetworkTierDefault.ToGCEValue()
|
||||
return &addr
|
||||
}
|
||||
|
||||
func convertToBetaAddress(object gceObject) *computebeta.Address {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var addr computebeta.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to beta address: %v", object, err))
|
||||
}
|
||||
return &addr
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) String() string {
|
||||
var b bytes.Buffer
|
||||
for region, regAddresses := range cas.addrsByRegionAndName {
|
||||
b.WriteString(fmt.Sprintf("%v:\n", region))
|
||||
for name, addr := range regAddresses {
|
||||
b.WriteString(fmt.Sprintf(" %v: %v\n", name, addr.Address))
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
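The deleted fake relied on convertToV1Address/convertToAlphaAddress/convertToBetaAddress, which move an object between API versions by marshalling it to JSON and unmarshalling into the other struct, the same trick copyViaJSON uses in the generated cloud package. A standalone sketch of that round-trip with two hypothetical struct versions standing in for compute.Address and computebeta.Address:

package main

import (
	"encoding/json"
	"fmt"
)

// v1Address and betaAddress share JSON field names, so a marshal/unmarshal
// round-trip copies the overlapping fields and silently drops the rest.
type v1Address struct {
	Name    string `json:"name"`
	Address string `json:"address"`
}

type betaAddress struct {
	Name        string `json:"name"`
	Address     string `json:"address"`
	NetworkTier string `json:"networkTier,omitempty"` // beta-only field, left empty after conversion
}

func toBeta(src v1Address) (betaAddress, error) {
	var dst betaAddress
	enc, err := json.Marshal(src)
	if err != nil {
		return dst, err
	}
	err = json.Unmarshal(enc, &dst)
	return dst, err
}

func main() {
	b, err := toBeta(v1Address{Name: "my-lb", Address: "1.1.1.1"})
	fmt.Printf("%+v err=%v\n", b, err)
}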
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
@ -18,8 +18,6 @@ package gce

import (
"fmt"

utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

const (
@ -32,12 +30,6 @@ const (
AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
)

// All known alpha features
var knownAlphaFeatures = map[string]bool{
AlphaFeatureNetworkTiers: true,
AlphaFeatureNetworkEndpointGroup: true,
}

type AlphaFeatureGate struct {
features map[string]bool
}
@ -46,17 +38,12 @@ func (af *AlphaFeatureGate) Enabled(key string) bool {
return af.features[key]
}

func NewAlphaFeatureGate(features []string) (*AlphaFeatureGate, error) {
errList := []error{}
func NewAlphaFeatureGate(features []string) *AlphaFeatureGate {
featureMap := make(map[string]bool)
for _, name := range features {
if _, ok := knownAlphaFeatures[name]; !ok {
errList = append(errList, fmt.Errorf("alpha feature %q is not supported.", name))
} else {
featureMap[name] = true
}
featureMap[name] = true
}
return &AlphaFeatureGate{featureMap}, utilerrors.NewAggregate(errList)
return &AlphaFeatureGate{featureMap}
}

func (gce *GCECloud) alphaFeatureEnabled(feature string) error {
|
||||
|
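With the known-features whitelist removed, NewAlphaFeatureGate is reduced to a string-set constructor and Enabled to a map lookup; unknown names are accepted silently instead of producing an aggregated error. A standalone sketch of the reduced behaviour, using hypothetical feature names:

package main

import "fmt"

type alphaFeatureGate struct{ features map[string]bool }

// newAlphaFeatureGate accepts any names; validation against a known list was
// dropped in this change, so unrecognized names simply never match a query.
func newAlphaFeatureGate(features []string) *alphaFeatureGate {
	m := make(map[string]bool)
	for _, name := range features {
		m[name] = true
	}
	return &alphaFeatureGate{features: m}
}

func (g *alphaFeatureGate) Enabled(key string) bool { return g.features[key] }

func main() {
	gate := newAlphaFeatureGate([]string{"NetworkTiers"})
	fmt.Println(gate.Enabled("NetworkTiers"), gate.Enabled("NetworkEndpointGroup"))
}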
109
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
109
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
@ -17,11 +17,11 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
@ -36,54 +36,88 @@ func newBackendServiceMetricContextWithVersion(request, region, version string)
|
||||
|
||||
// GetGlobalBackendService retrieves a backend by name.
|
||||
func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendService, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("get", "")
|
||||
v, err := gce.c.BackendServices().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.BackendServices().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetBetaGlobalBackendService retrieves beta backend by name.
|
||||
func (gce *GCECloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContextWithVersion("get", "", computeBetaVersion)
|
||||
v, err := gce.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaGlobalBackendService retrieves alpha backend by name.
|
||||
func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion)
|
||||
v, err := gce.c.AlphaBackendServices().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateGlobalBackendService applies the given BackendService as an update to
|
||||
// an existing service.
|
||||
func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("update", "")
|
||||
return mc.Observe(gce.c.BackendServices().Update(context.Background(), meta.GlobalKey(bg.Name), bg))
|
||||
return mc.Observe(gce.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an
|
||||
// update to an existing service.
|
||||
func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("update", "")
|
||||
return mc.Observe(gce.c.AlphaBackendServices().Update(context.Background(), meta.GlobalKey(bg.Name), bg))
|
||||
return mc.Observe(gce.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// DeleteGlobalBackendService deletes the given BackendService by name.
|
||||
func (gce *GCECloud) DeleteGlobalBackendService(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("delete", "")
|
||||
return mc.Observe(gce.c.BackendServices().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.BackendServices().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// CreateGlobalBackendService creates the given BackendService.
|
||||
func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("create", "")
|
||||
return mc.Observe(gce.c.BackendServices().Insert(context.Background(), meta.GlobalKey(bg.Name), bg))
|
||||
return mc.Observe(gce.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// CreateAlphaGlobalBackendService creates the given alpha BackendService.
|
||||
func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("create", "")
|
||||
return mc.Observe(gce.c.AlphaBackendServices().Insert(context.Background(), meta.GlobalKey(bg.Name), bg))
|
||||
return mc.Observe(gce.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// ListGlobalBackendServices lists all backend services in the project.
|
||||
func (gce *GCECloud) ListGlobalBackendServices() ([]*compute.BackendService, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("list", "")
|
||||
v, err := gce.c.BackendServices().List(context.Background(), filter.None)
|
||||
v, err := gce.c.BackendServices().List(ctx, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
@ -91,42 +125,60 @@ func (gce *GCECloud) ListGlobalBackendServices() ([]*compute.BackendService, err
|
||||
// identified by the given name, in the given instanceGroup. The
|
||||
// instanceGroupLink is the fully qualified self link of an instance group.
|
||||
func (gce *GCECloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("get_health", "")
|
||||
groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink}
|
||||
v, err := gce.c.BackendServices().GetHealth(context.Background(), meta.GlobalKey(name), groupRef)
|
||||
v, err := gce.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetRegionBackendService retrieves a backend by name.
|
||||
func (gce *GCECloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("get", region)
|
||||
v, err := gce.c.RegionBackendServices().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateRegionBackendService applies the given BackendService as an update to
|
||||
// an existing service.
|
||||
func (gce *GCECloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("update", region)
|
||||
return mc.Observe(gce.c.RegionBackendServices().Update(context.Background(), meta.RegionalKey(bg.Name, region), bg))
|
||||
return mc.Observe(gce.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg))
|
||||
}
|
||||
|
||||
// DeleteRegionBackendService deletes the given BackendService by name.
|
||||
func (gce *GCECloud) DeleteRegionBackendService(name, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("delete", region)
|
||||
return mc.Observe(gce.c.RegionBackendServices().Delete(context.Background(), meta.RegionalKey(name, region)))
|
||||
return mc.Observe(gce.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region)))
|
||||
}
|
||||
|
||||
// CreateRegionBackendService creates the given BackendService.
|
||||
func (gce *GCECloud) CreateRegionBackendService(bg *compute.BackendService, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("create", region)
|
||||
return mc.Observe(gce.c.RegionBackendServices().Insert(context.Background(), meta.RegionalKey(bg.Name, region), bg))
|
||||
return mc.Observe(gce.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg))
|
||||
}
|
||||
|
||||
// ListRegionBackendServices lists all backend services in the project.
|
||||
func (gce *GCECloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("list", region)
|
||||
v, err := gce.c.RegionBackendServices().List(context.Background(), region, filter.None)
|
||||
v, err := gce.c.RegionBackendServices().List(ctx, region, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
@ -134,8 +186,31 @@ func (gce *GCECloud) ListRegionBackendServices(region string) ([]*compute.Backen
|
||||
// identified by the given name, in the given instanceGroup. The
|
||||
// instanceGroupLink is the fully qualified self link of an instance group.
|
||||
func (gce *GCECloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("get_health", region)
|
||||
ref := &compute.ResourceGroupReference{Group: instanceGroupLink}
|
||||
v, err := gce.c.RegionBackendServices().GetHealth(context.Background(), meta.RegionalKey(name, region), ref)
|
||||
v, err := gce.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// SetSecurityPolicyForBetaGlobalBackendService sets the given
|
||||
// SecurityPolicyReference for the BackendService identified by the given name.
|
||||
func (gce *GCECloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference))
|
||||
}
|
||||
|
||||
// SetSecurityPolicyForAlphaGlobalBackendService sets the given
|
||||
// SecurityPolicyReference for the BackendService identified by the given name.
|
||||
func (gce *GCECloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeAlphaVersion)
|
||||
return mc.Observe(gce.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference))
|
||||
}
|
||||
|
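Note on the recurring change in this file: every method now derives a per-call context from cloud.ContextWithCallTimeout() and defers its cancel, instead of passing context.Background() with no deadline. Below is a minimal, self-contained sketch of that shape; the helper, the one-hour timeout, and the fake client are illustrative assumptions, not code from this diff.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// contextWithCallTimeout mirrors the role of cloud.ContextWithCallTimeout:
// it returns a context that is cancelled after a fixed per-call deadline.
// The one-hour value is an assumption for this sketch.
func contextWithCallTimeout() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), time.Hour)
}

// fakeBackendServices stands in for gce.c.BackendServices().
type fakeBackendServices struct{}

func (fakeBackendServices) Get(ctx context.Context, name string) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err // deadline or cancellation already hit
	}
	if name == "" {
		return "", errors.New("name must not be empty")
	}
	return "backend/" + name, nil
}

func main() {
	// One context per API call, cancelled when the call returns.
	ctx, cancel := contextWithCallTimeout()
	defer cancel()

	v, err := fakeBackendServices{}.Get(ctx, "my-backend")
	fmt.Println(v, err)
}

The practical effect is that a slow or hung GCE call is abandoned when the per-call deadline expires rather than blocking indefinitely.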
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
package gce

import (
"context"

compute "google.golang.org/api/compute/v1"

"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)
@ -31,15 +30,21 @@ func newCertMetricContext(request string) *metricContext {

// GetSslCertificate returns the SslCertificate by name.
func (gce *GCECloud) GetSslCertificate(name string) (*compute.SslCertificate, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newCertMetricContext("get")
v, err := gce.c.SslCertificates().Get(context.Background(), meta.GlobalKey(name))
v, err := gce.c.SslCertificates().Get(ctx, meta.GlobalKey(name))
return v, mc.Observe(err)
}

// CreateSslCertificate creates and returns a SslCertificate.
func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newCertMetricContext("create")
err := gce.c.SslCertificates().Insert(context.Background(), meta.GlobalKey(sslCerts.Name), sslCerts)
err := gce.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts)
if err != nil {
return nil, mc.Observe(err)
}
@ -48,13 +53,19 @@ func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*co

// DeleteSslCertificate deletes the SslCertificate by name.
func (gce *GCECloud) DeleteSslCertificate(name string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newCertMetricContext("delete")
return mc.Observe(gce.c.SslCertificates().Delete(context.Background(), meta.GlobalKey(name)))
return mc.Observe(gce.c.SslCertificates().Delete(ctx, meta.GlobalKey(name)))
}

// ListSslCertificates lists all SslCertificates in the project.
func (gce *GCECloud) ListSslCertificates() ([]*compute.SslCertificate, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newCertMetricContext("list")
v, err := gce.c.SslCertificates().List(context.Background(), filter.None)
v, err := gce.c.SslCertificates().List(ctx, filter.None)
return v, mc.Observe(err)
}
|
317
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
317
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
@ -34,10 +34,12 @@ import (
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
|
||||
"github.com/golang/glog"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
@ -65,7 +67,7 @@ type diskServiceManager interface {
|
||||
sizeGb int64,
|
||||
tagsStr string,
|
||||
diskType string,
|
||||
zone string) (gceObject, error)
|
||||
zone string) error
|
||||
|
||||
// Creates a new regional persistent disk on GCE with the given disk spec.
|
||||
CreateRegionalDiskOnCloudProvider(
|
||||
@ -73,41 +75,35 @@ type diskServiceManager interface {
|
||||
sizeGb int64,
|
||||
tagsStr string,
|
||||
diskType string,
|
||||
zones sets.String) (gceObject, error)
|
||||
zones sets.String) error
|
||||
|
||||
// Deletes the persistent disk from GCE with the given diskName.
|
||||
DeleteDiskOnCloudProvider(zone string, disk string) (gceObject, error)
|
||||
DeleteDiskOnCloudProvider(zone string, disk string) error
|
||||
|
||||
// Deletes the regional persistent disk from GCE with the given diskName.
|
||||
DeleteRegionalDiskOnCloudProvider(diskName string) (gceObject, error)
|
||||
DeleteRegionalDiskOnCloudProvider(diskName string) error
|
||||
|
||||
// Attach a persistent disk on GCE with the given disk spec to the specified instance.
|
||||
AttachDiskOnCloudProvider(
|
||||
disk *GCEDisk,
|
||||
readWrite string,
|
||||
instanceZone string,
|
||||
instanceName string) (gceObject, error)
|
||||
instanceName string) error
|
||||
|
||||
// Detach a persistent disk on GCE with the given disk spec from the specified instance.
|
||||
DetachDiskOnCloudProvider(
|
||||
instanceZone string,
|
||||
instanceName string,
|
||||
devicePath string) (gceObject, error)
|
||||
devicePath string) error
|
||||
|
||||
ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error)
|
||||
RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error)
|
||||
ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error
|
||||
RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error
|
||||
|
||||
// Gets the persistent disk from GCE with the given diskName.
|
||||
GetDiskFromCloudProvider(zone string, diskName string) (*GCEDisk, error)
|
||||
|
||||
// Gets the regional persistent disk from GCE with the given diskName.
|
||||
GetRegionalDiskFromCloudProvider(diskName string) (*GCEDisk, error)
|
||||
|
||||
// Waits until GCE reports the given operation in the given zone as done.
|
||||
WaitForZoneOp(op gceObject, zone string, mc *metricContext) error
|
||||
|
||||
// Waits until GCE reports the given operation in the given region is done.
|
||||
WaitForRegionalOp(op gceObject, mc *metricContext) error
|
||||
}
|
||||
|
||||
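The interface change above, where each diskServiceManager method now returns a plain error instead of (gceObject, error) and the WaitForZoneOp/WaitForRegionalOp methods disappear, means callers no longer juggle an operation object. A hedged sketch of the before/after caller shape, with made-up manager types standing in for diskServiceManager:

package main

import "fmt"

// operation stands in for the old gceObject returned by the compute API.
type operation struct{}

// oldManager models the previous interface: the call returns an operation
// that the caller must wait on in a second step.
type oldManager struct{}

func (oldManager) DeleteDiskOnCloudProvider(zone, disk string) (*operation, error) {
	return &operation{}, nil
}

func (oldManager) WaitForZoneOp(op *operation, zone string) error { return nil }

// newManager models the updated interface: the call blocks until the
// operation completes and returns only the final error.
type newManager struct{}

func (newManager) DeleteDiskOnCloudProvider(zone, disk string) error { return nil }

func main() {
	// Old two-step shape.
	om := oldManager{}
	op, err := om.DeleteDiskOnCloudProvider("zone1", "disk-a")
	if err == nil {
		err = om.WaitForZoneOp(op, "zone1")
	}
	fmt.Println("old:", err)

	// New single-call shape, matching how doDeleteDisk is rewritten later
	// in this diff.
	fmt.Println("new:", newManager{}.DeleteDiskOnCloudProvider("zone1", "disk-a"))
}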
type gceServiceManager struct {
|
||||
@ -121,11 +117,11 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
|
||||
sizeGb int64,
|
||||
tagsStr string,
|
||||
diskType string,
|
||||
zone string) (gceObject, error) {
|
||||
zone string) error {
|
||||
diskTypeURI, err := manager.getDiskTypeURI(
|
||||
manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useAlphaAPI */)
|
||||
manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useBetaAPI */)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
diskToCreateV1 := &compute.Disk{
|
||||
@ -134,8 +130,10 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
|
||||
Description: tagsStr,
|
||||
Type: diskTypeURI,
|
||||
}
|
||||
return manager.gce.service.Disks.Insert(
|
||||
manager.gce.projectID, zone, diskToCreateV1).Do()
|
||||
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.Disks().Insert(ctx, meta.ZonalKey(name, zone), diskToCreateV1)
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
|
||||
@ -143,42 +141,44 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
|
||||
sizeGb int64,
|
||||
tagsStr string,
|
||||
diskType string,
|
||||
replicaZones sets.String) (gceObject, error) {
|
||||
replicaZones sets.String) error {
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
diskTypeURI, err := manager.getDiskTypeURI(
|
||||
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useAlphaAPI */)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullyQualifiedReplicaZones := []string{}
|
||||
for _, replicaZone := range replicaZones.UnsortedList() {
|
||||
fullyQualifiedReplicaZones = append(
|
||||
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
|
||||
}
|
||||
|
||||
diskToCreateAlpha := &computealpha.Disk{
|
||||
Name: name,
|
||||
SizeGb: sizeGb,
|
||||
Description: tagsStr,
|
||||
Type: diskTypeURI,
|
||||
ReplicaZones: fullyQualifiedReplicaZones,
|
||||
}
|
||||
return manager.gce.serviceAlpha.RegionDisks.Insert(
|
||||
manager.gce.projectID, manager.gce.region, diskToCreateAlpha).Do()
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
diskTypeURI, err := manager.getDiskTypeURI(
|
||||
manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fullyQualifiedReplicaZones := []string{}
|
||||
for _, replicaZone := range replicaZones.UnsortedList() {
|
||||
fullyQualifiedReplicaZones = append(
|
||||
fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
|
||||
}
|
||||
|
||||
diskToCreateBeta := &computebeta.Disk{
|
||||
Name: name,
|
||||
SizeGb: sizeGb,
|
||||
Description: tagsStr,
|
||||
Type: diskTypeURI,
|
||||
ReplicaZones: fullyQualifiedReplicaZones,
|
||||
}
|
||||
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.BetaRegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreateBeta)
|
||||
}
|
||||
|
||||
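The regional-disk methods above are also restructured from "if gate enabled { ... } else { return error }" into an early-return guard. A small sketch of that shape, with the feature-gate check faked for illustration:

package main

import (
	"errors"
	"fmt"
)

// regionalPDEnabled stands in for
// utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk).
func regionalPDEnabled() bool { return false }

// createRegionalDisk shows the guard-clause form: reject early when the
// feature gate is off, then keep the main path unindented.
func createRegionalDisk(name string) error {
	if !regionalPDEnabled() {
		return errors.New("the regional PD feature gate is not enabled")
	}
	// ... build the beta Disk and issue the insert here ...
	return nil
}

func main() {
	fmt.Println(createRegionalDisk("disk-a"))
}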
func (manager *gceServiceManager) AttachDiskOnCloudProvider(
|
||||
disk *GCEDisk,
|
||||
readWrite string,
|
||||
instanceZone string,
|
||||
instanceName string) (gceObject, error) {
|
||||
instanceName string) error {
|
||||
source, err := manager.getDiskSourceURI(disk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
attachedDiskV1 := &compute.AttachedDisk{
|
||||
@ -188,16 +188,19 @@ func (manager *gceServiceManager) AttachDiskOnCloudProvider(
|
||||
Source: source,
|
||||
Type: diskTypePersistent,
|
||||
}
|
||||
return manager.gce.service.Instances.AttachDisk(
|
||||
manager.gce.projectID, instanceZone, instanceName, attachedDiskV1).Do()
|
||||
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.Instances().AttachDisk(ctx, meta.ZonalKey(instanceName, instanceZone), attachedDiskV1)
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) DetachDiskOnCloudProvider(
|
||||
instanceZone string,
|
||||
instanceName string,
|
||||
devicePath string) (gceObject, error) {
|
||||
return manager.gce.service.Instances.DetachDisk(
|
||||
manager.gce.projectID, instanceZone, instanceName, devicePath).Do()
|
||||
devicePath string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.Instances().DetachDisk(ctx, meta.ZonalKey(instanceName, instanceZone), devicePath)
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) GetDiskFromCloudProvider(
|
||||
@ -211,8 +214,9 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
|
||||
return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone)
|
||||
}
|
||||
|
||||
diskStable, err := manager.gce.service.Disks.Get(
|
||||
manager.gce.projectID, zone, diskName).Do()
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
diskStable, err := manager.gce.c.Disks().Get(ctx, meta.ZonalKey(diskName, zone))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -240,56 +244,50 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
|
||||
func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
|
||||
diskName string) (*GCEDisk, error) {
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
diskAlpha, err := manager.gce.serviceAlpha.RegionDisks.Get(
|
||||
manager.gce.projectID, manager.gce.region, diskName).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zones := sets.NewString()
|
||||
for _, zoneURI := range diskAlpha.ReplicaZones {
|
||||
zones.Insert(lastComponent(zoneURI))
|
||||
}
|
||||
|
||||
return &GCEDisk{
|
||||
ZoneInfo: multiZone{zones},
|
||||
Region: lastComponent(diskAlpha.Region),
|
||||
Name: diskAlpha.Name,
|
||||
Kind: diskAlpha.Kind,
|
||||
Type: diskAlpha.Type,
|
||||
SizeGb: diskAlpha.SizeGb,
|
||||
}, nil
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
diskBeta, err := manager.gce.c.BetaRegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zones := sets.NewString()
|
||||
for _, zoneURI := range diskBeta.ReplicaZones {
|
||||
zones.Insert(lastComponent(zoneURI))
|
||||
}
|
||||
|
||||
return &GCEDisk{
|
||||
ZoneInfo: multiZone{zones},
|
||||
Region: lastComponent(diskBeta.Region),
|
||||
Name: diskBeta.Name,
|
||||
Kind: diskBeta.Kind,
|
||||
Type: diskBeta.Type,
|
||||
SizeGb: diskBeta.SizeGb,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) DeleteDiskOnCloudProvider(
|
||||
zone string,
|
||||
diskName string) (gceObject, error) {
|
||||
return manager.gce.service.Disks.Delete(
|
||||
manager.gce.projectID, zone, diskName).Do()
|
||||
diskName string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.Disks().Delete(ctx, meta.ZonalKey(diskName, zone))
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider(
|
||||
diskName string) (gceObject, error) {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return manager.gce.serviceAlpha.RegionDisks.Delete(
|
||||
manager.gce.projectID, manager.gce.region, diskName).Do()
|
||||
diskName string) error {
|
||||
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) WaitForZoneOp(
|
||||
op gceObject, zone string, mc *metricContext) error {
|
||||
return manager.gce.waitForZoneOp(op, zone, mc)
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) WaitForRegionalOp(
|
||||
op gceObject, mc *metricContext) error {
|
||||
return manager.gce.waitForRegionOp(op, manager.gce.region, mc)
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.BetaRegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region))
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) {
|
||||
@ -327,11 +325,11 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) getDiskTypeURI(
|
||||
diskRegion string, diskZoneInfo zoneType, diskType string, useAlphaAPI bool) (string, error) {
|
||||
diskRegion string, diskZoneInfo zoneType, diskType string, useBetaAPI bool) (string, error) {
|
||||
|
||||
var getProjectsAPIEndpoint string
|
||||
if useAlphaAPI {
|
||||
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
|
||||
if useBetaAPI {
|
||||
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta()
|
||||
} else {
|
||||
getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
|
||||
}
|
||||
@ -363,10 +361,10 @@ func (manager *gceServiceManager) getDiskTypeURI(
|
||||
}
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) getReplicaZoneURI(zone string, useAlphaAPI bool) string {
|
||||
func (manager *gceServiceManager) getReplicaZoneURI(zone string, useBetaAPI bool) string {
|
||||
var getProjectsAPIEndpoint string
|
||||
if useAlphaAPI {
|
||||
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
|
||||
if useBetaAPI {
|
||||
getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta()
|
||||
} else {
|
||||
getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
|
||||
}
|
||||
@ -411,21 +409,28 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string,
|
||||
return region, nil
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error) {
|
||||
func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error {
|
||||
resizeServiceRequest := &compute.DisksResizeRequest{
|
||||
SizeGb: sizeGb,
|
||||
}
|
||||
return manager.gce.service.Disks.Resize(manager.gce.projectID, zone, disk.Name, resizeServiceRequest).Do()
|
||||
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.Disks().Resize(ctx, meta.ZonalKey(disk.Name, zone), resizeServiceRequest)
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error) {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
resizeServiceRequest := &computealpha.RegionDisksResizeRequest{
|
||||
SizeGb: sizeGb,
|
||||
}
|
||||
return manager.gce.serviceAlpha.RegionDisks.Resize(manager.gce.projectID, disk.Region, disk.Name, resizeServiceRequest).Do()
|
||||
func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error {
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
}
|
||||
return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
|
||||
|
||||
resizeServiceRequest := &computebeta.RegionDisksResizeRequest{
|
||||
SizeGb: sizeGb,
|
||||
}
|
||||
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
return manager.gce.c.BetaRegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest)
|
||||
}
|
||||
|
||||
// Disks is interface for manipulation with GCE PDs.
|
||||
@ -535,14 +540,10 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn
|
||||
if regional && utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
disk, err = gce.getRegionalDiskByName(diskName)
|
||||
if err != nil {
|
||||
glog.V(5).Infof("Could not find regional PD named %q to Attach. Will look for a zonal PD", diskName)
|
||||
err = nil
|
||||
} else {
|
||||
mc = newDiskMetricContextRegional("attach", gce.region)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if disk == nil {
|
||||
mc = newDiskMetricContextRegional("attach", gce.region)
|
||||
} else {
|
||||
disk, err = gce.getDiskByName(diskName, instance.Zone)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -555,14 +556,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn
|
||||
readWrite = "READ_ONLY"
|
||||
}
|
||||
|
||||
attachOp, err := gce.manager.AttachDiskOnCloudProvider(
|
||||
disk, readWrite, instance.Zone, instance.Name)
|
||||
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.manager.WaitForZoneOp(attachOp, instance.Zone, mc)
|
||||
return mc.Observe(gce.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name))
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error {
|
||||
@ -582,12 +576,7 @@ func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) erro
|
||||
}
|
||||
|
||||
mc := newDiskMetricContextZonal("detach", gce.region, inst.Zone)
|
||||
detachOp, err := gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath)
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.manager.WaitForZoneOp(detachOp, inst.Zone, mc)
|
||||
return mc.Observe(gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath))
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
|
||||
@ -675,17 +664,10 @@ func (gce *GCECloud) CreateDisk(
|
||||
|
||||
mc := newDiskMetricContextZonal("create", gce.region, zone)
|
||||
|
||||
createOp, err := gce.manager.CreateDiskOnCloudProvider(
|
||||
err = gce.manager.CreateDiskOnCloudProvider(
|
||||
name, sizeGb, tagsStr, diskType, zone)
|
||||
|
||||
if isGCEError(err, "alreadyExists") {
|
||||
glog.Warningf("GCE PD %q already exists, reusing", name)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
err = gce.manager.WaitForZoneOp(createOp, zone, mc)
|
||||
mc.Observe(err)
|
||||
if isGCEError(err, "alreadyExists") {
|
||||
glog.Warningf("GCE PD %q already exists, reusing", name)
|
||||
return nil
|
||||
@ -723,17 +705,10 @@ func (gce *GCECloud) CreateRegionalDisk(
|
||||
|
||||
mc := newDiskMetricContextRegional("create", gce.region)
|
||||
|
||||
createOp, err := gce.manager.CreateRegionalDiskOnCloudProvider(
|
||||
err = gce.manager.CreateRegionalDiskOnCloudProvider(
|
||||
name, sizeGb, tagsStr, diskType, replicaZones)
|
||||
|
||||
if isGCEError(err, "alreadyExists") {
|
||||
glog.Warningf("GCE PD %q already exists, reusing", name)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
err = gce.manager.WaitForRegionalOp(createOp, mc)
|
||||
mc.Observe(err)
|
||||
if isGCEError(err, "alreadyExists") {
|
||||
glog.Warningf("GCE PD %q already exists, reusing", name)
|
||||
return nil
|
||||
@ -786,31 +761,26 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
|
||||
switch zoneInfo := disk.ZoneInfo.(type) {
|
||||
case singleZone:
|
||||
mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone)
|
||||
resizeOp, err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone)
|
||||
err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone)
|
||||
|
||||
if err != nil {
|
||||
return oldSize, mc.Observe(err)
|
||||
} else {
|
||||
return newSizeQuant, mc.Observe(err)
|
||||
}
|
||||
waitErr := gce.manager.WaitForZoneOp(resizeOp, zoneInfo.zone, mc)
|
||||
if waitErr != nil {
|
||||
return oldSize, waitErr
|
||||
}
|
||||
return newSizeQuant, nil
|
||||
case multiZone:
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
mc = newDiskMetricContextRegional("resize", disk.Region)
|
||||
resizeOp, err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
|
||||
|
||||
if err != nil {
|
||||
return oldSize, mc.Observe(err)
|
||||
}
|
||||
waitErr := gce.manager.WaitForRegionalOp(resizeOp, mc)
|
||||
if waitErr != nil {
|
||||
return oldSize, waitErr
|
||||
}
|
||||
return newSizeQuant, nil
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
|
||||
}
|
||||
|
||||
mc = newDiskMetricContextRegional("resize", disk.Region)
|
||||
err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
|
||||
|
||||
if err != nil {
|
||||
return oldSize, mc.Observe(err)
|
||||
} else {
|
||||
return newSizeQuant, mc.Observe(err)
|
||||
}
|
||||
return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
|
||||
case nil:
|
||||
return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk)
|
||||
default:
|
||||
@ -1026,21 +996,14 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
|
||||
switch zoneInfo := disk.ZoneInfo.(type) {
|
||||
case singleZone:
|
||||
mc = newDiskMetricContextZonal("delete", disk.Region, zoneInfo.zone)
|
||||
deleteOp, err := gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name)
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.manager.WaitForZoneOp(deleteOp, zoneInfo.zone, mc)
|
||||
return mc.Observe(gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name))
|
||||
case multiZone:
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
mc = newDiskMetricContextRegional("delete", disk.Region)
|
||||
deleteOp, err := gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.manager.WaitForRegionalOp(deleteOp, mc)
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
|
||||
return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
|
||||
}
|
||||
return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
|
||||
|
||||
mc = newDiskMetricContextRegional("delete", disk.Region)
|
||||
return mc.Observe(gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name))
|
||||
case nil:
|
||||
return fmt.Errorf("PD has nil ZoneInfo: %v", disk)
|
||||
default:
|
||||
|
222
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks_test.go
generated
vendored
222
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks_test.go
generated
vendored
@ -39,10 +39,7 @@ func TestCreateDisk_Basic(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
@ -73,9 +70,6 @@ func TestCreateDisk_Basic(t *testing.T) {
|
||||
if !fakeManager.createDiskCalled {
|
||||
t.Error("Never called GCE disk create.")
|
||||
}
|
||||
if !fakeManager.doesOpMatch {
|
||||
t.Error("Ops used in WaitForZoneOp does not match what's returned by CreateDisk.")
|
||||
}
|
||||
|
||||
// Partial check of equality between disk description sent to GCE and parameters of method.
|
||||
diskToCreate := fakeManager.diskToCreateStable
|
||||
@ -116,7 +110,7 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
|
||||
tags := make(map[string]string)
|
||||
tags["test-tag"] = "test-value"
|
||||
|
||||
expectedDiskTypeURI := gceComputeAPIEndpointAlpha + "projects/" + fmt.Sprintf(
|
||||
expectedDiskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(
|
||||
diskTypeURITemplateRegional, gceProjectId, gceRegion, diskType)
|
||||
expectedDescription := "{\"test-tag\":\"test-value\"}"
|
||||
|
||||
@ -130,9 +124,6 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
|
||||
if !fakeManager.createDiskCalled {
|
||||
t.Error("Never called GCE disk create.")
|
||||
}
|
||||
if !fakeManager.doesOpMatch {
|
||||
t.Error("Ops used in WaitForZoneOp does not match what's returned by CreateDisk.")
|
||||
}
|
||||
|
||||
// Partial check of equality between disk description sent to GCE and parameters of method.
|
||||
diskToCreate := fakeManager.diskToCreateStable
|
||||
@ -157,10 +148,7 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -171,7 +159,7 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
|
||||
|
||||
// Inject disk AlreadyExists error.
|
||||
alreadyExistsError := googleapi.ErrorItem{Reason: "alreadyExists"}
|
||||
fakeManager.waitForOpError = &googleapi.Error{
|
||||
fakeManager.opError = &googleapi.Error{
|
||||
Errors: []googleapi.ErrorItem{alreadyExistsError},
|
||||
}
|
||||
|
||||
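With the fake manager's waitForOpError field renamed to opError, the test injects the API error directly on the blocking call rather than on the removed wait step. Below is a rough re-implementation of the isGCEError check this test path relies on, exercised against an injected alreadyExists error; the helper body is a simplification for illustration, not copied from the provider.

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// isGCEError is a simplified stand-in for the provider helper of the same
// name: it reports whether err is a *googleapi.Error carrying an item with
// the given reason.
func isGCEError(err error, reason string) bool {
	apiErr, ok := err.(*googleapi.Error)
	if !ok {
		return false
	}
	for _, item := range apiErr.Errors {
		if item.Reason == reason {
			return true
		}
	}
	return false
}

func main() {
	// The same shape of error the test stores in fakeManager.opError.
	injected := &googleapi.Error{
		Errors: []googleapi.ErrorItem{{Reason: "alreadyExists"}},
	}
	fmt.Println(isGCEError(injected, "alreadyExists")) // prints true
}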
@ -266,10 +254,7 @@ func TestCreateDisk_MultiZone(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -298,10 +283,7 @@ func TestDeleteDisk_Basic(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -326,9 +308,6 @@ func TestDeleteDisk_Basic(t *testing.T) {
|
||||
if !fakeManager.deleteDiskCalled {
|
||||
t.Error("Never called GCE disk delete.")
|
||||
}
|
||||
if !fakeManager.doesOpMatch {
|
||||
t.Error("Ops used in WaitForZoneOp does not match what's returned by DeleteDisk.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -338,10 +317,7 @@ func TestDeleteDisk_NotFound(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -366,10 +342,7 @@ func TestDeleteDisk_ResourceBeingUsed(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -400,10 +373,7 @@ func TestDeleteDisk_SameDiskMultiZone(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -437,10 +407,7 @@ func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -478,10 +445,7 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) {
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
const sizeGb int64 = 128
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -518,10 +482,7 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeStandard
|
||||
const sizeGb int64 = 128
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -576,10 +537,7 @@ func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
|
||||
zonesWithNodes := []string{}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -608,10 +566,7 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
|
||||
zone := "us-west1-b"
|
||||
const sizeGb int64 = 128
|
||||
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -649,10 +604,7 @@ func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
|
||||
diskType := DiskTypeStandard
|
||||
const sizeGb int64 = 128
|
||||
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
@ -683,16 +635,12 @@ const (
|
||||
|
||||
type FakeServiceManager struct {
|
||||
// Common fields shared among tests
|
||||
targetAPI targetClientAPI
|
||||
gceProjectID string
|
||||
gceRegion string
|
||||
opAlpha *computealpha.Operation // Mocks an operation returned by GCE API calls
|
||||
opBeta *computebeta.Operation // Mocks an operation returned by GCE API calls
|
||||
opStable *compute.Operation // Mocks an operation returned by GCE API calls
|
||||
doesOpMatch bool
|
||||
zonalDisks map[string]string // zone: diskName
|
||||
regionalDisks map[string]sets.String // diskName: zones
|
||||
waitForOpError error // Error to be returned by WaitForZoneOp or WaitForRegionalOp
|
||||
targetAPI targetClientAPI
|
||||
gceProjectID string
|
||||
gceRegion string
|
||||
zonalDisks map[string]string // zone: diskName
|
||||
regionalDisks map[string]sets.String // diskName: zones
|
||||
opError error
|
||||
|
||||
// Fields for TestCreateDisk
|
||||
createDiskCalled bool
|
||||
@ -723,12 +671,11 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
|
||||
sizeGb int64,
|
||||
tagsStr string,
|
||||
diskType string,
|
||||
zone string) (gceObject, error) {
|
||||
zone string) error {
|
||||
manager.createDiskCalled = true
|
||||
|
||||
switch t := manager.targetAPI; t {
|
||||
case targetStable:
|
||||
manager.opStable = &compute.Operation{}
|
||||
diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
|
||||
diskToCreateV1 := &compute.Disk{
|
||||
Name: name,
|
||||
@ -738,9 +685,8 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
|
||||
}
|
||||
manager.diskToCreateStable = diskToCreateV1
|
||||
manager.zonalDisks[zone] = diskToCreateV1.Name
|
||||
return manager.opStable, nil
|
||||
return nil
|
||||
case targetBeta:
|
||||
manager.opBeta = &computebeta.Operation{}
|
||||
diskTypeURI := gceComputeAPIEndpoint + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
|
||||
diskToCreateBeta := &computebeta.Disk{
|
||||
Name: name,
|
||||
@ -750,10 +696,9 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
|
||||
}
|
||||
manager.diskToCreateBeta = diskToCreateBeta
|
||||
manager.zonalDisks[zone] = diskToCreateBeta.Name
|
||||
return manager.opBeta, nil
|
||||
return nil
|
||||
case targetAlpha:
|
||||
manager.opAlpha = &computealpha.Operation{}
|
||||
diskTypeURI := gceComputeAPIEndpointAlpha + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
|
||||
diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateSingleZone, manager.gceProjectID, zone, diskType)
|
||||
diskToCreateAlpha := &computealpha.Disk{
|
||||
Name: name,
|
||||
SizeGb: sizeGb,
|
||||
@ -762,9 +707,9 @@ func (manager *FakeServiceManager) CreateDiskOnCloudProvider(
|
||||
}
|
||||
manager.diskToCreateAlpha = diskToCreateAlpha
|
||||
manager.zonalDisks[zone] = diskToCreateAlpha.Name
|
||||
return manager.opAlpha, nil
|
||||
return nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected type: %T", t)
|
||||
return fmt.Errorf("unexpected type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
@ -777,13 +722,12 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
|
||||
sizeGb int64,
|
||||
tagsStr string,
|
||||
diskType string,
|
||||
zones sets.String) (gceObject, error) {
|
||||
zones sets.String) error {
|
||||
manager.createDiskCalled = true
|
||||
diskTypeURI := gceComputeAPIEndpointAlpha + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
|
||||
diskTypeURI := gceComputeAPIEndpointBeta + "projects/" + fmt.Sprintf(diskTypeURITemplateRegional, manager.gceProjectID, manager.gceRegion, diskType)
|
||||
|
||||
switch t := manager.targetAPI; t {
|
||||
case targetStable:
|
||||
manager.opStable = &compute.Operation{}
|
||||
diskToCreateV1 := &compute.Disk{
|
||||
Name: name,
|
||||
SizeGb: sizeGb,
|
||||
@ -792,13 +736,13 @@ func (manager *FakeServiceManager) CreateRegionalDiskOnCloudProvider(
|
||||
}
|
||||
manager.diskToCreateStable = diskToCreateV1
|
||||
manager.regionalDisks[diskToCreateV1.Name] = zones
|
||||
return manager.opStable, nil
|
||||
return nil
|
||||
case targetBeta:
|
||||
return nil, fmt.Errorf("RegionalDisk CreateDisk op not supported in beta.")
|
||||
return fmt.Errorf("RegionalDisk CreateDisk op not supported in beta.")
|
||||
case targetAlpha:
|
||||
return nil, fmt.Errorf("RegionalDisk CreateDisk op not supported in alpha.")
|
||||
return fmt.Errorf("RegionalDisk CreateDisk op not supported in alpha.")
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected type: %T", t)
|
||||
return fmt.Errorf("unexpected type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
@ -806,39 +750,33 @@ func (manager *FakeServiceManager) AttachDiskOnCloudProvider(
|
||||
disk *GCEDisk,
|
||||
readWrite string,
|
||||
instanceZone string,
|
||||
instanceName string) (gceObject, error) {
|
||||
instanceName string) error {
|
||||
|
||||
switch t := manager.targetAPI; t {
|
||||
case targetStable:
|
||||
manager.opStable = &compute.Operation{}
|
||||
return manager.opStable, nil
|
||||
return nil
|
||||
case targetBeta:
|
||||
manager.opBeta = &computebeta.Operation{}
|
||||
return manager.opBeta, nil
|
||||
return nil
|
||||
case targetAlpha:
|
||||
manager.opAlpha = &computealpha.Operation{}
|
||||
return manager.opAlpha, nil
|
||||
return nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected type: %T", t)
|
||||
return fmt.Errorf("unexpected type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
func (manager *FakeServiceManager) DetachDiskOnCloudProvider(
|
||||
instanceZone string,
|
||||
instanceName string,
|
||||
devicePath string) (gceObject, error) {
|
||||
devicePath string) error {
|
||||
switch t := manager.targetAPI; t {
|
||||
case targetStable:
|
||||
manager.opStable = &compute.Operation{}
|
||||
return manager.opStable, nil
|
||||
return nil
|
||||
case targetBeta:
|
||||
manager.opBeta = &computebeta.Operation{}
|
||||
return manager.opBeta, nil
|
||||
return nil
|
||||
case targetAlpha:
|
||||
manager.opAlpha = &computealpha.Operation{}
|
||||
return manager.opAlpha, nil
|
||||
return nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected type: %T", t)
|
||||
return fmt.Errorf("unexpected type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
@ -895,13 +833,13 @@ func (manager *FakeServiceManager) GetRegionalDiskFromCloudProvider(
|
||||
func (manager *FakeServiceManager) ResizeDiskOnCloudProvider(
|
||||
disk *GCEDisk,
|
||||
size int64,
|
||||
zone string) (gceObject, error) {
|
||||
zone string) error {
|
||||
panic("Not implmented")
|
||||
}
|
||||
|
||||
func (manager *FakeServiceManager) RegionalResizeDiskOnCloudProvider(
|
||||
disk *GCEDisk,
|
||||
size int64) (gceObject, error) {
|
||||
size int64) error {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
@ -910,91 +848,41 @@ func (manager *FakeServiceManager) RegionalResizeDiskOnCloudProvider(
|
||||
*/
|
||||
func (manager *FakeServiceManager) DeleteDiskOnCloudProvider(
|
||||
zone string,
|
||||
disk string) (gceObject, error) {
|
||||
disk string) error {
|
||||
|
||||
manager.deleteDiskCalled = true
|
||||
delete(manager.zonalDisks, zone)
|
||||
|
||||
switch t := manager.targetAPI; t {
|
||||
case targetStable:
|
||||
manager.opStable = &compute.Operation{}
|
||||
return manager.opStable, nil
|
||||
return nil
|
||||
case targetBeta:
|
||||
manager.opBeta = &computebeta.Operation{}
|
||||
return manager.opBeta, nil
|
||||
return nil
|
||||
case targetAlpha:
|
||||
manager.opAlpha = &computealpha.Operation{}
|
||||
return manager.opAlpha, nil
|
||||
return nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected type: %T", t)
|
||||
return fmt.Errorf("unexpected type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
func (manager *FakeServiceManager) DeleteRegionalDiskOnCloudProvider(
|
||||
disk string) (gceObject, error) {
|
||||
disk string) error {
|
||||
|
||||
manager.deleteDiskCalled = true
|
||||
delete(manager.regionalDisks, disk)
|
||||
|
||||
switch t := manager.targetAPI; t {
|
||||
case targetStable:
|
||||
manager.opStable = &compute.Operation{}
|
||||
return manager.opStable, nil
|
||||
return nil
|
||||
case targetBeta:
|
||||
manager.opBeta = &computebeta.Operation{}
|
||||
return manager.opBeta, nil
|
||||
return nil
|
||||
case targetAlpha:
|
||||
manager.opAlpha = &computealpha.Operation{}
|
||||
return manager.opAlpha, nil
|
||||
return nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected type: %T", t)
|
||||
return fmt.Errorf("unexpected type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
func (manager *FakeServiceManager) WaitForZoneOp(
|
||||
op gceObject,
|
||||
zone string,
|
||||
mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
if op.(*computealpha.Operation) == manager.opAlpha {
|
||||
manager.doesOpMatch = true
|
||||
}
|
||||
case *computebeta.Operation:
|
||||
if op.(*computebeta.Operation) == manager.opBeta {
|
||||
manager.doesOpMatch = true
|
||||
}
|
||||
case *compute.Operation:
|
||||
if op.(*compute.Operation) == manager.opStable {
|
||||
manager.doesOpMatch = true
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
return manager.waitForOpError
|
||||
}
|
||||
|
||||
func (manager *FakeServiceManager) WaitForRegionalOp(
|
||||
op gceObject, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
if op.(*computealpha.Operation) == manager.opAlpha {
|
||||
manager.doesOpMatch = true
|
||||
}
|
||||
case *computebeta.Operation:
|
||||
if op.(*computebeta.Operation) == manager.opBeta {
|
||||
manager.doesOpMatch = true
|
||||
}
|
||||
case *compute.Operation:
|
||||
if op.(*compute.Operation) == manager.opStable {
|
||||
manager.doesOpMatch = true
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
return manager.waitForOpError
|
||||
}
|
||||
|
||||
func createNodeZones(zones []string) map[string]sets.String {
|
||||
nodeZones := map[string]sets.String{}
|
||||
for _, zone := range zones {
|
||||
|
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
@ -30,25 +29,37 @@ func newFirewallMetricContext(request string) *metricContext {
|
||||
|
||||
// GetFirewall returns the Firewall by name.
|
||||
func (gce *GCECloud) GetFirewall(name string) (*compute.Firewall, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newFirewallMetricContext("get")
|
||||
v, err := gce.c.Firewalls().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.Firewalls().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// CreateFirewall creates the passed firewall
|
||||
func (gce *GCECloud) CreateFirewall(f *compute.Firewall) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newFirewallMetricContext("create")
|
||||
return mc.Observe(gce.c.Firewalls().Insert(context.Background(), meta.GlobalKey(f.Name), f))
|
||||
return mc.Observe(gce.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f))
|
||||
}
|
||||
|
||||
// DeleteFirewall deletes the given firewall rule.
|
||||
func (gce *GCECloud) DeleteFirewall(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newFirewallMetricContext("delete")
|
||||
return mc.Observe(gce.c.Firewalls().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.Firewalls().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// UpdateFirewall applies the given firewall as an update to an existing service.
|
||||
func (gce *GCECloud) UpdateFirewall(f *compute.Firewall) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newFirewallMetricContext("update")
|
||||
return mc.Observe(gce.c.Firewalls().Update(context.Background(), meta.GlobalKey(f.Name), f))
|
||||
return mc.Observe(gce.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f))
|
||||
}
|
||||
|
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go
generated
vendored
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go
generated
vendored
@ -17,8 +17,6 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
@ -35,84 +33,120 @@ func newForwardingRuleMetricContextWithVersion(request, region, version string)
|
||||
|
||||
// CreateGlobalForwardingRule creates the passed GlobalForwardingRule
|
||||
func (gce *GCECloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("create", "")
|
||||
return mc.Observe(gce.c.GlobalForwardingRules().Insert(context.Background(), meta.GlobalKey(rule.Name), rule))
|
||||
return mc.Observe(gce.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule))
|
||||
}
|
||||
|
||||
// SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule.
|
||||
// targetProxyLink is the SelfLink of a TargetHttp(s)Proxy.
|
||||
func (gce *GCECloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("set_proxy", "")
|
||||
target := &compute.TargetReference{Target: targetProxyLink}
|
||||
return mc.Observe(gce.c.GlobalForwardingRules().SetTarget(context.Background(), meta.GlobalKey(forwardingRuleName), target))
|
||||
return mc.Observe(gce.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target))
|
||||
}
|
||||
|
||||
// DeleteGlobalForwardingRule deletes the GlobalForwardingRule by name.
|
||||
func (gce *GCECloud) DeleteGlobalForwardingRule(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("delete", "")
|
||||
return mc.Observe(gce.c.GlobalForwardingRules().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// GetGlobalForwardingRule returns the GlobalForwardingRule by name.
|
||||
func (gce *GCECloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("get", "")
|
||||
v, err := gce.c.GlobalForwardingRules().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListGlobalForwardingRules lists all GlobalForwardingRules in the project.
|
||||
func (gce *GCECloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("list", "")
|
||||
v, err := gce.c.GlobalForwardingRules().List(context.Background(), filter.None)
|
||||
v, err := gce.c.GlobalForwardingRules().List(ctx, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetRegionForwardingRule returns the RegionalForwardingRule by name & region.
|
||||
func (gce *GCECloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("get", region)
|
||||
v, err := gce.c.ForwardingRules().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaRegionForwardingRule returns the Alpha forwarding rule by name & region.
|
||||
func (gce *GCECloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContextWithVersion("get", region, computeAlphaVersion)
|
||||
v, err := gce.c.AlphaForwardingRules().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListRegionForwardingRules lists all RegionalForwardingRules in the project & region.
|
||||
func (gce *GCECloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("list", region)
|
||||
v, err := gce.c.ForwardingRules().List(context.Background(), region, filter.None)
|
||||
v, err := gce.c.ForwardingRules().List(ctx, region, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListAlphaRegionForwardingRules lists all RegionalForwardingRules in the project & region.
|
||||
func (gce *GCECloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContextWithVersion("list", region, computeAlphaVersion)
|
||||
v, err := gce.c.AlphaForwardingRules().List(context.Background(), region, filter.None)
|
||||
v, err := gce.c.AlphaForwardingRules().List(ctx, region, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// CreateRegionForwardingRule creates and returns a
|
||||
// RegionalForwardingRule that points to the given BackendService
|
||||
func (gce *GCECloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("create", region)
|
||||
return mc.Observe(gce.c.ForwardingRules().Insert(context.Background(), meta.RegionalKey(rule.Name, region), rule))
|
||||
return mc.Observe(gce.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule))
|
||||
}
|
||||
|
||||
// CreateAlphaRegionForwardingRule creates and returns an Alpha
|
||||
// forwarding rule in the given region.
|
||||
func (gce *GCECloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContextWithVersion("create", region, computeAlphaVersion)
|
||||
return mc.Observe(gce.c.AlphaForwardingRules().Insert(context.Background(), meta.RegionalKey(rule.Name, region), rule))
|
||||
return mc.Observe(gce.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule))
|
||||
}
|
||||
|
||||
// DeleteRegionForwardingRule deletes the RegionalForwardingRule by name & region.
|
||||
func (gce *GCECloud) DeleteRegionForwardingRule(name, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newForwardingRuleMetricContext("delete", region)
|
||||
return mc.Observe(gce.c.ForwardingRules().Delete(context.Background(), meta.RegionalKey(name, region)))
|
||||
return mc.Observe(gce.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region)))
|
||||
}
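Each method above also wraps its API call in a metric context (newForwardingRuleMetricContext(...).Observe(...)). Purely as an illustration of that wrap-and-return shape, here is a simplified stand-in; the real metricContext records Prometheus metrics and its exact fields are not shown in this diff.

package main

import (
	"errors"
	"fmt"
	"time"
)

// metricContext is a toy version of the provider's metric context: it
// remembers when the call started and which request it was for.
type metricContext struct {
	request string
	start   time.Time
}

func newMetricContext(request string) *metricContext {
	return &metricContext{request: request, start: time.Now()}
}

// Observe reports the call's latency and outcome, then returns the original
// error unchanged so callers can keep the one-line `return mc.Observe(...)`.
func (mc *metricContext) Observe(err error) error {
	fmt.Printf("request=%s latency=%s failed=%t\n", mc.request, time.Since(mc.start), err != nil)
	return err
}

func main() {
	mc := newMetricContext("delete")
	fmt.Println(mc.Observe(errors.New("simulated API failure")))
}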
|
||||
|
||||
// TODO(#51665): retire this function once Network Tiers becomes Beta in GCP.
|
||||
|
93
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go
generated
vendored
@ -17,14 +17,13 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
@ -58,33 +57,48 @@ func newHealthcheckMetricContextWithVersion(request, version string) *metricCont
|
||||
|
||||
// GetHttpHealthCheck returns the given HttpHealthCheck by name.
|
||||
func (gce *GCECloud) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("get_legacy")
|
||||
v, err := gce.c.HttpHealthChecks().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateHttpHealthCheck applies the given HttpHealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("update_legacy")
|
||||
return mc.Observe(gce.c.HttpHealthChecks().Update(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// DeleteHttpHealthCheck deletes the given HttpHealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHttpHealthCheck(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("delete_legacy")
|
||||
return mc.Observe(gce.c.HttpHealthChecks().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// CreateHttpHealthCheck creates the given HttpHealthCheck.
|
||||
func (gce *GCECloud) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("create_legacy")
|
||||
return mc.Observe(gce.c.HttpHealthChecks().Insert(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// ListHttpHealthChecks lists all HttpHealthChecks in the project.
|
||||
func (gce *GCECloud) ListHttpHealthChecks() ([]*compute.HttpHealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("list_legacy")
|
||||
v, err := gce.c.HttpHealthChecks().List(context.Background(), filter.None)
|
||||
v, err := gce.c.HttpHealthChecks().List(ctx, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
@ -92,33 +106,48 @@ func (gce *GCECloud) ListHttpHealthChecks() ([]*compute.HttpHealthCheck, error)
|
||||
|
||||
// GetHttpsHealthCheck returns the given HttpsHealthCheck by name.
|
||||
func (gce *GCECloud) GetHttpsHealthCheck(name string) (*compute.HttpsHealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("get_legacy")
|
||||
v, err := gce.c.HttpsHealthChecks().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateHttpsHealthCheck applies the given HttpsHealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("update_legacy")
|
||||
return mc.Observe(gce.c.HttpsHealthChecks().Update(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// DeleteHttpsHealthCheck deletes the given HttpsHealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHttpsHealthCheck(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("delete_legacy")
|
||||
return mc.Observe(gce.c.HttpsHealthChecks().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// CreateHttpsHealthCheck creates the given HttpsHealthCheck.
|
||||
func (gce *GCECloud) CreateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("create_legacy")
|
||||
return mc.Observe(gce.c.HttpsHealthChecks().Insert(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// ListHttpsHealthChecks lists all HttpsHealthChecks in the project.
|
||||
func (gce *GCECloud) ListHttpsHealthChecks() ([]*compute.HttpsHealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("list_legacy")
|
||||
v, err := gce.c.HttpsHealthChecks().List(context.Background(), filter.None)
|
||||
v, err := gce.c.HttpsHealthChecks().List(ctx, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
@ -126,52 +155,76 @@ func (gce *GCECloud) ListHttpsHealthChecks() ([]*compute.HttpsHealthCheck, error
|
||||
|
||||
// GetHealthCheck returns the given HealthCheck by name.
|
||||
func (gce *GCECloud) GetHealthCheck(name string) (*compute.HealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("get")
|
||||
v, err := gce.c.HealthChecks().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.HealthChecks().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaHealthCheck returns the given alpha HealthCheck by name.
|
||||
func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContextWithVersion("get", computeAlphaVersion)
|
||||
v, err := gce.c.AlphaHealthChecks().Get(context.Background(), meta.GlobalKey(name))
|
||||
v, err := gce.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateHealthCheck applies the given HealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("update")
|
||||
return mc.Observe(gce.c.HealthChecks().Update(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// UpdateAlphaHealthCheck applies the given alpha HealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContextWithVersion("update", computeAlphaVersion)
|
||||
return mc.Observe(gce.c.AlphaHealthChecks().Update(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// DeleteHealthCheck deletes the given HealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHealthCheck(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("delete")
|
||||
return mc.Observe(gce.c.HealthChecks().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
return mc.Observe(gce.c.HealthChecks().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// CreateHealthCheck creates the given HealthCheck.
|
||||
func (gce *GCECloud) CreateHealthCheck(hc *compute.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("create")
|
||||
return mc.Observe(gce.c.HealthChecks().Insert(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// CreateAlphaHealthCheck creates the given alpha HealthCheck.
|
||||
func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContextWithVersion("create", computeAlphaVersion)
|
||||
return mc.Observe(gce.c.AlphaHealthChecks().Insert(context.Background(), meta.GlobalKey(hc.Name), hc))
|
||||
return mc.Observe(gce.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// ListHealthChecks lists all HealthCheck in the project.
|
||||
func (gce *GCECloud) ListHealthChecks() ([]*compute.HealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContext("list")
|
||||
v, err := gce.c.HealthChecks().List(context.Background(), filter.None)
|
||||
v, err := gce.c.HealthChecks().List(ctx, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
|
43
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
@ -32,36 +31,51 @@ func newInstanceGroupMetricContext(request string, zone string) *metricContext {
|
||||
// CreateInstanceGroup creates an instance group with the given
|
||||
// instances. It is the caller's responsibility to add named ports.
|
||||
func (gce *GCECloud) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("create", zone)
|
||||
return mc.Observe(gce.c.InstanceGroups().Insert(context.Background(), meta.ZonalKey(ig.Name, zone), ig))
|
||||
return mc.Observe(gce.c.InstanceGroups().Insert(ctx, meta.ZonalKey(ig.Name, zone), ig))
|
||||
}
|
||||
|
||||
// DeleteInstanceGroup deletes an instance group.
|
||||
func (gce *GCECloud) DeleteInstanceGroup(name string, zone string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("delete", zone)
|
||||
return mc.Observe(gce.c.InstanceGroups().Delete(context.Background(), meta.ZonalKey(name, zone)))
|
||||
return mc.Observe(gce.c.InstanceGroups().Delete(ctx, meta.ZonalKey(name, zone)))
|
||||
}
|
||||
|
||||
// ListInstanceGroups lists all InstanceGroups in the project and
|
||||
// zone.
|
||||
func (gce *GCECloud) ListInstanceGroups(zone string) ([]*compute.InstanceGroup, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("list", zone)
|
||||
v, err := gce.c.InstanceGroups().List(context.Background(), zone, filter.None)
|
||||
v, err := gce.c.InstanceGroups().List(ctx, zone, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListInstancesInInstanceGroup lists all the instances in a given
|
||||
// instance group and state.
|
||||
func (gce *GCECloud) ListInstancesInInstanceGroup(name string, zone string, state string) ([]*compute.InstanceWithNamedPorts, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("list_instances", zone)
|
||||
req := &compute.InstanceGroupsListInstancesRequest{InstanceState: state}
|
||||
v, err := gce.c.InstanceGroups().ListInstances(context.Background(), meta.ZonalKey(name, zone), req, filter.None)
|
||||
v, err := gce.c.InstanceGroups().ListInstances(ctx, meta.ZonalKey(name, zone), req, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// AddInstancesToInstanceGroup adds the given instances to the given
|
||||
// instance group.
|
||||
func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("add_instances", zone)
|
||||
// TODO: should cull operation above this layer.
|
||||
if len(instanceRefs) == 0 {
|
||||
@ -70,12 +84,15 @@ func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, insta
|
||||
req := &compute.InstanceGroupsAddInstancesRequest{
|
||||
Instances: instanceRefs,
|
||||
}
|
||||
return mc.Observe(gce.c.InstanceGroups().AddInstances(context.Background(), meta.ZonalKey(name, zone), req))
|
||||
return mc.Observe(gce.c.InstanceGroups().AddInstances(ctx, meta.ZonalKey(name, zone), req))
|
||||
}
|
||||
|
||||
// RemoveInstancesFromInstanceGroup removes the given instances from
|
||||
// the instance group.
|
||||
func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("remove_instances", zone)
|
||||
// TODO: should cull operation above this layer.
|
||||
if len(instanceRefs) == 0 {
|
||||
@ -84,19 +101,25 @@ func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string,
|
||||
req := &compute.InstanceGroupsRemoveInstancesRequest{
|
||||
Instances: instanceRefs,
|
||||
}
|
||||
return mc.Observe(gce.c.InstanceGroups().RemoveInstances(context.Background(), meta.ZonalKey(name, zone), req))
|
||||
return mc.Observe(gce.c.InstanceGroups().RemoveInstances(ctx, meta.ZonalKey(name, zone), req))
|
||||
}
|
||||
|
||||
// SetNamedPortsOfInstanceGroup sets the list of named ports on a given instance group
|
||||
func (gce *GCECloud) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("set_namedports", zone)
|
||||
req := &compute.InstanceGroupsSetNamedPortsRequest{NamedPorts: namedPorts}
|
||||
return mc.Observe(gce.c.InstanceGroups().SetNamedPorts(context.Background(), meta.ZonalKey(igName, zone), req))
|
||||
return mc.Observe(gce.c.InstanceGroups().SetNamedPorts(ctx, meta.ZonalKey(igName, zone), req))
|
||||
}
|
||||
|
||||
// GetInstanceGroup returns an instance group by name.
|
||||
func (gce *GCECloud) GetInstanceGroup(name string, zone string) (*compute.InstanceGroup, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstanceGroupMetricContext("get", zone)
|
||||
v, err := gce.c.InstanceGroups().Get(context.Background(), meta.ZonalKey(name, zone))
|
||||
v, err := gce.c.InstanceGroups().Get(ctx, meta.ZonalKey(name, zone))
|
||||
return v, mc.Observe(err)
|
||||
}
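AddInstancesToInstanceGroup and RemoveInstancesFromInstanceGroup above take []*compute.InstanceReference and return early, without an API call, when the slice is empty. Below is a small sketch of how a caller might assemble those references; the project, zone, and URL layout are placeholders for illustration.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// instanceRefs builds the reference slice expected by the add/remove helpers.
func instanceRefs(project, zone string, names []string) []*compute.InstanceReference {
	refs := make([]*compute.InstanceReference, 0, len(names))
	for _, name := range names {
		refs = append(refs, &compute.InstanceReference{
			// Instance is the instance's resource URL (layout assumed here).
			Instance: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", project, zone, name),
		})
	}
	return refs
}

func main() {
	for _, ref := range instanceRefs("my-project", "us-central1-a", []string{"node-1", "node-2"}) {
		fmt.Println(ref.Instance)
	}
}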
|
||||
|
94
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go
generated
vendored
@ -21,7 +21,6 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -35,6 +34,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
@ -100,12 +100,15 @@ func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.No
|
||||
// NodeAddressesByProviderID will not be called from the node that is requesting this ID.
|
||||
// i.e. metadata service and other local methods cannot be used here
|
||||
func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
_, zone, name, err := splitProviderID(providerID)
|
||||
if err != nil {
|
||||
return []v1.NodeAddress{}, err
|
||||
}
|
||||
|
||||
instance, err := gce.c.Instances().Get(context.Background(), meta.ZonalKey(canonicalizeInstanceName(name), zone))
|
||||
instance, err := gce.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone))
|
||||
if err != nil {
|
||||
return []v1.NodeAddress{}, fmt.Errorf("error while querying for providerID %q: %v", providerID, err)
|
||||
}
|
||||
@ -142,6 +145,11 @@ func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, erro
|
||||
return instance, nil
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in a safe state to detach volumes
|
||||
func (gce *GCECloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
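Because InstanceShutdownByProviderID only returns cloudprovider.NotImplemented here, a caller has to treat that sentinel as "no answer" rather than "not shut down". A hedged, self-contained sketch of that distinction follows, using a local sentinel so the example compiles on its own; the real value is cloudprovider.NotImplemented.

package main

import (
	"errors"
	"fmt"
)

// errNotImplemented stands in for cloudprovider.NotImplemented.
var errNotImplemented = errors.New("unimplemented")

// instanceShutdown mimics the GCE implementation above: it cannot answer.
func instanceShutdown(providerID string) (bool, error) {
	return false, errNotImplemented
}

func main() {
	shutdown, err := instanceShutdown("gce://my-project/us-central1-a/node-1")
	switch {
	case errors.Is(err, errNotImplemented):
		// The provider cannot report shutdown state; fall back to other
		// signals instead of assuming the instance is running.
		fmt.Println("shutdown state unknown")
	case err != nil:
		fmt.Println("lookup failed:", err)
	default:
		fmt.Println("instance shutdown:", shutdown)
	}
}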
|
||||
|
||||
// InstanceTypeByProviderID returns the cloudprovider instance type of the node
|
||||
// with the specified unique providerID This method will not be called from the
|
||||
// node that is requesting this ID. i.e. metadata service and other local
|
||||
@ -155,27 +163,6 @@ func (gce *GCECloud) InstanceTypeByProviderID(ctx context.Context, providerID st
|
||||
return instance.Type, nil
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the node with the specified NodeName (deprecated).
|
||||
func (gce *GCECloud) ExternalID(ctx context.Context, nodeName types.NodeName) (string, error) {
|
||||
instanceName := mapNodeNameToInstanceName(nodeName)
|
||||
if gce.useMetadataServer {
|
||||
// Use metadata, if possible, to fetch ID. See issue #12000
|
||||
if gce.isCurrentInstance(instanceName) {
|
||||
externalInstanceID, err := getCurrentExternalIDViaMetadata()
|
||||
if err == nil {
|
||||
return externalInstanceID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to GCE API call if metadata server fails to retrieve ID
|
||||
inst, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strconv.FormatUint(inst.ID, 10), nil
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (gce *GCECloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
@ -229,8 +216,11 @@ func (gce *GCECloud) InstanceType(ctx context.Context, nodeName types.NodeName)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
return wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
|
||||
project, err := gce.c.Projects().Get(context.Background(), gce.projectID)
|
||||
project, err := gce.c.Projects().Get(ctx, gce.projectID)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not get project: %v", err)
|
||||
return false, nil
|
||||
@ -261,7 +251,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k
|
||||
}
|
||||
|
||||
mc := newInstancesMetricContext("add_ssh_key", "")
|
||||
err = gce.c.Projects().SetCommonInstanceMetadata(context.Background(), gce.projectID, project.CommonInstanceMetadata)
|
||||
err = gce.c.Projects().SetCommonInstanceMetadata(ctx, gce.projectID, project.CommonInstanceMetadata)
|
||||
mc.Observe(err)
|
||||
|
||||
if err != nil {
|
||||
@ -301,9 +291,12 @@ func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) {
|
||||
//
|
||||
// TODO: this should be removed from the cloud provider.
|
||||
func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
zones := sets.NewString()
|
||||
for _, zone := range gce.managedZones {
|
||||
instances, err := gce.c.Instances().List(context.Background(), zone, filter.None)
|
||||
instances, err := gce.c.Instances().List(ctx, zone, filter.None)
|
||||
if err != nil {
|
||||
return sets.NewString(), err
|
||||
}
|
||||
@ -316,15 +309,21 @@ func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) {
|
||||
|
||||
// InsertInstance creates a new instance on GCP
|
||||
func (gce *GCECloud) InsertInstance(project string, zone string, i *compute.Instance) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newInstancesMetricContext("create", zone)
|
||||
return mc.Observe(gce.c.Instances().Insert(context.Background(), meta.ZonalKey(i.Name, zone), i))
|
||||
return mc.Observe(gce.c.Instances().Insert(ctx, meta.ZonalKey(i.Name, zone), i))
|
||||
}
|
||||
|
||||
// ListInstanceNames returns a string of instance names separated by spaces.
|
||||
// This method should only be used for e2e testing.
|
||||
// TODO: remove this method.
|
||||
func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) {
|
||||
l, err := gce.c.Instances().List(context.Background(), zone, filter.None)
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
l, err := gce.c.Instances().List(ctx, zone, filter.None)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -337,7 +336,10 @@ func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) {
|
||||
|
||||
// DeleteInstance deletes an instance specified by project, zone, and name
|
||||
func (gce *GCECloud) DeleteInstance(project, zone, name string) error {
|
||||
return gce.c.Instances().Delete(context.Background(), meta.ZonalKey(name, zone))
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
return gce.c.Instances().Delete(ctx, meta.ZonalKey(name, zone))
|
||||
}
|
||||
|
||||
// Implementation of Instances.CurrentNodeName
|
||||
@ -349,6 +351,9 @@ func (gce *GCECloud) CurrentNodeName(ctx context.Context, hostname string) (type
|
||||
// `node` for allocation to pods. Returns a list of the form
|
||||
// "<ip>/<netmask>".
|
||||
func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
var instance *gceInstance
|
||||
instance, err = gce.getInstanceByName(mapNodeNameToInstanceName(nodeName))
|
||||
if err != nil {
|
||||
@ -356,7 +361,7 @@ func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err e
|
||||
}
|
||||
|
||||
var res *computebeta.Instance
|
||||
res, err = gce.c.BetaInstances().Get(context.Background(), meta.ZonalKey(instance.Name, lastComponent(instance.Zone)))
|
||||
res, err = gce.c.BetaInstances().Get(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -372,12 +377,14 @@ func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err e
|
||||
// AddAliasToInstance adds an alias to the given instance from the named
|
||||
// secondary range.
|
||||
func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
v1instance, err := gce.getInstanceByName(mapNodeNameToInstanceName(nodeName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
instance, err := gce.c.BetaInstances().Get(context.Background(), meta.ZonalKey(v1instance.Name, lastComponent(v1instance.Zone)))
|
||||
instance, err := gce.c.BetaInstances().Get(ctx, meta.ZonalKey(v1instance.Name, lastComponent(v1instance.Zone)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -400,13 +407,16 @@ func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNe
|
||||
})
|
||||
|
||||
mc := newInstancesMetricContext("add_alias", v1instance.Zone)
|
||||
err = gce.c.BetaInstances().UpdateNetworkInterface(context.Background(), meta.ZonalKey(instance.Name, lastComponent(instance.Zone)), iface.Name, iface)
|
||||
err = gce.c.BetaInstances().UpdateNetworkInterface(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone)), iface.Name, iface)
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
// Gets the named instances, returning cloudprovider.InstanceNotFound if any
|
||||
// instance is not found
|
||||
func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
found := map[string]*gceInstance{}
|
||||
remaining := len(names)
|
||||
|
||||
@ -424,7 +434,7 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error)
|
||||
if remaining == 0 {
|
||||
break
|
||||
}
|
||||
instances, err := gce.c.Instances().List(context.Background(), zone, filter.Regexp("name", nodeInstancePrefix+".*"))
|
||||
instances, err := gce.c.Instances().List(ctx, zone, filter.Regexp("name", nodeInstancePrefix+".*"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -488,9 +498,12 @@ func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) {
|
||||
}
|
||||
|
||||
func (gce *GCECloud) getInstanceFromProjectInZoneByName(project, zone, name string) (*gceInstance, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
name = canonicalizeInstanceName(name)
|
||||
mc := newInstancesMetricContext("get", zone)
|
||||
res, err := gce.c.Instances().Get(context.Background(), meta.ZonalKey(name, zone))
|
||||
res, err := gce.c.Instances().Get(ctx, meta.ZonalKey(name, zone))
|
||||
mc.Observe(err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -516,14 +529,6 @@ func getInstanceIDViaMetadata() (string, error) {
|
||||
return parts[0], nil
|
||||
}
|
||||
|
||||
func getCurrentExternalIDViaMetadata() (string, error) {
|
||||
externalID, err := metadata.Get("instance/id")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("couldn't get external ID: %v", err)
|
||||
}
|
||||
return externalID, nil
|
||||
}
|
||||
|
||||
func getCurrentMachineTypeViaMetadata() (string, error) {
|
||||
mType, err := metadata.Get("instance/machine-type")
|
||||
if err != nil {
|
||||
@ -557,6 +562,9 @@ func (gce *GCECloud) isCurrentInstance(instanceID string) bool {
|
||||
// format of the host names in the cluster. Only use it as a fallback if
|
||||
// gce.nodeTags is unspecified
|
||||
func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
// TODO: We could store the tags in gceInstance, so we could have already fetched it
|
||||
hostNamesByZone := make(map[string]map[string]bool) // map of zones -> map of names -> bool (for easy lookup)
|
||||
nodeInstancePrefix := gce.nodeInstancePrefix
|
||||
@ -581,7 +589,7 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
|
||||
filt = filter.Regexp("name", nodeInstancePrefix+".*")
|
||||
}
|
||||
for zone, hostNames := range hostNamesByZone {
|
||||
instances, err := gce.c.Instances().List(context.Background(), zone, filt)
|
||||
instances, err := gce.c.Instances().List(ctx, zone, filt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
923
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external_test.go
generated
vendored
File diff suppressed because it is too large
@ -563,6 +563,9 @@ func (gce *GCECloud) ensureInternalBackendServiceGroups(name string, igLinks []s
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set the backend service's backends to the updated list.
|
||||
bs.Backends = backends
|
||||
|
||||
glog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name)
|
||||
if err := gce.UpdateRegionBackendService(bs, gce.region); err != nil {
|
||||
return err
|
||||
@ -575,8 +578,7 @@ func shareBackendService(svc *v1.Service) bool {
|
||||
return GetLoadBalancerAnnotationBackendShare(svc) && !v1_service.RequestsOnlyLocalTraffic(svc)
|
||||
}
|
||||
|
||||
func backendsFromGroupLinks(igLinks []string) []*compute.Backend {
|
||||
var backends []*compute.Backend
|
||||
func backendsFromGroupLinks(igLinks []string) (backends []*compute.Backend) {
|
||||
for _, igLink := range igLinks {
|
||||
backends = append(backends, &compute.Backend{
|
||||
Group: igLink,
|
||||
|
739
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal_test.go
generated
vendored
Normal file
@ -0,0 +1,739 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
|
||||
)
|
||||
|
||||
func createInternalLoadBalancer(gce *GCECloud, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodeNames []string, clusterName, clusterID, zoneName string) (*v1.LoadBalancerStatus, error) {
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, zoneName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return gce.ensureInternalLoadBalancer(
|
||||
clusterName,
|
||||
clusterID,
|
||||
svc,
|
||||
existingFwdRule,
|
||||
nodes,
|
||||
)
|
||||
}
|
||||
|
||||
func TestEnsureInternalBackendServiceUpdates(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
nodeNames := []string{"test-node-1"}
|
||||
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
|
||||
require.NoError(t, err)
|
||||
|
||||
sharedBackend := shareBackendService(svc)
|
||||
bsName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
|
||||
err = gce.ensureInternalBackendService(bsName, "description", svc.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update the Internal Backend Service with a new ServiceAffinity
|
||||
err = gce.ensureInternalBackendService(bsName, "description", v1.ServiceAffinityNone, cloud.SchemeInternal, "TCP", igLinks, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
bs, err := gce.GetRegionBackendService(bsName, gce.region)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, bs.SessionAffinity, strings.ToUpper(string(v1.ServiceAffinityNone)))
|
||||
}
|
||||
|
||||
func TestEnsureInternalBackendServiceGroups(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for desc, tc := range map[string]struct {
|
||||
mockModifier func(*cloud.MockGCE)
|
||||
}{
|
||||
"Basic workflow": {},
|
||||
"GetRegionBackendService failed": {
|
||||
mockModifier: func(c *cloud.MockGCE) {
|
||||
c.MockRegionBackendServices.GetHook = mock.GetRegionBackendServicesErrHook
|
||||
},
|
||||
},
|
||||
"UpdateRegionBackendServices failed": {
|
||||
mockModifier: func(c *cloud.MockGCE) {
|
||||
c.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServicesErrHook
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
vals := DefaultTestClusterValues()
|
||||
nodeNames := []string{"test-node-1"}
|
||||
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
|
||||
require.NoError(t, err)
|
||||
|
||||
sharedBackend := shareBackendService(svc)
|
||||
bsName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
|
||||
|
||||
err = gce.ensureInternalBackendService(bsName, "description", svc.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Update the BackendService with new InstanceGroups
|
||||
if tc.mockModifier != nil {
|
||||
tc.mockModifier(gce.c.(*cloud.MockGCE))
|
||||
}
|
||||
newIGLinks := []string{"new-test-ig-1", "new-test-ig-2"}
|
||||
err = gce.ensureInternalBackendServiceGroups(bsName, newIGLinks)
|
||||
if tc.mockModifier != nil {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
bs, err := gce.GetRegionBackendService(bsName, gce.region)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check that the Backends reflect the new InstanceGroups
|
||||
backends := backendsFromGroupLinks(newIGLinks)
|
||||
assert.Equal(t, bs.Backends, backends)
|
||||
})
|
||||
}
|
||||
}
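The table-driven test above drives failures by assigning error hooks on the MockGCE fake (for example c.MockRegionBackendServices.UpdateHook). Here is a self-contained sketch of that hook-injection pattern; the types and hook names below are invented for illustration and are not the cloud/mock package's actual API.

package main

import (
	"errors"
	"fmt"
)

// fakeBackendServices is a toy fake with an optional override hook.
type fakeBackendServices struct {
	UpdateHook func(name string) error // when set, replaces the default behaviour
}

func (f *fakeBackendServices) Update(name string) error {
	if f.UpdateHook != nil {
		return f.UpdateHook(name)
	}
	return nil // default fake: the update succeeds
}

func main() {
	fake := &fakeBackendServices{}
	fmt.Println("default:", fake.Update("bs-1"))

	// One test case injects a failure, mirroring how the test above sets
	// an error hook from the mock package, then asserts on the error.
	fake.UpdateHook = func(name string) error { return errors.New("injected update failure") }
	fmt.Println("with hook:", fake.Update("bs-1"))
}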
|
||||
|
||||
func TestEnsureInternalLoadBalancer(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
nodeNames := []string{"test-node-1"}
|
||||
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, status.Ingress)
|
||||
assertInternalLbResources(t, gce, svc, vals, nodeNames)
|
||||
}
|
||||
|
||||
func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
nodeNames := []string{"test-node-1"}
|
||||
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
|
||||
// Create the expected resources necessary for an Internal Load Balancer
|
||||
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
|
||||
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc)
|
||||
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||
hcPath, hcPort := GetNodesHealthCheckPath(), GetNodesHealthCheckPort()
|
||||
existingHC := newInternalLBHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort)
|
||||
err = gce.CreateHealthCheck(existingHC)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
|
||||
require.NoError(t, err)
|
||||
|
||||
sharedBackend := shareBackendService(svc)
|
||||
bsDescription := makeBackendServiceDescription(nm, sharedBackend)
|
||||
bsName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
|
||||
err = gce.ensureInternalBackendService(bsName, bsDescription, svc.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, existingHC.SelfLink)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
|
||||
// Create a ForwardingRule that's missing an IP address
|
||||
existingFwdRule := &compute.ForwardingRule{
|
||||
Name: lbName,
|
||||
IPAddress: "",
|
||||
Ports: []string{"123"},
|
||||
IPProtocol: "TCP",
|
||||
LoadBalancingScheme: string(cloud.SchemeInternal),
|
||||
}
|
||||
gce.CreateRegionForwardingRule(existingFwdRule, gce.region)
|
||||
|
||||
// Create a Firewall that's missing a Description
|
||||
existingFirewall := &compute.Firewall{
|
||||
Name: lbName,
|
||||
Network: gce.networkURL,
|
||||
Allowed: []*compute.FirewallAllowed{
|
||||
{
|
||||
IPProtocol: "tcp",
|
||||
Ports: []string{"123"},
|
||||
},
|
||||
},
|
||||
}
|
||||
gce.CreateFirewall(existingFirewall)
|
||||
|
||||
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc)
|
||||
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||
hcPath, hcPort := GetNodesHealthCheckPath(), GetNodesHealthCheckPort()
|
||||
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
|
||||
|
||||
// Create a healthcheck with an incorrect threshold
|
||||
existingHC := newInternalLBHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort)
|
||||
existingHC.HealthyThreshold = gceHcHealthyThreshold * 10
|
||||
gce.CreateHealthCheck(existingHC)
|
||||
|
||||
// Create a backend Service that's missing Description and Backends
|
||||
sharedBackend := shareBackendService(svc)
|
||||
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
|
||||
existingBS := &compute.BackendService{
|
||||
Name: lbName,
|
||||
Protocol: "TCP",
|
||||
HealthChecks: []string{existingHC.SelfLink},
|
||||
SessionAffinity: translateAffinityType(svc.Spec.SessionAffinity),
|
||||
LoadBalancingScheme: string(cloud.SchemeInternal),
|
||||
}
|
||||
|
||||
gce.CreateRegionBackendService(existingBS, gce.region)
|
||||
existingFwdRule.BackendService = existingBS.Name
|
||||
|
||||
_, err = createInternalLoadBalancer(gce, svc, existingFwdRule, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Expect new resources with the correct attributes to be created
|
||||
rule, _ := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||
assert.NotEqual(t, existingFwdRule, rule)
|
||||
|
||||
firewall, err := gce.GetFirewall(lbName)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, firewall, existingFirewall)
|
||||
|
||||
healthcheck, err := gce.GetHealthCheck(hcName)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, healthcheck, existingHC)
|
||||
|
||||
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, bs, existingBS)
|
||||
}
|
||||
|
||||
func TestUpdateInternalLoadBalancerBackendServices(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
nodeName := "test-node-1"
|
||||
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
_, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// BackendService exists prior to updateInternalLoadBalancer call, but has
|
||||
// incorrect (missing) attributes.
|
||||
// ensureInternalBackendServiceGroups is called and creates the correct
|
||||
// BackendService
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
sharedBackend := shareBackendService(svc)
|
||||
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
|
||||
existingBS := &compute.BackendService{
|
||||
Name: backendServiceName,
|
||||
Protocol: "TCP",
|
||||
SessionAffinity: translateAffinityType(svc.Spec.SessionAffinity),
|
||||
LoadBalancingScheme: string(cloud.SchemeInternal),
|
||||
}
|
||||
|
||||
gce.CreateRegionBackendService(existingBS, gce.region)
|
||||
|
||||
nodes, err := createAndInsertNodes(gce, []string{nodeName}, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = gce.updateInternalLoadBalancer(vals.ClusterName, vals.ClusterID, svc, nodes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the new BackendService has the correct attributes
|
||||
url_base := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s", vals.ProjectID)
|
||||
|
||||
assert.NotEqual(t, existingBS, bs)
|
||||
assert.Equal(
|
||||
t,
|
||||
bs.SelfLink,
|
||||
fmt.Sprintf("%s/regions/%s/backendServices/%s", url_base, vals.Region, bs.Name),
|
||||
)
|
||||
assert.Equal(t, bs.Description, `{"kubernetes.io/service-name":"/"}`)
|
||||
assert.Equal(
|
||||
t,
|
||||
bs.HealthChecks,
|
||||
[]string{fmt.Sprintf("%s/global/healthChecks/k8s-%s-node", url_base, vals.ClusterID)},
|
||||
)
|
||||
}
|
||||
|
||||
func TestUpdateInternalLoadBalancerNodes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
node1Name := []string{"test-node-1"}
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
nodes, err := createAndInsertNodes(gce, node1Name, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = gce.ensureInternalLoadBalancer(vals.ClusterName, vals.ClusterID, svc, nil, nodes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Replace the node in initial zone; add new node in a new zone.
|
||||
node2Name, node3Name := "test-node-2", "test-node-3"
|
||||
newNodesZoneA, err := createAndInsertNodes(gce, []string{node2Name}, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
newNodesZoneB, err := createAndInsertNodes(gce, []string{node3Name}, vals.SecondaryZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodes = append(newNodesZoneA, newNodesZoneB...)
|
||||
err = gce.updateInternalLoadBalancer(vals.ClusterName, vals.ClusterID, svc, nodes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
sharedBackend := shareBackendService(svc)
|
||||
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
|
||||
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(bs.Backends), "Want two backends referencing two instance groups")
|
||||
|
||||
for _, zone := range []string{vals.ZoneName, vals.SecondaryZoneName} {
|
||||
var found bool
|
||||
for _, be := range bs.Backends {
|
||||
if strings.Contains(be.Group, zone) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Expected list of backends to have zone %q", zone)
|
||||
}
|
||||
|
||||
// Expect initial zone to have test-node-2
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
instances, err := gce.ListInstancesInInstanceGroup(igName, vals.ZoneName, "ALL")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(instances))
|
||||
assert.Contains(
|
||||
t,
|
||||
instances[0].Instance,
|
||||
fmt.Sprintf("projects/%s/zones/%s/instances/%s", vals.ProjectID, vals.ZoneName, node2Name),
|
||||
)
|
||||
|
||||
// Expect secondary zone to have test-node-3
|
||||
instances, err = gce.ListInstancesInInstanceGroup(igName, vals.SecondaryZoneName, "ALL")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(instances))
|
||||
assert.Contains(
|
||||
t,
|
||||
instances[0].Instance,
|
||||
fmt.Sprintf("projects/%s/zones/%s/instances/%s", vals.ProjectID, vals.SecondaryZoneName, node3Name),
|
||||
)
|
||||
}
|
||||
|
||||
func TestEnsureInternalLoadBalancerDeleted(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
_, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, svc)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assertInternalLbResourcesDeleted(t, gce, svc, vals, true)
|
||||
}
|
||||
|
||||
func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
|
||||
_, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, svc)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Deleting the loadbalancer and resources again should not cause an error.
|
||||
err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, svc)
|
||||
assert.NoError(t, err)
|
||||
assertInternalLbResourcesDeleted(t, gce, svc, vals, true)
|
||||
}
|
||||
|
||||
func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) {
|
||||
vals := DefaultTestClusterValues()
|
||||
nodeName := "test-node-1"
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
healthCheckNodePort := int32(10101)
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
svc.Spec.HealthCheckNodePort = healthCheckNodePort
|
||||
svc.Spec.Type = v1.ServiceTypeLoadBalancer
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
||||
|
||||
status, err := createInternalLoadBalancer(gce, svc, nil, []string{nodeName}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, status.Ingress)
|
||||
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
hc, err := gce.GetHealthCheck(loadBalancerName)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, hc)
|
||||
assert.Equal(t, int64(healthCheckNodePort), hc.HttpHealthCheck.Port)
|
||||
}
|
||||
|
||||
func TestClearPreviousInternalResources(t *testing.T) {
|
||||
// Configure testing environment.
|
||||
vals := DefaultTestClusterValues()
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
|
||||
gce, err := fakeGCECloud(vals)
|
||||
c := gce.c.(*cloud.MockGCE)
|
||||
require.NoError(t, err)
|
||||
|
||||
hc_1, err := gce.ensureInternalHealthCheck("hc_1", nm, false, "healthz", 12345)
|
||||
require.NoError(t, err)
|
||||
|
||||
hc_2, err := gce.ensureInternalHealthCheck("hc_2", nm, false, "healthz", 12346)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = gce.ensureInternalBackendService(svc.ObjectMeta.Name, "", svc.Spec.SessionAffinity, cloud.SchemeInternal, v1.ProtocolTCP, []string{}, "")
|
||||
require.NoError(t, err)
|
||||
backendSvc, err := gce.GetRegionBackendService(svc.ObjectMeta.Name, gce.region)
|
||||
backendSvc.HealthChecks = []string{hc_1.SelfLink, hc_2.SelfLink}
|
||||
|
||||
c.MockRegionBackendServices.DeleteHook = mock.DeleteRegionBackendServicesErrHook
|
||||
c.MockHealthChecks.DeleteHook = mock.DeleteHealthChecksInternalErrHook
|
||||
gce.clearPreviousInternalResources(svc, loadBalancerName, backendSvc, "expectedBSName", "expectedHCName")
|
||||
|
||||
backendSvc, err = gce.GetRegionBackendService(svc.ObjectMeta.Name, gce.region)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, backendSvc, "BackendService should not be deleted when api is mocked out.")
|
||||
hc_1, err = gce.GetHealthCheck("hc_1")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, hc_1, "HealthCheck should not be deleted when there are more than one healthcheck attached.")
|
||||
hc_2, err = gce.GetHealthCheck("hc_2")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, hc_2, "HealthCheck should not be deleted when there are more than one healthcheck attached.")
|
||||
|
||||
c.MockRegionBackendServices.DeleteHook = mock.DeleteRegionBackendServicesInUseErrHook
|
||||
backendSvc.HealthChecks = []string{hc_1.SelfLink}
|
||||
gce.clearPreviousInternalResources(svc, loadBalancerName, backendSvc, "expectedBSName", "expectedHCName")
|
||||
|
||||
hc_1, err = gce.GetHealthCheck("hc_1")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, hc_1, "HealthCheck should not be deleted when api is mocked out.")
|
||||
|
||||
c.MockHealthChecks.DeleteHook = mock.DeleteHealthChecksInuseErrHook
|
||||
gce.clearPreviousInternalResources(svc, loadBalancerName, backendSvc, "expectedBSName", "expectedHCName")
|
||||
|
||||
hc_1, err = gce.GetHealthCheck("hc_1")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, hc_1, "HealthCheck should not be deleted when api is mocked out.")
|
||||
|
||||
c.MockRegionBackendServices.DeleteHook = nil
|
||||
c.MockHealthChecks.DeleteHook = nil
|
||||
gce.clearPreviousInternalResources(svc, loadBalancerName, backendSvc, "expectedBSName", "expectedHCName")
|
||||
|
||||
backendSvc, err = gce.GetRegionBackendService(svc.ObjectMeta.Name, gce.region)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, backendSvc, "BackendService should be deleted.")
|
||||
hc_1, err = gce.GetHealthCheck("hc_1")
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, hc_1, "HealthCheck should be deleted.")
|
||||
}
|
||||
|
||||
func TestEnsureInternalFirewallSucceedsOnXPN(t *testing.T) {
|
||||
gce, err := fakeGCECloud(DefaultTestClusterValues())
|
||||
require.NoError(t, err)
|
||||
vals := DefaultTestClusterValues()
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
fwName := cloudprovider.GetLoadBalancerName(svc)
|
||||
|
||||
c := gce.c.(*cloud.MockGCE)
|
||||
c.MockFirewalls.InsertHook = mock.InsertFirewallsUnauthorizedErrHook
|
||||
c.MockFirewalls.UpdateHook = mock.UpdateFirewallsUnauthorizedErrHook
|
||||
gce.onXPN = true
|
||||
require.True(t, gce.OnXPN())
|
||||
|
||||
recorder := record.NewFakeRecorder(1024)
|
||||
gce.eventRecorder = recorder
|
||||
|
||||
nodes, err := createAndInsertNodes(gce, []string{"test-node-1"}, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
sourceRange := []string{"10.0.0.0/20"}
|
||||
gce.ensureInternalFirewall(
|
||||
svc,
|
||||
fwName,
|
||||
"A sad little firewall",
|
||||
sourceRange,
|
||||
[]string{"123"},
|
||||
v1.ProtocolTCP,
|
||||
nodes)
|
||||
require.Nil(t, err, "Should succeed when XPN is on.")
|
||||
|
||||
checkEvent(t, recorder, FilewallChangeMsg, true)
|
||||
|
||||
// Create a firewall.
|
||||
c.MockFirewalls.InsertHook = nil
|
||||
c.MockFirewalls.UpdateHook = nil
|
||||
gce.onXPN = false
|
||||
|
||||
gce.ensureInternalFirewall(
|
||||
svc,
|
||||
fwName,
|
||||
"A sad little firewall",
|
||||
sourceRange,
|
||||
[]string{"123"},
|
||||
v1.ProtocolTCP,
|
||||
nodes)
|
||||
require.Nil(t, err)
|
||||
existingFirewall, err := gce.GetFirewall(fwName)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, existingFirewall)
|
||||
|
||||
gce.onXPN = true
|
||||
c.MockFirewalls.InsertHook = mock.InsertFirewallsUnauthorizedErrHook
|
||||
c.MockFirewalls.UpdateHook = mock.UpdateFirewallsUnauthorizedErrHook
|
||||
|
||||
// Try to update the firewall just created.
|
||||
gce.ensureInternalFirewall(
|
||||
svc,
|
||||
fwName,
|
||||
"A happy little firewall",
|
||||
sourceRange,
|
||||
[]string{"123"},
|
||||
v1.ProtocolTCP,
|
||||
nodes)
|
||||
require.Nil(t, err, "Should succeed when XPN is on.")
|
||||
|
||||
checkEvent(t, recorder, FilewallChangeMsg, true)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerDeletedSucceedsOnXPN(t *testing.T) {
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
c := gce.c.(*cloud.MockGCE)
|
||||
recorder := record.NewFakeRecorder(1024)
|
||||
gce.eventRecorder = recorder
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = createInternalLoadBalancer(gce, fakeLoadbalancerService(string(LBTypeInternal)), nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
c.MockFirewalls.DeleteHook = mock.DeleteFirewallsUnauthorizedErrHook
|
||||
gce.onXPN = true
|
||||
|
||||
err = gce.ensureInternalLoadBalancerDeleted(vals.ClusterName, vals.ClusterID, fakeLoadbalancerService(string(LBTypeInternal)))
|
||||
assert.NoError(t, err)
|
||||
checkEvent(t, recorder, FilewallChangeMsg, true)
|
||||
}
|
||||
|
||||
func TestEnsureInternalInstanceGroupsDeleted(t *testing.T) {
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
c := gce.c.(*cloud.MockGCE)
|
||||
recorder := record.NewFakeRecorder(1024)
|
||||
gce.eventRecorder = recorder
|
||||
require.NoError(t, err)
|
||||
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
|
||||
svc := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
_, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
|
||||
c.MockZones.ListHook = mock.ListZonesInternalErrHook
|
||||
|
||||
err = gce.ensureInternalLoadBalancerDeleted(igName, vals.ClusterID, svc)
|
||||
assert.Error(t, err, mock.InternalServerError)
|
||||
ig, err := gce.GetInstanceGroup(igName, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ig)
|
||||
|
||||
c.MockZones.ListHook = nil
|
||||
c.MockInstanceGroups.DeleteHook = mock.DeleteInstanceGroupInternalErrHook
|
||||
|
||||
err = gce.ensureInternalInstanceGroupsDeleted(igName)
|
||||
assert.Error(t, err, mock.InternalServerError)
|
||||
ig, err = gce.GetInstanceGroup(igName, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ig)
|
||||
|
||||
c.MockInstanceGroups.DeleteHook = nil
|
||||
err = gce.ensureInternalInstanceGroupsDeleted(igName)
|
||||
assert.NoError(t, err)
|
||||
ig, err = gce.GetInstanceGroup(igName, vals.ZoneName)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, ig)
|
||||
}
|
||||
|
||||
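// EnsureILBParams holds the arguments passed to gce.ensureInternalLoadBalancer in the error-path tests below.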
type EnsureILBParams struct {
|
||||
clusterName string
|
||||
clusterID string
|
||||
service *v1.Service
|
||||
existingFwdRule *compute.ForwardingRule
|
||||
nodes []*v1.Node
|
||||
}
|
||||
|
||||
// newEnsureILBParams constructs an EnsureILBParams with the default test cluster values and the given nodes.
|
||||
func newEnsureILBParams(nodes []*v1.Node) *EnsureILBParams {
|
||||
vals := DefaultTestClusterValues()
|
||||
return &EnsureILBParams{
|
||||
vals.ClusterName,
|
||||
vals.ClusterID,
|
||||
fakeLoadbalancerService(string(LBTypeInternal)),
|
||||
nil,
|
||||
nodes,
|
||||
}
|
||||
}
|
||||
|
||||
// TestEnsureInternalLoadBalancerErrors tests the function
|
||||
// ensureInternalLoadBalancer, making sure the system won't panic when
|
||||
// exceptions are raised by GCE.
|
||||
func TestEnsureInternalLoadBalancerErrors(t *testing.T) {
|
||||
vals := DefaultTestClusterValues()
|
||||
var params *EnsureILBParams
|
||||
|
||||
for desc, tc := range map[string]struct {
|
||||
adjustParams func(*EnsureILBParams)
|
||||
injectMock func(*cloud.MockGCE)
|
||||
}{
|
||||
"Create internal instance groups failed": {
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockInstanceGroups.GetHook = mock.GetInstanceGroupInternalErrHook
|
||||
},
|
||||
},
|
||||
"Invalid existing forwarding rules given": {
|
||||
adjustParams: func(params *EnsureILBParams) {
|
||||
params.existingFwdRule = &compute.ForwardingRule{BackendService: "badBackendService"}
|
||||
},
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockRegionBackendServices.GetHook = mock.GetRegionBackendServicesErrHook
|
||||
},
|
||||
},
|
||||
"EnsureInternalBackendService failed": {
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockRegionBackendServices.GetHook = mock.GetRegionBackendServicesErrHook
|
||||
},
|
||||
},
|
||||
"Create internal health check failed": {
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockHealthChecks.GetHook = mock.GetHealthChecksInternalErrHook
|
||||
},
|
||||
},
|
||||
"Create firewall failed": {
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockFirewalls.InsertHook = mock.InsertFirewallsUnauthorizedErrHook
|
||||
},
|
||||
},
|
||||
"Create region forwarding rule failed": {
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockForwardingRules.InsertHook = mock.InsertForwardingRulesInternalErrHook
|
||||
},
|
||||
},
|
||||
"Get region forwarding rule failed": {
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockForwardingRules.GetHook = mock.GetForwardingRulesInternalErrHook
|
||||
},
|
||||
},
|
||||
"Delete region forwarding rule failed": {
|
||||
adjustParams: func(params *EnsureILBParams) {
|
||||
params.existingFwdRule = &compute.ForwardingRule{BackendService: "badBackendService"}
|
||||
},
|
||||
injectMock: func(c *cloud.MockGCE) {
|
||||
c.MockForwardingRules.DeleteHook = mock.DeleteForwardingRuleErrHook
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
gce, err := fakeGCECloud(DefaultTestClusterValues())
require.NoError(t, err)
|
||||
nodes, err := createAndInsertNodes(gce, []string{"test-node-1"}, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
params = newEnsureILBParams(nodes)
|
||||
if tc.adjustParams != nil {
|
||||
tc.adjustParams(params)
|
||||
}
|
||||
if tc.injectMock != nil {
|
||||
tc.injectMock(gce.c.(*cloud.MockGCE))
|
||||
}
|
||||
status, err := gce.ensureInternalLoadBalancer(
|
||||
params.clusterName,
|
||||
params.clusterID,
|
||||
params.service,
|
||||
params.existingFwdRule,
|
||||
params.nodes,
|
||||
)
|
||||
assert.Error(t, err, "Should return an error when "+desc)
|
||||
assert.Nil(t, status, "Should not return a status when "+desc)
|
||||
})
|
||||
}
|
||||
}
|
171
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_test.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetLoadBalancer(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService("")
|
||||
|
||||
// When a loadbalancer has not been created
|
||||
status, found, err := gce.GetLoadBalancer(context.Background(), vals.ClusterName, apiService)
|
||||
assert.Nil(t, status)
|
||||
assert.False(t, found)
|
||||
assert.Nil(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
expectedStatus, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes)
|
||||
require.NoError(t, err)
|
||||
|
||||
status, found, err = gce.GetLoadBalancer(context.Background(), vals.ClusterName, apiService)
|
||||
assert.Equal(t, expectedStatus, status)
|
||||
assert.True(t, found)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerCreatesExternalLb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService("")
|
||||
status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, status.Ingress)
|
||||
assertExternalLbResources(t, gce, apiService, vals, nodeNames)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerCreatesInternalLb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, status.Ingress)
|
||||
assertInternalLbResources(t, gce, apiService, vals, nodeNames)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerDeletesExistingInternalLb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService("")
|
||||
createInternalLoadBalancer(gce, apiService, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
|
||||
status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, status.Ingress)
|
||||
|
||||
assertExternalLbResources(t, gce, apiService, vals, nodeNames)
|
||||
assertInternalLbResourcesDeleted(t, gce, apiService, vals, false)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerDeletesExistingExternalLb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService("")
|
||||
createExternalLoadBalancer(gce, apiService, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
|
||||
apiService = fakeLoadbalancerService(string(LBTypeInternal))
|
||||
status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, status.Ingress)
|
||||
|
||||
assertInternalLbResources(t, gce, apiService, vals, nodeNames)
|
||||
assertExternalLbResourcesDeleted(t, gce, apiService, vals, false)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerDeletedDeletesExternalLb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
_, err = createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService("")
|
||||
createExternalLoadBalancer(gce, apiService, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
|
||||
err = gce.EnsureLoadBalancerDeleted(context.Background(), vals.ClusterName, apiService)
|
||||
assert.NoError(t, err)
|
||||
assertExternalLbResourcesDeleted(t, gce, apiService, vals, true)
|
||||
}
|
||||
|
||||
func TestEnsureLoadBalancerDeletedDeletesInternalLb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
vals := DefaultTestClusterValues()
|
||||
gce, err := fakeGCECloud(vals)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeNames := []string{"test-node-1"}
|
||||
_, err = createAndInsertNodes(gce, nodeNames, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
|
||||
apiService := fakeLoadbalancerService(string(LBTypeInternal))
|
||||
createInternalLoadBalancer(gce, apiService, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName)
|
||||
|
||||
err = gce.EnsureLoadBalancerDeleted(context.Background(), vals.ClusterName, apiService)
|
||||
assert.NoError(t, err)
|
||||
assertInternalLbResourcesDeleted(t, gce, apiService, vals, true)
|
||||
}
|
403
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go
generated
vendored
Normal file
@ -0,0 +1,403 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This file contains shared functions and variables used to set up tests for
|
||||
// ExternalLoadBalancer and InternalLoadBalancers. It currently cannot live in a
|
||||
// separate package from GCE because then it would cause a circular import.
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
)
|
||||
|
||||
// TODO(yankaiz): Create shared error types for both test and non-test code.
|
||||
const (
|
||||
eventReasonManualChange = "LoadBalancerManualChange"
|
||||
eventMsgFirewallChange = "Firewall change required by network admin"
|
||||
errPrefixGetTargetPool = "error getting load balancer's target pool:"
|
||||
errStrLbNoHosts = "Cannot EnsureLoadBalancer() with no hosts"
|
||||
wrongTier = "SupremeLuxury"
|
||||
errStrUnsupportedTier = "unsupported network tier: \"" + wrongTier + "\""
|
||||
)
|
||||
|
||||
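// TestClusterValues holds the fake project, region, zone and cluster identifiers shared by these tests.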
type TestClusterValues struct {
|
||||
ProjectID string
|
||||
Region string
|
||||
ZoneName string
|
||||
SecondaryZoneName string
|
||||
ClusterID string
|
||||
ClusterName string
|
||||
}
|
||||
|
||||
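// DefaultTestClusterValues returns the TestClusterValues used by default throughout these tests.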
func DefaultTestClusterValues() TestClusterValues {
|
||||
return TestClusterValues{
|
||||
ProjectID: "test-project",
|
||||
Region: "us-central1",
|
||||
ZoneName: "us-central1-b",
|
||||
SecondaryZoneName: "us-central1-c",
|
||||
ClusterID: "test-cluster-id",
|
||||
ClusterName: "Test Cluster Name",
|
||||
}
|
||||
}
|
||||
|
||||
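// fakeLoadbalancerService returns a minimal LoadBalancer-type Service whose load-balancer-type annotation is set to lbType.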
func fakeLoadbalancerService(lbType string) *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "",
|
||||
Annotations: map[string]string{ServiceAnnotationLoadBalancerType: lbType},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
SessionAffinity: v1.ServiceAffinityClientIP,
|
||||
Type: v1.ServiceTypeLoadBalancer,
|
||||
Ports: []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: int32(123)}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
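// FilewallChangeMsg is the formatted event expected when a firewall change must be performed manually by the network admin.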
var (
|
||||
FilewallChangeMsg = fmt.Sprintf("%s %s %s", v1.EventTypeNormal, eventReasonManualChange, eventMsgFirewallChange)
|
||||
)
|
||||
|
||||
type fakeRoundTripper struct{}
|
||||
|
||||
func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
|
||||
return nil, fmt.Errorf("err: test used fake http client")
|
||||
}
|
||||
|
||||
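// fakeGCECloud builds a GCECloud backed by cloud.MockGCE so that load balancer tests can run without touching the real GCE API.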
func fakeGCECloud(vals TestClusterValues) (*GCECloud, error) {
|
||||
client := &http.Client{Transport: &fakeRoundTripper{}}
|
||||
|
||||
service, err := compute.New(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Used in disk unit tests
|
||||
fakeManager := newFakeManager(vals.ProjectID, vals.Region)
|
||||
zonesWithNodes := createNodeZones([]string{vals.ZoneName})
|
||||
|
||||
alphaFeatureGate := NewAlphaFeatureGate([]string{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gce := &GCECloud{
|
||||
region: vals.Region,
|
||||
service: service,
|
||||
manager: fakeManager,
|
||||
managedZones: []string{vals.ZoneName},
|
||||
projectID: vals.ProjectID,
|
||||
networkProjectID: vals.ProjectID,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: zonesWithNodes,
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
ClusterID: fakeClusterID(vals.ClusterID),
|
||||
}
|
||||
|
||||
c := cloud.NewMockGCE(&gceProjectRouter{gce})
|
||||
c.MockTargetPools.AddInstanceHook = mock.AddInstanceHook
|
||||
c.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook
|
||||
c.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook
|
||||
c.MockAddresses.InsertHook = mock.InsertAddressHook
|
||||
c.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook
|
||||
c.MockAlphaAddresses.X = mock.AddressAttributes{}
|
||||
c.MockAddresses.X = mock.AddressAttributes{}
|
||||
|
||||
c.MockInstanceGroups.X = mock.InstanceGroupAttributes{
|
||||
InstanceMap: make(map[meta.Key]map[string]*compute.InstanceWithNamedPorts),
|
||||
Lock: &sync.Mutex{},
|
||||
}
|
||||
c.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook
|
||||
c.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook
|
||||
c.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook
|
||||
|
||||
c.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook
|
||||
c.MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook
|
||||
c.MockFirewalls.UpdateHook = mock.UpdateFirewallHook
|
||||
|
||||
keyGA := meta.GlobalKey("key-ga")
|
||||
c.MockZones.Objects[*keyGA] = &cloud.MockZonesObj{
|
||||
Obj: &compute.Zone{Name: vals.ZoneName, Region: gce.getRegionLink(vals.Region)},
|
||||
}
|
||||
|
||||
gce.c = c
|
||||
|
||||
return gce, nil
|
||||
}
|
||||
|
||||
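// createAndInsertNodes inserts a fake GCE instance for each node name that does not exist yet and returns corresponding v1.Node objects labeled with the given zone.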
func createAndInsertNodes(gce *GCECloud, nodeNames []string, zoneName string) ([]*v1.Node, error) {
|
||||
nodes := []*v1.Node{}
|
||||
|
||||
for _, name := range nodeNames {
|
||||
// Inserting the same node name twice causes an error - here we check if
|
||||
// the instance exists already before insertion.
|
||||
// TestUpdateExternalLoadBalancer inserts a new node, and relies on an older
|
||||
// node to already have been inserted.
|
||||
instance, _ := gce.getInstanceByName(name)
|
||||
|
||||
if instance == nil {
|
||||
err := gce.InsertInstance(
|
||||
gce.ProjectID(),
|
||||
zoneName,
|
||||
&compute.Instance{
|
||||
Name: name,
|
||||
Tags: &compute.Tags{
|
||||
Items: []string{name},
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nodes, err
|
||||
}
|
||||
}
|
||||
|
||||
nodes = append(
|
||||
nodes,
|
||||
&v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
kubeletapis.LabelHostname: name,
|
||||
kubeletapis.LabelZoneFailureDomain: zoneName,
|
||||
},
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
NodeInfo: v1.NodeSystemInfo{
|
||||
KubeProxyVersion: "v1.7.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling
|
||||
// gce.Initialize()
|
||||
func fakeClusterID(clusterID string) ClusterID {
|
||||
return ClusterID{
|
||||
clusterID: &clusterID,
|
||||
store: cache.NewStore(func(obj interface{}) (string, error) {
|
||||
return "", nil
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
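// assertExternalLbResources checks that the firewalls, target pool, HTTP health check and forwarding rule of an external load balancer exist with the expected settings.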
func assertExternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, nodeNames []string) {
|
||||
lbName := cloudprovider.GetLoadBalancerName(apiService)
|
||||
hcName := MakeNodesHealthCheckName(vals.ClusterID)
|
||||
|
||||
// Check that Firewalls are created for the LoadBalancer and the HealthCheck
|
||||
fwNames := []string{
|
||||
MakeFirewallName(lbName), // Firewalls for external LBs are prefixed with k8s-fw-
|
||||
MakeHealthCheckFirewallName(vals.ClusterID, hcName, true),
|
||||
}
|
||||
|
||||
for _, fwName := range fwNames {
|
||||
firewall, err := gce.GetFirewall(fwName)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nodeNames, firewall.TargetTags)
|
||||
assert.NotEmpty(t, firewall.SourceRanges)
|
||||
}
|
||||
|
||||
// Check that TargetPool is Created
|
||||
pool, err := gce.GetTargetPool(lbName, gce.region)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, lbName, pool.Name)
|
||||
assert.NotEmpty(t, pool.HealthChecks)
|
||||
assert.Equal(t, 1, len(pool.Instances))
|
||||
|
||||
// Check that HealthCheck is created
|
||||
healthcheck, err := gce.GetHttpHealthCheck(hcName)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, hcName, healthcheck.Name)
|
||||
|
||||
// Check that ForwardingRule is created
|
||||
fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, lbName, fwdRule.Name)
|
||||
assert.Equal(t, "TCP", fwdRule.IPProtocol)
|
||||
assert.Equal(t, "123-123", fwdRule.PortRange)
|
||||
}
|
||||
|
||||
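// assertExternalLbResourcesDeleted checks that the target pool and health check of an external load balancer are gone and, when firewallsDeleted is true, that its firewalls and forwarding rule are gone as well.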
func assertExternalLbResourcesDeleted(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, firewallsDeleted bool) {
|
||||
lbName := cloudprovider.GetLoadBalancerName(apiService)
|
||||
hcName := MakeNodesHealthCheckName(vals.ClusterID)
|
||||
|
||||
if firewallsDeleted {
|
||||
// Check that Firewalls are deleted for the LoadBalancer and the HealthCheck
|
||||
fwNames := []string{
|
||||
MakeFirewallName(lbName),
|
||||
MakeHealthCheckFirewallName(vals.ClusterID, hcName, true),
|
||||
}
|
||||
|
||||
for _, fwName := range fwNames {
|
||||
firewall, err := gce.GetFirewall(fwName)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, firewall)
|
||||
}
|
||||
|
||||
// Check forwarding rule is deleted
|
||||
fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, fwdRule)
|
||||
}
|
||||
|
||||
// Check that TargetPool is deleted
|
||||
pool, err := gce.GetTargetPool(lbName, gce.region)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, pool)
|
||||
|
||||
// Check that HealthCheck is deleted
|
||||
healthcheck, err := gce.GetHttpHealthCheck(hcName)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, healthcheck)
|
||||
|
||||
}
|
||||
|
||||
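// assertInternalLbResources checks that the instance group, firewalls, health check, backend service and forwarding rule of an internal load balancer exist with the expected settings.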
func assertInternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, nodeNames []string) {
|
||||
lbName := cloudprovider.GetLoadBalancerName(apiService)
|
||||
|
||||
// Check that Instance Group is created
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
ig, err := gce.GetInstanceGroup(igName, vals.ZoneName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, igName, ig.Name)
|
||||
|
||||
// Check that Firewalls are created for the LoadBalancer and the HealthCheck
|
||||
fwNames := []string{
|
||||
lbName, // Firewalls for internal LBs are named the same as the load balancer.
|
||||
makeHealthCheckFirewallName(lbName, vals.ClusterID, true),
|
||||
}
|
||||
|
||||
for _, fwName := range fwNames {
|
||||
firewall, err := gce.GetFirewall(fwName)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nodeNames, firewall.TargetTags)
|
||||
assert.NotEmpty(t, firewall.SourceRanges)
|
||||
}
|
||||
|
||||
// Check that HealthCheck is created
|
||||
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(apiService)
|
||||
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||
healthcheck, err := gce.GetHealthCheck(hcName)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, hcName, healthcheck.Name)
|
||||
|
||||
// Check that BackendService exists
|
||||
sharedBackend := shareBackendService(apiService)
|
||||
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", apiService.Spec.SessionAffinity)
|
||||
backendServiceLink := gce.getBackendServiceLink(backendServiceName)
|
||||
|
||||
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "TCP", bs.Protocol)
|
||||
assert.Equal(
|
||||
t,
|
||||
[]string{healthcheck.SelfLink},
|
||||
bs.HealthChecks,
|
||||
)
|
||||
|
||||
// Check that ForwardingRule is created
|
||||
fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, lbName, fwdRule.Name)
|
||||
assert.Equal(t, "TCP", fwdRule.IPProtocol)
|
||||
assert.Equal(t, backendServiceLink, fwdRule.BackendService)
|
||||
// If no Subnetwork is specified, it defaults to the GCE NetworkURL.
|
||||
assert.Equal(t, gce.NetworkURL(), fwdRule.Subnetwork)
|
||||
}
|
||||
|
||||
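// assertInternalLbResourcesDeleted checks that the instance group and health check of an internal load balancer are gone and, when firewallsDeleted is true, that its firewalls and forwarding rule are gone as well.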
func assertInternalLbResourcesDeleted(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, firewallsDeleted bool) {
|
||||
lbName := cloudprovider.GetLoadBalancerName(apiService)
|
||||
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(apiService)
|
||||
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
|
||||
|
||||
// ensureExternalLoadBalancer and ensureInternalLoadBalancer both create
|
||||
// Firewalls with the same name.
|
||||
if firewallsDeleted {
|
||||
// Check that Firewalls are deleted for the LoadBalancer and the HealthCheck
|
||||
fwNames := []string{
|
||||
MakeFirewallName(lbName),
|
||||
MakeHealthCheckFirewallName(vals.ClusterID, hcName, true),
|
||||
}
|
||||
|
||||
for _, fwName := range fwNames {
|
||||
firewall, err := gce.GetFirewall(fwName)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, firewall)
|
||||
}
|
||||
|
||||
// Check forwarding rule is deleted
|
||||
fwdRule, err := gce.GetRegionForwardingRule(lbName, gce.region)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, fwdRule)
|
||||
}
|
||||
|
||||
// Check that Instance Group is deleted
|
||||
igName := makeInstanceGroupName(vals.ClusterID)
|
||||
ig, err := gce.GetInstanceGroup(igName, vals.ZoneName)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, ig)
|
||||
|
||||
// Check that HealthCheck is deleted
|
||||
healthcheck, err := gce.GetHealthCheck(hcName)
|
||||
require.Error(t, err)
|
||||
assert.Nil(t, healthcheck)
|
||||
}
|
||||
|
||||
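// checkEvent waits up to two seconds for an event from the fake recorder, failing the test when the presence of a message prefixed with expected disagrees with shouldMatch. It returns true only when the wait timed out.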
func checkEvent(t *testing.T, recorder *record.FakeRecorder, expected string, shouldMatch bool) bool {
|
||||
select {
|
||||
case received := <-recorder.Events:
|
||||
if strings.HasPrefix(received, expected) != shouldMatch {
|
||||
t.Errorf("%s", received)
|
||||
if shouldMatch {
|
||||
t.Errorf("Should receive message \"%v\" but got \"%v\".", expected, received)
|
||||
} else {
|
||||
t.Errorf("Unexpected event \"%v\".", received)
|
||||
}
|
||||
}
|
||||
return false
|
||||
case <-time.After(2 * time.Second):
|
||||
if shouldMatch {
|
||||
t.Errorf("Should receive message \"%v\" but got timed out.", expected)
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
42
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go
generated
vendored
@ -17,12 +17,12 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
@ -37,31 +37,40 @@ func newNetworkEndpointGroupMetricContext(request string, zone string) *metricCo
|
||||
}
|
||||
|
||||
func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computealpha.NetworkEndpointGroup, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newNetworkEndpointGroupMetricContext("get", zone)
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
v, err := gce.c.AlphaNetworkEndpointGroups().Get(context.Background(), meta.ZonalKey(name, zone))
|
||||
v, err := gce.c.AlphaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computealpha.NetworkEndpointGroup, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newNetworkEndpointGroupMetricContext("list", zone)
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
negs, err := gce.c.AlphaNetworkEndpointGroups().List(context.Background(), zone, filter.None)
|
||||
negs, err := gce.c.AlphaNetworkEndpointGroups().List(ctx, zone, filter.None)
|
||||
return negs, mc.Observe(err)
|
||||
}
|
||||
|
||||
// AggregatedListNetworkEndpointGroup returns a map of zone -> endpoint group.
|
||||
func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computealpha.NetworkEndpointGroup, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newNetworkEndpointGroupMetricContext("aggregated_list", "")
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
// TODO: filter for the region the cluster is in.
|
||||
all, err := gce.c.AlphaNetworkEndpointGroups().AggregatedList(context.Background(), filter.None)
|
||||
all, err := gce.c.AlphaNetworkEndpointGroups().AggregatedList(ctx, filter.None)
|
||||
if err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
@ -79,22 +88,31 @@ func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*compute
|
||||
}
|
||||
|
||||
func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computealpha.NetworkEndpointGroup, zone string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("create", zone)
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().Insert(context.Background(), meta.ZonalKey(neg.Name, zone), neg))
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg))
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DeleteNetworkEndpointGroup(name string, zone string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("delete", zone)
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().Delete(context.Background(), meta.ZonalKey(name, zone)))
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone)))
|
||||
}
|
||||
|
||||
func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newNetworkEndpointGroupMetricContext("attach", zone)
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return mc.Observe(err)
|
||||
@ -102,10 +120,13 @@ func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*comp
|
||||
req := &computealpha.NetworkEndpointGroupsAttachEndpointsRequest{
|
||||
NetworkEndpoints: endpoints,
|
||||
}
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().AttachNetworkEndpoints(context.Background(), meta.ZonalKey(name, zone), req))
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req))
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newNetworkEndpointGroupMetricContext("detach", zone)
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return mc.Observe(err)
|
||||
@ -113,10 +134,13 @@ func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*comp
|
||||
req := &computealpha.NetworkEndpointGroupsDetachEndpointsRequest{
|
||||
NetworkEndpoints: endpoints,
|
||||
}
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().DetachNetworkEndpoints(context.Background(), meta.ZonalKey(name, zone), req))
|
||||
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req))
|
||||
}
|
||||
|
||||
func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computealpha.NetworkEndpointWithHealthStatus, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newNetworkEndpointGroupMetricContext("list_networkendpoints", zone)
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
@ -128,6 +152,6 @@ func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bo
|
||||
req := &computealpha.NetworkEndpointGroupsListEndpointsRequest{
|
||||
HealthStatus: healthStatus,
|
||||
}
|
||||
l, err := gce.c.AlphaNetworkEndpointGroups().ListNetworkEndpoints(context.Background(), meta.ZonalKey(name, zone), req, filter.None)
|
||||
l, err := gce.c.AlphaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None)
|
||||
return l, mc.Observe(err)
|
||||
}
|
||||
|
180
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_op.go
generated
vendored
@ -1,180 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/golang/glog"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
computev1 "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operationName string) (*computev1.Operation, error), mc *metricContext) error {
|
||||
if op == nil {
|
||||
return mc.Observe(fmt.Errorf("operation must not be nil"))
|
||||
}
|
||||
|
||||
if opIsDone(op) {
|
||||
return getErrorFromOp(op)
|
||||
}
|
||||
|
||||
opStart := time.Now()
|
||||
opName := op.Name
|
||||
|
||||
return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
|
||||
start := time.Now()
|
||||
gce.operationPollRateLimiter.Accept()
|
||||
duration := time.Now().Sub(start)
|
||||
if duration > 5*time.Second {
|
||||
glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
|
||||
}
|
||||
pollOp, err := getOperation(opName)
|
||||
if err != nil {
|
||||
glog.Warningf("GCE poll operation %s failed: pollOp: [%v] err: [%v] getErrorFromOp: [%v]",
|
||||
opName, pollOp, err, getErrorFromOp(pollOp))
|
||||
}
|
||||
|
||||
done := opIsDone(pollOp)
|
||||
if done {
|
||||
duration := time.Now().Sub(opStart)
|
||||
if duration > 1*time.Minute {
|
||||
// Log the JSON. It's cleaner than the %v structure.
|
||||
enc, err := pollOp.MarshalJSON()
|
||||
if err != nil {
|
||||
glog.Warningf("waitForOperation: long operation (%v): %v (failed to encode to JSON: %v)",
|
||||
duration, pollOp, err)
|
||||
} else {
|
||||
glog.V(2).Infof("waitForOperation: long operation (%v): %v",
|
||||
duration, string(enc))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return done, mc.Observe(getErrorFromOp(pollOp))
|
||||
})
|
||||
}
|
||||
|
||||
func opIsDone(op *computev1.Operation) bool {
|
||||
return op != nil && op.Status == "DONE"
|
||||
}
|
||||
|
||||
func getErrorFromOp(op *computev1.Operation) error {
|
||||
if op != nil && op.Error != nil && len(op.Error.Errors) > 0 {
|
||||
err := &googleapi.Error{
|
||||
Code: int(op.HttpErrorStatusCode),
|
||||
Message: op.Error.Errors[0].Message,
|
||||
}
|
||||
glog.Errorf("GCE operation failed: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForGlobalOp(op gceObject, mc *metricContext) error {
|
||||
return gce.waitForGlobalOpInProject(op, gce.ProjectID(), mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForRegionOp(op gceObject, region string, mc *metricContext) error {
|
||||
return gce.waitForRegionOpInProject(op, gce.ProjectID(), region, mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForZoneOp(op gceObject, zone string, mc *metricContext) error {
|
||||
return gce.waitForZoneOpInProject(op, gce.ProjectID(), zone, mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForGlobalOpInProject(op gceObject, projectID string, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceAlpha.GlobalOperations.Get(projectID, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computebeta.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceBeta.GlobalOperations.Get(projectID, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computev1.Operation:
|
||||
return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
|
||||
return gce.service.GlobalOperations.Get(projectID, operationName).Do()
|
||||
}, mc)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForRegionOpInProject(op gceObject, projectID, region string, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceAlpha.RegionOperations.Get(projectID, region, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computebeta.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceBeta.RegionOperations.Get(projectID, region, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computev1.Operation:
|
||||
return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
|
||||
return gce.service.RegionOperations.Get(projectID, region, operationName).Do()
|
||||
}, mc)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForZoneOpInProject(op gceObject, projectID, zone string, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceAlpha.ZoneOperations.Get(projectID, zone, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computebeta.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceBeta.ZoneOperations.Get(projectID, zone, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computev1.Operation:
|
||||
return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
|
||||
return gce.service.ZoneOperations.Get(projectID, zone, operationName).Do()
|
||||
}, mc)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func convertToV1Operation(object gceObject) *computev1.Operation {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var op computev1.Operation
|
||||
if err := json.Unmarshal(enc, &op); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 operation: %v", object, err))
|
||||
}
|
||||
return &op
|
||||
}
|
16
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
@ -37,10 +38,13 @@ func newRoutesMetricContext(request string) *metricContext {
|
||||
|
||||
// ListRoutes in the cloud environment.
|
||||
func (gce *GCECloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newRoutesMetricContext("list")
|
||||
prefix := truncateClusterName(clusterName)
|
||||
f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", gce.NetworkURL()).AndRegexp("description", k8sNodeRouteTag)
|
||||
routes, err := gce.c.Routes().List(context.Background(), f)
|
||||
routes, err := gce.c.Routes().List(ctx, f)
|
||||
if err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
@ -60,6 +64,9 @@ func (gce *GCECloud) ListRoutes(ctx context.Context, clusterName string) ([]*clo
|
||||
|
||||
// CreateRoute in the cloud environment.
|
||||
func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newRoutesMetricContext("create")
|
||||
|
||||
targetInstance, err := gce.getInstanceByName(mapNodeNameToInstanceName(route.TargetNode))
|
||||
@ -74,7 +81,7 @@ func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHi
|
||||
Priority: 1000,
|
||||
Description: k8sNodeRouteTag,
|
||||
}
|
||||
err = gce.c.Routes().Insert(context.Background(), meta.GlobalKey(cr.Name), cr)
|
||||
err = gce.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr)
|
||||
if isHTTPErrorCode(err, http.StatusConflict) {
|
||||
glog.Infof("Route %q already exists.", cr.Name)
|
||||
err = nil
|
||||
@ -84,8 +91,11 @@ func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHi
|
||||
|
||||
// DeleteRoute from the cloud environment.
|
||||
func (gce *GCECloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newRoutesMetricContext("delete")
|
||||
return mc.Observe(gce.c.Routes().Delete(context.Background(), meta.GlobalKey(route.Name)))
|
||||
return mc.Observe(gce.c.Routes().Delete(ctx, meta.GlobalKey(route.Name)))
|
||||
}
|
||||
|
||||
func truncateClusterName(clusterName string) string {
|
||||
|
116
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
func newSecurityPolicyMetricContextWithVersion(request, version string) *metricContext {
|
||||
return newGenericMetricContext("securitypolicy", request, "", unusedMetricLabel, version)
|
||||
}
|
||||
|
||||
// GetBetaSecurityPolicy retrieves a security policy.
|
||||
func (gce *GCECloud) GetBetaSecurityPolicy(name string) (*computebeta.SecurityPolicy, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("get", computeBetaVersion)
|
||||
v, err := gce.c.BetaSecurityPolicies().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListBetaSecurityPolicy lists all security policies in the project.
|
||||
func (gce *GCECloud) ListBetaSecurityPolicy() ([]*computebeta.SecurityPolicy, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("list", computeBetaVersion)
|
||||
v, err := gce.c.BetaSecurityPolicies().List(ctx, filter.None)
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// CreateBetaSecurityPolicy creates the given security policy.
|
||||
func (gce *GCECloud) CreateBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("create", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaSecurityPolicies().Insert(ctx, meta.GlobalKey(sp.Name), sp))
|
||||
}
|
||||
|
||||
// DeleteBetaSecurityPolicy deletes the given security policy.
|
||||
func (gce *GCECloud) DeleteBetaSecurityPolicy(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("delete", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaSecurityPolicies().Delete(ctx, meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// PatchBetaSecurityPolicy applies the given security policy as a
|
||||
// patch to an existing security policy.
|
||||
func (gce *GCECloud) PatchBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("patch", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaSecurityPolicies().Patch(ctx, meta.GlobalKey(sp.Name), sp))
|
||||
}
|
||||
|
||||
// GetRuleForBetaSecurityPolicy gets rule from a security policy.
|
||||
func (gce *GCECloud) GetRuleForBetaSecurityPolicy(name string) (*computebeta.SecurityPolicyRule, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("get_rule", computeBetaVersion)
|
||||
v, err := gce.c.BetaSecurityPolicies().GetRule(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// AddRuletoBetaSecurityPolicy adds the given security policy rule to
|
||||
// a security policy.
|
||||
func (gce *GCECloud) AddRuletoBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("add_rule", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaSecurityPolicies().AddRule(ctx, meta.GlobalKey(name), spr))
|
||||
}
|
||||
|
||||
// PatchRuleForBetaSecurityPolicy patches the given security policy
|
||||
// rule to a security policy.
|
||||
func (gce *GCECloud) PatchRuleForBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("patch_rule", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaSecurityPolicies().PatchRule(ctx, meta.GlobalKey(name), spr))
|
||||
}
|
||||
|
||||
// RemoveRuleFromBetaSecurityPolicy removes rule from a security policy.
|
||||
func (gce *GCECloud) RemoveRuleFromBetaSecurityPolicy(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newSecurityPolicyMetricContextWithVersion("remove_rule", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaSecurityPolicies().RemoveRule(ctx, meta.GlobalKey(name)))
|
||||
}
|
28
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
@ -30,37 +29,52 @@ func newTargetPoolMetricContext(request, region string) *metricContext {
|
||||
|
||||
// GetTargetPool returns the TargetPool by name.
|
||||
func (gce *GCECloud) GetTargetPool(name, region string) (*compute.TargetPool, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newTargetPoolMetricContext("get", region)
|
||||
v, err := gce.c.TargetPools().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
v, err := gce.c.TargetPools().Get(ctx, meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// CreateTargetPool creates the passed TargetPool
|
||||
func (gce *GCECloud) CreateTargetPool(tp *compute.TargetPool, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newTargetPoolMetricContext("create", region)
|
||||
return mc.Observe(gce.c.TargetPools().Insert(context.Background(), meta.RegionalKey(tp.Name, region), tp))
|
||||
return mc.Observe(gce.c.TargetPools().Insert(ctx, meta.RegionalKey(tp.Name, region), tp))
|
||||
}
|
||||
|
||||
// DeleteTargetPool deletes TargetPool by name.
|
||||
func (gce *GCECloud) DeleteTargetPool(name, region string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newTargetPoolMetricContext("delete", region)
|
||||
return mc.Observe(gce.c.TargetPools().Delete(context.Background(), meta.RegionalKey(name, region)))
|
||||
return mc.Observe(gce.c.TargetPools().Delete(ctx, meta.RegionalKey(name, region)))
|
||||
}
|
||||
|
||||
// AddInstancesToTargetPool adds instances by link to the TargetPool
|
||||
func (gce *GCECloud) AddInstancesToTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
req := &compute.TargetPoolsAddInstanceRequest{
|
||||
Instances: instanceRefs,
|
||||
}
|
||||
mc := newTargetPoolMetricContext("add_instances", region)
|
||||
return mc.Observe(gce.c.TargetPools().AddInstance(context.Background(), meta.RegionalKey(name, region), req))
|
||||
return mc.Observe(gce.c.TargetPools().AddInstance(ctx, meta.RegionalKey(name, region), req))
|
||||
}
|
||||
|
||||
// RemoveInstancesFromTargetPool removes instances by link to the TargetPool
|
||||
func (gce *GCECloud) RemoveInstancesFromTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
req := &compute.TargetPoolsRemoveInstanceRequest{
|
||||
Instances: instanceRefs,
|
||||
}
|
||||
mc := newTargetPoolMetricContext("remove_instances", region)
|
||||
return mc.Observe(gce.c.TargetPools().RemoveInstance(context.Background(), meta.RegionalKey(name, region), req))
|
||||
return mc.Observe(gce.c.TargetPools().RemoveInstance(ctx, meta.RegionalKey(name, region), req))
|
||||
}
|
||||
|
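Note (illustrative sketch, not part of the vendored diff): the change above replaces context.Background() with a per-call context from cloud.ContextWithCallTimeout(), so each GCE API call is bounded by a timeout and cancelled when its wrapper returns. The stand-alone program below shows the same pattern using only the standard library; the 30-second value is an assumed placeholder, not the timeout used by the GCE provider.

package main

import (
	"context"
	"fmt"
	"time"
)

// contextWithCallTimeout mimics the shape of cloud.ContextWithCallTimeout().
func contextWithCallTimeout() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), 30*time.Second)
}

// doCall stands in for a GCE API call that respects context cancellation.
func doCall(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := contextWithCallTimeout()
	defer cancel() // always release the timer, mirroring the deferred cancel() in the diff

	fmt.Println(doCall(ctx))
}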
62
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
package gce

import (
	"context"

	compute "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)
@ -31,34 +30,49 @@ func newTargetProxyMetricContext(request string) *metricContext {

// GetTargetHttpProxy returns the UrlMap by name.
func (gce *GCECloud) GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("get")
	v, err := gce.c.TargetHttpProxies().Get(context.Background(), meta.GlobalKey(name))
	v, err := gce.c.TargetHttpProxies().Get(ctx, meta.GlobalKey(name))
	return v, mc.Observe(err)
}

// CreateTargetHttpProxy creates a TargetHttpProxy
func (gce *GCECloud) CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("create")
	return mc.Observe(gce.c.TargetHttpProxies().Insert(context.Background(), meta.GlobalKey(proxy.Name), proxy))
	return mc.Observe(gce.c.TargetHttpProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy))
}

// SetUrlMapForTargetHttpProxy sets the given UrlMap for the given TargetHttpProxy.
func (gce *GCECloud) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	ref := &compute.UrlMapReference{UrlMap: urlMap.SelfLink}
	mc := newTargetProxyMetricContext("set_url_map")
	return mc.Observe(gce.c.TargetHttpProxies().SetUrlMap(context.Background(), meta.GlobalKey(proxy.Name), ref))
	return mc.Observe(gce.c.TargetHttpProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref))
}

// DeleteTargetHttpProxy deletes the TargetHttpProxy by name.
func (gce *GCECloud) DeleteTargetHttpProxy(name string) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("delete")
	return mc.Observe(gce.c.TargetHttpProxies().Delete(context.Background(), meta.GlobalKey(name)))
	return mc.Observe(gce.c.TargetHttpProxies().Delete(ctx, meta.GlobalKey(name)))
}

// ListTargetHttpProxies lists all TargetHttpProxies in the project.
func (gce *GCECloud) ListTargetHttpProxies() ([]*compute.TargetHttpProxy, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("list")
	v, err := gce.c.TargetHttpProxies().List(context.Background(), filter.None)
	v, err := gce.c.TargetHttpProxies().List(ctx, filter.None)
	return v, mc.Observe(err)
}

@ -66,42 +80,60 @@ func (gce *GCECloud) ListTargetHttpProxies() ([]*compute.TargetHttpProxy, error)

// GetTargetHttpsProxy returns the UrlMap by name.
func (gce *GCECloud) GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("get")
	v, err := gce.c.TargetHttpsProxies().Get(context.Background(), meta.GlobalKey(name))
	v, err := gce.c.TargetHttpsProxies().Get(ctx, meta.GlobalKey(name))
	return v, mc.Observe(err)
}

// CreateTargetHttpsProxy creates a TargetHttpsProxy
func (gce *GCECloud) CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("create")
	return mc.Observe(gce.c.TargetHttpsProxies().Insert(context.Background(), meta.GlobalKey(proxy.Name), proxy))
	return mc.Observe(gce.c.TargetHttpsProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy))
}

// SetUrlMapForTargetHttpsProxy sets the given UrlMap for the given TargetHttpsProxy.
func (gce *GCECloud) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("set_url_map")
	ref := &compute.UrlMapReference{UrlMap: urlMap.SelfLink}
	return mc.Observe(gce.c.TargetHttpsProxies().SetUrlMap(context.Background(), meta.GlobalKey(proxy.Name), ref))
	return mc.Observe(gce.c.TargetHttpsProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref))
}

// SetSslCertificateForTargetHttpsProxy sets the given SslCertificate for the given TargetHttpsProxy.
func (gce *GCECloud) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, sslCert *compute.SslCertificate) error {
func (gce *GCECloud) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, sslCertURLs []string) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("set_ssl_cert")
	req := &compute.TargetHttpsProxiesSetSslCertificatesRequest{
		SslCertificates: []string{sslCert.SelfLink},
		SslCertificates: sslCertURLs,
	}
	return mc.Observe(gce.c.TargetHttpsProxies().SetSslCertificates(context.Background(), meta.GlobalKey(proxy.Name), req))
	return mc.Observe(gce.c.TargetHttpsProxies().SetSslCertificates(ctx, meta.GlobalKey(proxy.Name), req))
}

// DeleteTargetHttpsProxy deletes the TargetHttpsProxy by name.
func (gce *GCECloud) DeleteTargetHttpsProxy(name string) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("delete")
	return mc.Observe(gce.c.TargetHttpsProxies().Delete(context.Background(), meta.GlobalKey(name)))
	return mc.Observe(gce.c.TargetHttpsProxies().Delete(ctx, meta.GlobalKey(name)))
}

// ListTargetHttpsProxies lists all TargetHttpsProxies in the project.
func (gce *GCECloud) ListTargetHttpsProxies() ([]*compute.TargetHttpsProxy, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newTargetProxyMetricContext("list")
	v, err := gce.c.TargetHttpsProxies().List(context.Background(), filter.None)
	v, err := gce.c.TargetHttpsProxies().List(ctx, filter.None)
	return v, mc.Observe(err)
}
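Note (illustrative sketch, not part of the vendored diff): SetSslCertificateForTargetHttpsProxy now takes the self links of existing SslCertificate resources rather than a single *compute.SslCertificate, and simply forwards them in the request shown above. A minimal program constructing that request; the project and certificate names are placeholders.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Real callers pass the SelfLink values of SslCertificate resources they
	// have already created; these URLs are examples only.
	req := &compute.TargetHttpsProxiesSetSslCertificatesRequest{
		SslCertificates: []string{
			"https://www.googleapis.com/compute/v1/projects/example/global/sslCertificates/cert-a",
			"https://www.googleapis.com/compute/v1/projects/example/global/sslCertificates/cert-b",
		},
	}
	fmt.Println(len(req.SslCertificates)) // prints 2
}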
72
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go
generated
vendored
@ -18,16 +18,12 @@ package gce

import (
	"context"
	"encoding/json"
	"reflect"
	"strings"
	"testing"

	"golang.org/x/oauth2/google"

	computealpha "google.golang.org/api/compute/v0.alpha"
	computebeta "google.golang.org/api/compute/v0.beta"
	computev1 "google.golang.org/api/compute/v1"
	"k8s.io/kubernetes/pkg/cloudprovider"
)

@ -480,102 +476,40 @@ func TestGenerateCloudConfigs(t *testing.T) {
	}
}

func TestConvertToV1Operation(t *testing.T) {
	v1Op := getTestOperation()
	enc, _ := v1Op.MarshalJSON()
	var op interface{}
	var alphaOp computealpha.Operation
	var betaOp computebeta.Operation

	if err := json.Unmarshal(enc, &alphaOp); err != nil {
		t.Errorf("Failed to unmarshal operation: %v", err)
	}

	if err := json.Unmarshal(enc, &betaOp); err != nil {
		t.Errorf("Failed to unmarshal operation: %v", err)
	}

	op = convertToV1Operation(&alphaOp)
	if _, ok := op.(*computev1.Operation); ok {
		if !reflect.DeepEqual(op, v1Op) {
			t.Errorf("Failed to maintain consistency across conversion")
		}
	} else {
		t.Errorf("Expect output to be type v1 operation, but got %v", op)
	}

	op = convertToV1Operation(&betaOp)
	if _, ok := op.(*computev1.Operation); ok {
		if !reflect.DeepEqual(op, v1Op) {
			t.Errorf("Failed to maintain consistency across conversion")
		}
	} else {
		t.Errorf("Expect output to be type v1 operation, but got %v", op)
	}
}

func getTestOperation() *computev1.Operation {
	return &computev1.Operation{
		Name:        "test",
		Description: "test",
		Id:          uint64(12345),
		Error: &computev1.OperationError{
			Errors: []*computev1.OperationErrorErrors{
				{
					Code:    "555",
					Message: "error",
				},
			},
		},
	}
}

func TestNewAlphaFeatureGate(t *testing.T) {
	knownAlphaFeatures["foo"] = true
	knownAlphaFeatures["bar"] = true

	testCases := []struct {
		alphaFeatures  []string
		expectEnabled  []string
		expectDisabled []string
		expectError    bool
	}{
		// enable foo bar
		{
			alphaFeatures:  []string{"foo", "bar"},
			expectEnabled:  []string{"foo", "bar"},
			expectDisabled: []string{"aaa"},
			expectError:    false,
		},
		// no alpha feature
		{
			alphaFeatures:  []string{},
			expectEnabled:  []string{},
			expectDisabled: []string{"foo", "bar"},
			expectError:    false,
		},
		// unsupported alpha feature
		{
			alphaFeatures:  []string{"aaa", "foo"},
			expectError:    true,
			expectEnabled:  []string{"foo"},
			expectDisabled: []string{"aaa"},
			expectDisabled: []string{},
		},
		// enable foo
		{
			alphaFeatures:  []string{"foo"},
			expectEnabled:  []string{"foo"},
			expectDisabled: []string{"bar"},
			expectError:    false,
		},
	}

	for _, tc := range testCases {
		featureGate, err := NewAlphaFeatureGate(tc.alphaFeatures)

		if (tc.expectError && err == nil) || (!tc.expectError && err != nil) {
			t.Errorf("Expect error to be %v, but got error %v", tc.expectError, err)
		}
		featureGate := NewAlphaFeatureGate(tc.alphaFeatures)

		for _, key := range tc.expectEnabled {
			if !featureGate.Enabled(key) {
@ -588,8 +522,6 @@ func TestNewAlphaFeatureGate(t *testing.T) {
			}
		}
	}
	delete(knownAlphaFeatures, "foo")
	delete(knownAlphaFeatures, "bar")
}

func TestGetRegionInURL(t *testing.T) {
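Note (illustrative sketch, not part of the vendored diff): the test change reflects that NewAlphaFeatureGate no longer returns an error, so callers inside the gce package construct the gate and query it directly. A hypothetical call site; the feature name below is only an example, not a feature registered by the provider.

	featureGate := NewAlphaFeatureGate([]string{"example-alpha-feature"})
	if featureGate.Enabled("example-alpha-feature") {
		// take the alpha code path
	}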
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go
generated
vendored
@ -25,7 +25,7 @@ import (

	"github.com/golang/glog"
	"google.golang.org/api/googleapi"
	tpuapi "google.golang.org/api/tpu/v1alpha1"
	tpuapi "google.golang.org/api/tpu/v1"

	"k8s.io/apimachinery/pkg/util/wait"
)
@ -165,7 +165,7 @@ func (gce *GCECloud) waitForTPUOp(interval, timeout time.Duration, op *tpuapi.Op
// newTPUMetricContext returns a new metricContext used for recording metrics
// of Cloud TPU API calls.
func newTPUMetricContext(request, zone string) *metricContext {
	return newGenericMetricContext("tpus", request, unusedMetricLabel, zone, "alpha")
	return newGenericMetricContext("tpus", request, unusedMetricLabel, zone, "v1")
}

// getErrorFromTPUOp returns the error in the failed op, or nil if the op
28
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go
generated
vendored
@ -17,10 +17,9 @@ limitations under the License.
package gce

import (
	"context"

	compute "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)
@ -31,32 +30,47 @@ func newUrlMapMetricContext(request string) *metricContext {

// GetUrlMap returns the UrlMap by name.
func (gce *GCECloud) GetUrlMap(name string) (*compute.UrlMap, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newUrlMapMetricContext("get")
	v, err := gce.c.UrlMaps().Get(context.Background(), meta.GlobalKey(name))
	v, err := gce.c.UrlMaps().Get(ctx, meta.GlobalKey(name))
	return v, mc.Observe(err)
}

// CreateUrlMap creates a url map
func (gce *GCECloud) CreateUrlMap(urlMap *compute.UrlMap) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newUrlMapMetricContext("create")
	return mc.Observe(gce.c.UrlMaps().Insert(context.Background(), meta.GlobalKey(urlMap.Name), urlMap))
	return mc.Observe(gce.c.UrlMaps().Insert(ctx, meta.GlobalKey(urlMap.Name), urlMap))
}

// UpdateUrlMap applies the given UrlMap as an update
func (gce *GCECloud) UpdateUrlMap(urlMap *compute.UrlMap) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newUrlMapMetricContext("update")
	return mc.Observe(gce.c.UrlMaps().Update(context.Background(), meta.GlobalKey(urlMap.Name), urlMap))
	return mc.Observe(gce.c.UrlMaps().Update(ctx, meta.GlobalKey(urlMap.Name), urlMap))
}

// DeleteUrlMap deletes a url map by name.
func (gce *GCECloud) DeleteUrlMap(name string) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newUrlMapMetricContext("delete")
	return mc.Observe(gce.c.UrlMaps().Delete(context.Background(), meta.GlobalKey(name)))
	return mc.Observe(gce.c.UrlMaps().Delete(ctx, meta.GlobalKey(name)))
}

// ListUrlMaps lists all UrlMaps in the project.
func (gce *GCECloud) ListUrlMaps() ([]*compute.UrlMap, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newUrlMapMetricContext("list")
	v, err := gce.c.UrlMaps().List(context.Background(), filter.None)
	v, err := gce.c.UrlMaps().List(ctx, filter.None)
	return v, mc.Observe(err)
}
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go
generated
vendored
@ -24,6 +24,7 @@ import (

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
)

@ -72,8 +73,11 @@ func (gce *GCECloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeN

// ListZonesInRegion returns all zones in a GCP region
func (gce *GCECloud) ListZonesInRegion(region string) ([]*compute.Zone, error) {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	mc := newZonesMetricContext("list", region)
	list, err := gce.c.Zones().List(context.Background(), filter.Regexp("region", gce.getRegionLink(region)))
	list, err := gce.c.Zones().List(ctx, filter.Regexp("region", gce.getRegionLink(region)))
	if err != nil {
		return nil, mc.Observe(err)
	}
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go
generated
vendored
@ -50,17 +50,26 @@ type gceRateLimiter struct {
// operations.
func (l *gceRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) error {
	if key.Operation == "Get" && key.Service == "Operations" {
		ch := make(chan struct{})
		go func() {
			l.gce.operationPollRateLimiter.Accept()
			close(ch)
		}()
		select {
		case <-ch:
			break
		case <-ctx.Done():
			return ctx.Err()
		// Wait a minimum amount of time regardless of rate limiter.
		rl := &cloud.MinimumRateLimiter{
			// Convert flowcontrol.RateLimiter into cloud.RateLimiter
			RateLimiter: &cloud.AcceptRateLimiter{
				Acceptor: l.gce.operationPollRateLimiter,
			},
			Minimum: operationPollInterval,
		}
		return rl.Accept(ctx, key)
	}
	return nil
}

// CreateGCECloudWithCloud is a helper function to create an instance of GCECloud with the
// given Cloud interface implementation. Typical usage is to use cloud.NewMockGCE to get a
// handle to a mock Cloud instance and then use that for testing.
func CreateGCECloudWithCloud(config *CloudConfig, c cloud.Cloud) (*GCECloud, error) {
	gceCloud, err := CreateGCECloud(config)
	if err == nil {
		gceCloud.c = c
	}
	return gceCloud, err
}
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go
generated
vendored
@ -70,7 +70,7 @@ type DeviceMetadata struct {
// See http://docs.openstack.org/user-guide/cli_config_drive.html
type Metadata struct {
	UUID             string           `json:"uuid"`
	Hostname         string           `json:"hostname"`
	Name             string           `json:"name"`
	AvailabilityZone string           `json:"availability_zone"`
	Devices          []DeviceMetadata `json:"devices,omitempty"`
	// .. and other fields we don't care about. Expand as necessary.
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata_test.go
generated
vendored
@ -23,7 +23,7 @@ import (

var FakeMetadata = Metadata{
	UUID:             "83679162-1378-4288-a2d4-70e13ec132aa",
	Hostname:         "test",
	Name:             "test",
	AvailabilityZone: "nova",
}

@ -81,8 +81,8 @@ func TestParseMetadata(t *testing.T) {
		t.Fatalf("Should succeed when provided with valid data: %s", err)
	}

	if md.Hostname != "test.novalocal" {
		t.Errorf("incorrect hostname: %s", md.Hostname)
	if md.Name != "test" {
		t.Errorf("incorrect name: %s", md.Name)
	}

	if md.UUID != "83679162-1378-4288-a2d4-70e13ec132aa" {
22
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go
generated
vendored
@ -415,13 +415,10 @@ func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuild
	return err
}

func getServerByName(client *gophercloud.ServiceClient, name types.NodeName, showOnlyActive bool) (*servers.Server, error) {
func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
	opts := servers.ListOpts{
		Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
	}
	if showOnlyActive {
		opts.Status = "ACTIVE"
	}

	pager := servers.List(client, opts)

@ -504,7 +501,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
}

func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) {
	srv, err := getServerByName(client, name, true)
	srv, err := getServerByName(client, name)
	if err != nil {
		return nil, err
	}
@ -605,8 +602,17 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
}

func isNotFound(err error) bool {
	e, ok := err.(*gophercloud.ErrUnexpectedResponseCode)
	return ok && e.Actual == http.StatusNotFound
	if _, ok := err.(gophercloud.ErrDefault404); ok {
		return true
	}

	if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
		if errCode.Actual == http.StatusNotFound {
			return true
		}
	}

	return false
}

// Zones indicates that we support zones
@ -666,7 +672,7 @@ func (os *OpenStack) GetZoneByNodeName(ctx context.Context, nodeName types.NodeN
		return cloudprovider.Zone{}, err
	}

	srv, err := getServerByName(compute, nodeName, true)
	srv, err := getServerByName(compute, nodeName)
	if err != nil {
		if err == ErrNotFound {
			return cloudprovider.Zone{}, cloudprovider.InstanceNotFound
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go
generated
vendored
@ -42,6 +42,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {

	compute, err := os.NewComputeV2()
	if err != nil {
		glog.Errorf("unable to access compute v2 API : %v", err)
		return nil, false
	}

@ -60,7 +61,7 @@ func (i *Instances) CurrentNodeName(ctx context.Context, hostname string) (types
	if err != nil {
		return "", err
	}
	return types.NodeName(md.Hostname), nil
	return types.NodeName(md.Name), nil
}

// AddSSHKeyToAllInstances is not implemented for OpenStack
@ -105,18 +106,6 @@ func (i *Instances) NodeAddressesByProviderID(ctx context.Context, providerID st
	return addresses, nil
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (i *Instances) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
	srv, err := getServerByName(i.compute, name, true)
	if err != nil {
		if err == ErrNotFound {
			return "", cloudprovider.InstanceNotFound
		}
		return "", err
	}
	return srv.ID, nil
}

// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
@ -141,6 +130,11 @@ func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID s
	return true, nil
}

// InstanceShutdownByProviderID returns true if the instances is in safe state to detach volumes
func (i *Instances) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, cloudprovider.NotImplemented
}

// InstanceID returns the kubelet's cloud provider ID.
func (os *OpenStack) InstanceID() (string, error) {
	if len(os.localInstanceID) == 0 {
@ -155,7 +149,7 @@ func (os *OpenStack) InstanceID() (string, error) {

// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
	srv, err := getServerByName(i.compute, name, true)
	srv, err := getServerByName(i.compute, name)
	if err != nil {
		if err == ErrNotFound {
			return "", cloudprovider.InstanceNotFound
@ -188,7 +182,7 @@ func (i *Instances) InstanceTypeByProviderID(ctx context.Context, providerID str

// InstanceType returns the type of the specified instance.
func (i *Instances) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
	srv, err := getServerByName(i.compute, name, true)
	srv, err := getServerByName(i.compute, name)

	if err != nil {
		return "", err
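Note (illustrative sketch, not part of the vendored diff): the OpenStack provider currently answers InstanceShutdownByProviderID with cloudprovider.NotImplemented, so a consumer has to treat that error as "no shutdown information" rather than a hard failure. A hypothetical caller-side fragment; instances, ctx and providerID are assumed to exist in the surrounding code.

	shutdown, err := instances.InstanceShutdownByProviderID(ctx, providerID)
	if err == cloudprovider.NotImplemented {
		// provider cannot tell; assume the instance is still running
		shutdown = false
	} else if err != nil {
		return err
	}
	_ = shutdown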
83
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go
generated
vendored
@ -70,6 +70,7 @@ const (
	errorStatus = "ERROR"

	ServiceAnnotationLoadBalancerFloatingNetworkID = "loadbalancer.openstack.org/floating-network-id"
	ServiceAnnotationLoadBalancerSubnetID          = "loadbalancer.openstack.org/subnet-id"

	// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
	// to indicate that we want an internal loadbalancer service.
@ -367,7 +368,7 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID s
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		_, err := loadbalancers.Get(client, loadbalancerID).Extract()
		if err != nil {
			if err == ErrNotFound {
			if isNotFound(err) {
				return true, nil
			}
			return false, err
@ -486,21 +487,27 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, s

// The LB needs to be configured with instance addresses on the same
// subnet as the LB (aka opts.SubnetID). Currently we're just
// guessing that the node's InternalIP is the right address - and that
// should be sufficient for all "normal" cases.
// guessing that the node's InternalIP is the right address.
// In case no InternalIP can be found, ExternalIP is tried.
// If neither InternalIP nor ExternalIP can be found an error is
// returned.
func nodeAddressForLB(node *v1.Node) (string, error) {
	addrs := node.Status.Addresses
	if len(addrs) == 0 {
		return "", ErrNoAddressFound
	}

	for _, addr := range addrs {
		if addr.Type == v1.NodeInternalIP {
			return addr.Address, nil
	allowedAddrTypes := []v1.NodeAddressType{v1.NodeInternalIP, v1.NodeExternalIP}

	for _, allowedAddrType := range allowedAddrTypes {
		for _, addr := range addrs {
			if addr.Type == allowedAddrType {
				return addr.Address, nil
			}
		}
	}

	return addrs[0].Address, nil
	return "", ErrNoAddressFound
}

//getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting
@ -547,14 +554,14 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string,
}

// getNodeSecurityGroupIDForLB lists node-security-groups for specific nodes
func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
	nodeSecurityGroupIDs := sets.NewString()
func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
	secGroupNames := sets.NewString()

	for _, node := range nodes {
		nodeName := types.NodeName(node.Name)
		srv, err := getServerByName(compute, nodeName, true)
		srv, err := getServerByName(compute, nodeName)
		if err != nil {
			return nodeSecurityGroupIDs.List(), err
			return []string{}, err
		}

		// use the first node-security-groups
@ -562,11 +569,19 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1
		// case 1: node1:SG1 node2:SG2 return SG1,SG2
		// case 2: node1:SG1,SG2 node2:SG3,SG4 return SG1,SG3
		// case 3: node1:SG1,SG2 node2:SG2,SG3 return SG1,SG2
		securityGroupName := srv.SecurityGroups[0]["name"]
		nodeSecurityGroupIDs.Insert(securityGroupName.(string))
		secGroupNames.Insert(srv.SecurityGroups[0]["name"].(string))
	}

	return nodeSecurityGroupIDs.List(), nil
	secGroupIDs := make([]string, secGroupNames.Len())
	for i, name := range secGroupNames.List() {
		secGroupID, err := groups.IDFromName(network, name)
		if err != nil {
			return []string{}, err
		}
		secGroupIDs[i] = secGroupID
	}

	return secGroupIDs, nil
}

// isSecurityGroupNotFound return true while 'err' is object of gophercloud.ErrResourceNotFound
@ -643,6 +658,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string
		return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name)
	}

	lbaas.opts.SubnetID = getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID)
	if len(lbaas.opts.SubnetID) == 0 {
		// Get SubnetID automatically.
		// The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
@ -815,6 +831,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string
			if !memberExists(members, addr, int(port.NodePort)) {
				glog.V(4).Infof("Creating member for pool %s", pool.ID)
				_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
					Name:         fmt.Sprintf("member_%s_%d_%s", name, portIndex, node.Name),
					ProtocolPort: int(port.NodePort),
					Address:      addr,
					SubnetID:     lbaas.opts.SubnetID,
@ -852,6 +869,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string
		if monitorID == "" && lbaas.opts.CreateMonitor {
			glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
			monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{
				Name:   fmt.Sprintf("monitor_%s_%d", name, portIndex),
				PoolID: pool.ID,
				Type:   string(port.Protocol),
				Delay:  int(lbaas.opts.MonitorDelay.Duration.Seconds()),
@ -987,7 +1005,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
	// find node-security-group for service
	var err error
	if len(lbaas.opts.NodeSecurityGroupIDs) == 0 {
		lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes)
		lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes)
		if err != nil {
			return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
		}
@ -1158,6 +1176,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes)

	lbaas.opts.SubnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID)
	if len(lbaas.opts.SubnetID) == 0 && len(nodes) > 0 {
		// Get SubnetID automatically.
		// The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
@ -1211,17 +1230,17 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string
	}

	// Compose Set of member (addresses) that _should_ exist
	addrs := map[string]empty{}
	addrs := make(map[string]*v1.Node)
	for _, node := range nodes {
		addr, err := nodeAddressForLB(node)
		if err != nil {
			return err
		}
		addrs[addr] = empty{}
		addrs[addr] = node
	}

	// Check for adding/removing members associated with each port
	for _, port := range ports {
	for portIndex, port := range ports {
		// Get listener associated with this port
		listener, ok := lbListeners[portKey{
			Protocol: toListenersProtocol(port.Protocol),
@ -1248,12 +1267,13 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string
		}

		// Add any new members for this port
		for addr := range addrs {
		for addr, node := range addrs {
			if _, ok := members[addr]; ok && members[addr].ProtocolPort == int(port.NodePort) {
				// Already exists, do not create member
				continue
			}
			_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
				Name:         fmt.Sprintf("member_%s_%d_%s", loadbalancer.Name, portIndex, node.Name),
				Address:      addr,
				ProtocolPort: int(port.NodePort),
				SubnetID:     lbaas.opts.SubnetID,
@ -1299,7 +1319,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser
	originalNodeSecurityGroupIDs := lbaas.opts.NodeSecurityGroupIDs

	var err error
	lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes)
	lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes)
	if err != nil {
		return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
	}
@ -1424,18 +1444,6 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName
		}
	}

	// get all members associated with each poolIDs
	var memberIDs []string
	for _, pool := range poolIDs {
		membersList, err := getMembersByPoolID(lbaas.lb, pool)
		if err != nil && !isNotFound(err) {
			return fmt.Errorf("error getting pool members %s: %v", pool, err)
		}
		for _, member := range membersList {
			memberIDs = append(memberIDs, member.ID)
		}
	}

	// delete all monitors
	for _, monitorID := range monitorIDs {
		err := v2monitors.Delete(lbaas.lb, monitorID).ExtractErr()
@ -1450,9 +1458,14 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName

	// delete all members and pools
	for _, poolID := range poolIDs {
		// get members for current pool
		membersList, err := getMembersByPoolID(lbaas.lb, poolID)
		if err != nil && !isNotFound(err) {
			return fmt.Errorf("error getting pool members %s: %v", poolID, err)
		}
		// delete all members for this pool
		for _, memberID := range memberIDs {
			err := v2pools.DeleteMember(lbaas.lb, poolID, memberID).ExtractErr()
		for _, member := range membersList {
			err := v2pools.DeleteMember(lbaas.lb, poolID, member.ID).ExtractErr()
			if err != nil && !isNotFound(err) {
				return err
			}
@ -1463,7 +1476,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName
	}

	// delete pool
	err := v2pools.Delete(lbaas.lb, poolID).ExtractErr()
	err = v2pools.Delete(lbaas.lb, poolID).ExtractErr()
	if err != nil && !isNotFound(err) {
		return err
	}
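Note (illustrative sketch, not part of the vendored diff): the rewritten nodeAddressForLB prefers a node's InternalIP, falls back to an ExternalIP, and returns an error only when neither exists. The stand-alone program below uses local placeholder types to show the same two-pass preference order without depending on the Kubernetes API packages.

package main

import (
	"errors"
	"fmt"
)

type addrType string

const (
	internalIP addrType = "InternalIP"
	externalIP addrType = "ExternalIP"
)

type nodeAddress struct {
	Type    addrType
	Address string
}

var errNoAddressFound = errors.New("no address found for host")

// addressForLB walks the preferred types in order and returns the first match.
func addressForLB(addrs []nodeAddress) (string, error) {
	for _, want := range []addrType{internalIP, externalIP} {
		for _, addr := range addrs {
			if addr.Type == want {
				return addr.Address, nil
			}
		}
	}
	return "", errNoAddressFound
}

func main() {
	addrs := []nodeAddress{{externalIP, "203.0.113.10"}, {internalIP, "10.0.0.4"}}
	a, _ := addressForLB(addrs)
	fmt.Println(a) // prints 10.0.0.4: InternalIP wins even when listed second
}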
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go
generated
vendored
@ -83,9 +83,12 @@ func (r *Routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr
	var routes []*cloudprovider.Route
	for _, item := range router.Routes {
		nodeName, foundNode := nodeNamesByAddr[item.NextHop]
		if !foundNode {
			nodeName = types.NodeName(item.NextHop)
		}
		route := cloudprovider.Route{
			Name:            item.DestinationCIDR,
			TargetNode:      nodeName, //empty if NextHop is unknown
			TargetNode:      nodeName, //contains the nexthop address if node was not found
			Blackhole:       !foundNode,
			DestinationCIDR: item.DestinationCIDR,
		}
@ -225,10 +228,16 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo

	ip, _, _ := net.ParseCIDR(route.DestinationCIDR)
	isCIDRv6 := ip.To4() == nil
	addr, err := getAddressByName(r.compute, route.TargetNode, isCIDRv6)

	if err != nil {
		return err
	var addr string

	// Blackhole routes are orphaned and have no counterpart in OpenStack
	if !route.Blackhole {
		var err error
		addr, err = getAddressByName(r.compute, route.TargetNode, isCIDRv6)
		if err != nil {
			return err
		}
	}

	router, err := routers.Get(r.network, r.opts.RouterID).Extract()
@ -239,7 +248,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo
	routes := router.Routes
	index := -1
	for i, item := range routes {
		if item.DestinationCIDR == route.DestinationCIDR && item.NextHop == addr {
		if item.DestinationCIDR == route.DestinationCIDR && (item.NextHop == addr || route.Blackhole && item.NextHop == string(route.TargetNode)) {
			index = i
			break
		}
@ -255,7 +264,8 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo
	routes = routes[:len(routes)-1]

	unwind, err := updateRoutes(r.network, router, routes)
	if err != nil {
	// If this was a blackhole route we are done, there are no ports to update
	if err != nil || route.Blackhole {
		return err
	}
	defer onFailure.call(unwind)
@ -297,7 +307,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo
}

func getPortIDByIP(compute *gophercloud.ServiceClient, targetNode types.NodeName, ipAddress string) (string, error) {
	srv, err := getServerByName(compute, targetNode, true)
	srv, err := getServerByName(compute, targetNode)
	if err != nil {
		return "", err
	}
26
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go
generated
vendored
@ -88,6 +88,11 @@ func TestReadConfig(t *testing.T) {
		t.Errorf("Should fail when no config is provided: %s", err)
	}

	// Since we are setting env vars, we need to reset old
	// values for other tests to succeed.
	env := clearEnviron(t)
	defer resetEnviron(t, env)

	os.Setenv("OS_PASSWORD", "mypass")
	defer os.Unsetenv("OS_PASSWORD")

@ -681,3 +686,24 @@ func TestToAuth3Options(t *testing.T) {
		t.Errorf("DomainName %s != %s", ao.DomainName, cfg.Global.DomainName)
	}
}

func clearEnviron(t *testing.T) []string {
	env := os.Environ()
	for _, pair := range env {
		if strings.HasPrefix(pair, "OS_") {
			i := strings.Index(pair, "=") + 1
			os.Unsetenv(pair[:i-1])
		}
	}
	return env
}
func resetEnviron(t *testing.T, items []string) {
	for _, pair := range items {
		if strings.HasPrefix(pair, "OS_") {
			i := strings.Index(pair, "=") + 1
			if err := os.Setenv(pair[:i-1], pair[i:]); err != nil {
				t.Errorf("Setenv(%q, %q) failed during reset: %v", pair[:i-1], pair[i:], err)
			}
		}
	}
}
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go
generated
vendored
@ -310,7 +310,8 @@ func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
	}
	volumeStatus := volume.Status
	if volumeStatus == volumeErrorStatus {
		return false, volumeStatus, nil
		err = fmt.Errorf("status of volume %s is %s", diskName, volumeStatus)
		return false, volumeStatus, err
	}
	if volumeStatus == volumeAvailableStatus || volumeStatus == volumeInUseStatus || volumeStatus == volumeDeletedStatus {
		return false, volume.Status, nil
@ -622,7 +623,7 @@ func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID stri
	if err != nil {
		return false, "", err
	}
	srv, err := getServerByName(cClient, nodeName, false)
	srv, err := getServerByName(cClient, nodeName)
	if err != nil {
		if err == ErrNotFound {
			// instance not found anymore in cloudprovider, assume that cinder is detached
@ -659,7 +660,7 @@ func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs [
	if err != nil {
		return attached, err
	}
	srv, err := getServerByName(cClient, nodeName, false)
	srv, err := getServerByName(cClient, nodeName)
	if err != nil {
		if err == ErrNotFound {
			// instance not found anymore, mark all volumes as detached
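Note (illustrative sketch, not part of the vendored diff): with the change above, OperationPending reports a volume stuck in the error state as an error instead of a silent "not pending". A hypothetical caller-side fragment; osCloud and diskName are assumed to exist in the surrounding code.

	pending, status, err := osCloud.OperationPending(diskName)
	if err != nil {
		// the volume is unusable (for example status "error"); surface it to the controller
		return fmt.Errorf("volume %s cannot be used (status %q): %v", diskName, status, err)
	}
	if pending {
		// an attach/detach operation is still in flight; poll again later
	}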
Some files were not shown because too many files have changed in this diff.