Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00.

Commit: vendor updates
53  vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
package cloudprovider

import (
+"context"
"errors"
"fmt"
"strings"
@@ -44,8 +45,6 @@ type Interface interface {
Routes() (Routes, bool)
// ProviderName returns the cloud provider ID.
ProviderName() string
-// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
-ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
// HasClusterID returns true if a ClusterID is required and set
HasClusterID() bool
}
@@ -58,9 +57,9 @@ type InformerUser interface {
// Clusters is an abstract, pluggable interface for clusters of containers.
type Clusters interface {
// ListClusters lists the names of the available clusters.
-ListClusters() ([]string, error)
+ListClusters(ctx context.Context) ([]string, error)
// Master gets back the address (either DNS name or IP address) of the master node for the cluster.
-Master(clusterName string) (string, error)
+Master(ctx context.Context, clusterName string) (string, error)
}

// TODO(#6812): Use a shorter name that's less likely to be longer than cloud
@@ -77,12 +76,12 @@ func GetLoadBalancerName(service *v1.Service) string {
}

// GetInstanceProviderID builds a ProviderID for a node in a cloud.
-func GetInstanceProviderID(cloud Interface, nodeName types.NodeName) (string, error) {
+func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) {
instances, ok := cloud.Instances()
if !ok {
return "", fmt.Errorf("failed to get instances from cloud provider")
}
-instanceID, err := instances.InstanceID(nodeName)
+instanceID, err := instances.InstanceID(ctx, nodeName)
if err != nil {
return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
}
@@ -96,17 +95,17 @@ type LoadBalancer interface {
// if so, what its status is.
// Implementations must treat the *v1.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
-GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
+GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
// Implementations must treat the *v1.Service and *v1.Node
// parameters as read-only and not modify them.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
-EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
+EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
// UpdateLoadBalancer updates hosts under the specified load balancer.
// Implementations must treat the *v1.Service and *v1.Node
// parameters as read-only and not modify them.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
-UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
+UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error
// EnsureLoadBalancerDeleted deletes the specified load balancer if it
// exists, returning nil if the load balancer specified either didn't exist or
// was successfully deleted.
@@ -115,7 +114,7 @@ type LoadBalancer interface {
// doesn't exist even if some part of it is still laying around.
// Implementations must treat the *v1.Service parameter as read-only and not modify it.
// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
-EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error
+EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error
}

// Instances is an abstract, pluggable interface for sets of instances.
@@ -124,31 +123,31 @@ type Instances interface {
// TODO(roberthbailey): This currently is only used in such a way that it
// returns the address of the calling instance. We should do a rename to
// make this clearer.
-NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
+NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error)
// NodeAddressesByProviderID returns the addresses of the specified instance.
// The instance is specified using the providerID of the node. The
// ProviderID is a unique identifier of the node. This will not be called
// from the node whose nodeaddresses are being queried. i.e. local metadata
// services cannot be used in this method to obtain nodeaddresses
-NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error)
+NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)
// ExternalID returns the cloud provider ID of the node with the specified NodeName.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
-ExternalID(nodeName types.NodeName) (string, error)
+ExternalID(ctx context.Context, nodeName types.NodeName) (string, error)
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
-InstanceID(nodeName types.NodeName) (string, error)
+InstanceID(ctx context.Context, nodeName types.NodeName) (string, error)
// InstanceType returns the type of the specified instance.
-InstanceType(name types.NodeName) (string, error)
+InstanceType(ctx context.Context, name types.NodeName) (string, error)
// InstanceTypeByProviderID returns the type of the specified instance.
-InstanceTypeByProviderID(providerID string) (string, error)
+InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error)
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
-AddSSHKeyToAllInstances(user string, keyData []byte) error
+AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error
// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
-CurrentNodeName(hostname string) (types.NodeName, error)
+CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error)
// InstanceExistsByProviderID returns true if the instance for the given provider id still is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
-InstanceExistsByProviderID(providerID string) (bool, error)
+InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error)
}

// Route is a representation of an advanced routing rule.
@@ -169,14 +168,14 @@ type Route struct {
// Routes is an abstract, pluggable interface for advanced routing rules.
type Routes interface {
// ListRoutes lists all managed routes that belong to the specified clusterName
-ListRoutes(clusterName string) ([]*Route, error)
+ListRoutes(ctx context.Context, clusterName string) ([]*Route, error)
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
-CreateRoute(clusterName string, nameHint string, route *Route) error
+CreateRoute(ctx context.Context, clusterName string, nameHint string, route *Route) error
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
-DeleteRoute(clusterName string, route *Route) error
+DeleteRoute(ctx context.Context, clusterName string, route *Route) error
}

var (
@@ -194,23 +193,23 @@ type Zone struct {
// Zones is an abstract, pluggable interface for zone enumeration.
type Zones interface {
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
-// In most cases, this method is called from the kubelet querying a local metadata service to aquire its zone.
+// In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone.
// For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone
// can no longer be called from the kubelets.
-GetZone() (Zone, error)
+GetZone(ctx context.Context) (Zone, error)

// GetZoneByProviderID returns the Zone containing the current zone and locality region of the node specified by providerId
// This method is particularly used in the context of external cloud providers where node initialization must be down
// outside the kubelets.
-GetZoneByProviderID(providerID string) (Zone, error)
+GetZoneByProviderID(ctx context.Context, providerID string) (Zone, error)

// GetZoneByNodeName returns the Zone containing the current zone and locality region of the node specified by node name
// This method is particularly used in the context of external cloud providers where node initialization must be down
// outside the kubelets.
-GetZoneByNodeName(nodeName types.NodeName) (Zone, error)
+GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (Zone, error)
}

// PVLabeler is an abstract, pluggable interface for fetching labels for volumes
type PVLabeler interface {
-GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error)
+GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error)
}
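The change to cloud.go is mechanical but wide: every method of the pluggable interfaces (Clusters, LoadBalancer, Instances, Routes, Zones, PVLabeler) now takes a leading context.Context so callers can cancel or time-bound cloud API calls. A minimal, self-contained sketch of the calling pattern; ZoneGetter and staticZones below are illustrative stand-ins, not identifiers from the vendored package:

package main

import (
	"context"
	"fmt"
)

// Zone mirrors the shape of cloudprovider.Zone as used in the diff above.
type Zone struct {
	FailureDomain string
	Region        string
}

// ZoneGetter is a cut-down stand-in for the updated Zones interface:
// the call now carries a context so callers can cancel slow cloud API requests.
type ZoneGetter interface {
	GetZone(ctx context.Context) (Zone, error)
}

// staticZones is a hypothetical implementation used only for illustration.
type staticZones struct{ zone Zone }

func (s staticZones) GetZone(ctx context.Context) (Zone, error) {
	// A real provider would issue a metadata or API call here and honor ctx.
	select {
	case <-ctx.Done():
		return Zone{}, ctx.Err()
	default:
		return s.zone, nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var z ZoneGetter = staticZones{zone: Zone{FailureDomain: "us-east-1a", Region: "us-east-1"}}
	zone, err := z.GetZone(ctx)
	fmt.Println(zone, err)
}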
14  vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go (generated, vendored)
@@ -60,23 +60,11 @@ func IsCloudProvider(name string) bool {
return found
}

-// CloudProviders returns the name of all registered cloud providers in a
-// string slice
-func CloudProviders() []string {
-names := []string{}
-providersMutex.Lock()
-defer providersMutex.Unlock()
-for name := range providers {
-names = append(names, name)
-}
-return names
-}
-
// GetCloudProvider creates an instance of the named cloud provider, or nil if
// the name is unknown. The error return is only used if the named provider
// was known but failed to initialize. The config parameter specifies the
// io.Reader handler of the configuration file for the cloud provider, or nil
-// for no configuation.
+// for no configuration.
func GetCloudProvider(name string, config io.Reader) (Interface, error) {
providersMutex.Lock()
defer providersMutex.Unlock()
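For context (the registration side is outside this hunk), RegisterCloudProvider and GetCloudProvider implement a name-keyed factory registry guarded by a mutex: providers register themselves from init() and the controller looks them up by the configured provider name. A stand-alone sketch of that pattern, with hypothetical names (register, get, factory) rather than the package's real identifiers:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

// factory builds a provider from an optional config reader,
// loosely mirroring the cloudprovider.Factory idea.
type factory func(config io.Reader) (string, error)

var (
	mu        sync.Mutex
	factories = map[string]factory{}
)

// register records a named factory; registering a name twice is a programming error.
func register(name string, f factory) {
	mu.Lock()
	defer mu.Unlock()
	if _, dup := factories[name]; dup {
		panic("provider registered twice: " + name)
	}
	factories[name] = f
}

// get instantiates the named provider, or reports that the name is unknown.
func get(name string, config io.Reader) (string, error) {
	mu.Lock()
	f, ok := factories[name]
	mu.Unlock()
	if !ok {
		return "", errors.New("unknown cloud provider: " + name)
	}
	return f(config)
}

func main() {
	register("toy", func(config io.Reader) (string, error) {
		b := new(strings.Builder)
		if config != nil {
			io.Copy(b, config)
		}
		return "toy provider with config: " + b.String(), nil
	})
	p, err := get("toy", strings.NewReader("[global]"))
	fmt.Println(p, err)
}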
5  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD (generated, vendored)
@@ -38,6 +38,7 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds:go_default_library",
+"//vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/ec2metadata:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
@@ -46,6 +47,7 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library",
+"//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
@@ -73,8 +75,7 @@ go_test(
"retry_handler_test.go",
"tags_test.go",
],
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws",
-library = ":go_default_library",
+embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
3  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/OWNERS (generated, vendored)
@@ -1,8 +1,11 @@
approvers:
- justinsb
- zmerlynn
- gnufied
- jsafrane
reviewers:
- gnufied
- jsafrane
- justinsb
- zmerlynn
- chrislovecnm
224  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
package aws

import (
+"context"
"errors"
"fmt"
"io"
@@ -32,6 +33,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
@@ -40,6 +42,7 @@ import (
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/kms"
+"github.com/aws/aws-sdk-go/service/sts"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
clientset "k8s.io/client-go/kubernetes"
@@ -201,14 +204,13 @@ const nodeWithImpairedVolumes = "NodeWithImpairedVolumes"
const (
// volumeAttachmentConsecutiveErrorLimit is the number of consecutive errors we will ignore when waiting for a volume to attach/detach
volumeAttachmentStatusConsecutiveErrorLimit = 10
-// volumeAttachmentStatus* is configuration of exponential backoff for
-// waiting for attach/detach operation to complete. Starting with 10
-// seconds, multiplying by 1.2 with each step and taking 21 steps at maximum
-// it will time out after 31.11 minutes, which roughly corresponds to GCE
-// timeout (30 minutes).
-volumeAttachmentStatusInitialDelay = 10 * time.Second
-volumeAttachmentStatusFactor = 1.2
-volumeAttachmentStatusSteps = 21
+// most attach/detach operations on AWS finish within 1-4 seconds
+// By using 1 second starting interval with a backoff of 1.8
+// we get - [1, 1.8, 3.24, 5.832000000000001, 10.4976]
+// in total we wait for 2601 seconds
+volumeAttachmentStatusInitialDelay = 1 * time.Second
+volumeAttachmentStatusFactor = 1.8
+volumeAttachmentStatusSteps = 13

// createTag* is configuration of exponential backoff for CreateTag call. We
// retry mainly because if we create an object, we cannot tag it until it is
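To sanity-check the new timing comment: assuming the backoff sleeps once per step, which is roughly how the k8s wait.ExponentialBackoff helper behaves, the totals are plain geometric sums. A small stand-alone calculation (not part of the vendored code):

package main

import "fmt"

// totalWait sums the sleep intervals of an exponential backoff that starts at
// initial seconds and multiplies by factor for each of steps iterations.
// This is a back-of-the-envelope check, not the vendored wait implementation.
func totalWait(initial, factor float64, steps int) float64 {
	total, d := 0.0, initial
	for i := 0; i < steps; i++ {
		total += d
		d *= factor
	}
	return total
}

func main() {
	// New settings from the diff: 1s, factor 1.8, 13 steps -> about 2601.6s (roughly 43 min),
	// which is where the "2601 seconds" in the new comment comes from.
	fmt.Printf("new: %.1fs\n", totalWait(1, 1.8, 13))

	// Old settings: 10s, factor 1.2, 21 steps -> about 2250s; counting only the first
	// 20 sleeps gives about 1867s = 31.1 min, matching the old comment's "31.11 minutes".
	fmt.Printf("old (21 sleeps): %.1fs\n", totalWait(10, 1.2, 21))
	fmt.Printf("old (20 sleeps): %.1fs\n", totalWait(10, 1.2, 20))
}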
@@ -247,11 +249,6 @@ const MaxReadThenCreateRetries = 30
// need hardcoded defaults.
const DefaultVolumeType = "gp2"

-// DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
-// Amazon recommends no more than 40; the system root volume uses at least one.
-// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
-const DefaultMaxEBSVolumes = 39
-
// Used to call RecognizeWellKnownRegions just once
var once sync.Once

@@ -531,9 +528,12 @@ type CloudConfig struct {
// RouteTableID enables using a specific RouteTable
RouteTableID string

+// RoleARN is the IAM role to assume when interaction with AWS APIs.
+RoleARN string
+
// KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources
KubernetesClusterTag string
-// KubernetesClusterTag is the cluster id we'll use to identify our cluster resources
+// KubernetesClusterID is the cluster id we'll use to identify our cluster resources
KubernetesClusterID string

//The aws provider creates an inbound rule per load balancer on the node security
@@ -736,12 +736,12 @@ func newEc2Filter(name string, values ...string) *ec2.Filter {
}

// AddSSHKeyToAllInstances is currently not implemented.
-func (c *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
+func (c *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
return cloudprovider.NotImplemented
}

// CurrentNodeName returns the name of the current node
-func (c *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
+func (c *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
return c.selfAWSInstance.nodeName, nil
}

@@ -932,22 +932,43 @@ func (s *awsSdkEC2) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeV
func init() {
registerMetrics()
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
+cfg, err := readAWSCloudConfig(config)
+if err != nil {
+return nil, fmt.Errorf("unable to read AWS cloud provider config file: %v", err)
+}
+
+sess, err := session.NewSession(&aws.Config{})
+if err != nil {
+return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
+}
+
+var provider credentials.Provider
+if cfg.Global.RoleARN == "" {
+provider = &ec2rolecreds.EC2RoleProvider{
+Client: ec2metadata.New(sess),
+}
+} else {
+glog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN)
+provider = &stscreds.AssumeRoleProvider{
+Client: sts.New(sess),
+RoleARN: cfg.Global.RoleARN,
+}
+}
+
creds := credentials.NewChainCredentials(
[]credentials.Provider{
&credentials.EnvProvider{},
-&ec2rolecreds.EC2RoleProvider{
-Client: ec2metadata.New(session.New(&aws.Config{})),
-},
+provider,
&credentials.SharedCredentialsProvider{},
})

aws := newAWSSDKProvider(creds)
-return newAWSCloud(config, aws)
+return newAWSCloud(*cfg, aws)
})
}

// readAWSCloudConfig reads an instance of AWSCloudConfig from config reader.
-func readAWSCloudConfig(config io.Reader, metadata EC2Metadata) (*CloudConfig, error) {
+func readAWSCloudConfig(config io.Reader) (*CloudConfig, error) {
var cfg CloudConfig
var err error

@@ -958,20 +979,25 @@ func readAWSCloudConfig(config io.Reader, metadata EC2Metadata) (*CloudConfig, e
}
}

+return &cfg, nil
+}
+
+func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error {
if cfg.Global.Zone == "" {
if metadata != nil {
glog.Info("Zone not specified in configuration file; querying AWS metadata service")
+var err error
cfg.Global.Zone, err = getAvailabilityZone(metadata)
if err != nil {
-return nil, err
+return err
}
}
if cfg.Global.Zone == "" {
-return nil, fmt.Errorf("no zone specified in configuration file")
+return fmt.Errorf("no zone specified in configuration file")
}
}

-return &cfg, nil
+return nil
}

func getInstanceType(metadata EC2Metadata) (string, error) {
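This refactor separates parsing from zone discovery: readAWSCloudConfig now only parses the file, and updateConfigZone fills Global.Zone from the EC2 metadata service when the file leaves it empty (the tests below pass a bare "[global]" section for exactly this case). A stand-alone sketch of that precedence; resolveZone and metadataZone are hypothetical names used only for illustration:

package main

import (
	"errors"
	"fmt"
)

// resolveZone mirrors the precedence in updateConfigZone: prefer the zone from
// the cloud config file, otherwise ask the (possibly absent) metadata service.
func resolveZone(cfgZone string, metadataZone func() (string, error)) (string, error) {
	if cfgZone != "" {
		return cfgZone, nil
	}
	if metadataZone == nil {
		return "", errors.New("no zone specified in configuration file")
	}
	zone, err := metadataZone()
	if err != nil {
		return "", err
	}
	if zone == "" {
		return "", errors.New("no zone specified in configuration file")
	}
	return zone, nil
}

func main() {
	// The config file wins when it names a zone.
	fmt.Println(resolveZone("us-east-1a", nil))
	// Otherwise fall back to the metadata service, as newAWSCloud now does.
	fmt.Println(resolveZone("", func() (string, error) { return "us-west-2b", nil }))
}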
@@ -994,7 +1020,7 @@ func azToRegion(az string) (string, error) {

// newAWSCloud creates a new instance of AWSCloud.
// AWSProvider and instanceId are primarily for tests
-func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
+func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) {
// We have some state in the Cloud object - in particular the attaching map
// Log so that if we are building multiple Cloud objects, it is obvious!
glog.Infof("Building AWS cloudprovider")
@@ -1004,9 +1030,9 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
return nil, fmt.Errorf("error creating AWS metadata client: %q", err)
}

-cfg, err := readAWSCloudConfig(config, metadata)
+err = updateConfigZone(&cfg, metadata)
if err != nil {
-return nil, fmt.Errorf("unable to read AWS cloud provider config file: %v", err)
+return nil, fmt.Errorf("unable to determine AWS zone from cloud provider config or EC2 instance metadata: %v", err)
}

zone := cfg.Global.Zone
@@ -1064,7 +1090,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
asg: asg,
metadata: metadata,
kms: kms,
-cfg: cfg,
+cfg: &cfg,
region: regionName,

attaching: make(map[types.NodeName]map[mountDevice]awsVolumeID),
@@ -1072,8 +1098,9 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
}
awsCloud.instanceCache.cloud = awsCloud

-if cfg.Global.VPC != "" && cfg.Global.SubnetID != "" && (cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "") {
-// When the master is running on a different AWS account, cloud provider or on-premises
+tagged := cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != ""
+if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged {
+// When the master is running on a different AWS account, cloud provider or on-premise
// build up a dummy instance and use the VPC from the nodes account
glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises")
awsCloud.selfAWSInstance = &awsInstance{
@@ -1089,7 +1116,6 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
}
awsCloud.selfAWSInstance = selfAWSInstance
awsCloud.vpcID = selfAWSInstance.vpcID

}

if cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "" {
@@ -1135,11 +1161,6 @@ func (c *Cloud) ProviderName() string {
return ProviderName
}

-// ScrubDNS filters DNS settings for pods.
-func (c *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
-return nameservers, searches
-}
-
// LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services.
func (c *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
return c, true
@@ -1166,7 +1187,7 @@ func (c *Cloud) HasClusterID() bool {
}

// NodeAddresses is an implementation of Instances.NodeAddresses.
-func (c *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
+func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
if c.selfAWSInstance.nodeName == name || len(name) == 0 {
addresses := []v1.NodeAddress{}

@@ -1283,7 +1304,7 @@ func extractNodeAddresses(instance *ec2.Instance) ([]v1.NodeAddress, error) {
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
-func (c *Cloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
+func (c *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID()
if err != nil {
return nil, err
@@ -1298,7 +1319,7 @@ func (c *Cloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress,
}

// ExternalID returns the cloud provider ID of the node with the specified nodeName (deprecated).
-func (c *Cloud) ExternalID(nodeName types.NodeName) (string, error) {
+func (c *Cloud) ExternalID(ctx context.Context, nodeName types.NodeName) (string, error) {
if c.selfAWSInstance.nodeName == nodeName {
// We assume that if this is run on the instance itself, the instance exists and is alive
return c.selfAWSInstance.awsID, nil
@@ -1317,7 +1338,7 @@ func (c *Cloud) ExternalID(nodeName types.NodeName) (string, error) {

// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
-func (c *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
+func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID()
if err != nil {
return false, err
@@ -1348,7 +1369,7 @@ func (c *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
}

// InstanceID returns the cloud provider ID of the node with the specified nodeName.
-func (c *Cloud) InstanceID(nodeName types.NodeName) (string, error) {
+func (c *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
// In the future it is possible to also return an endpoint as:
// <endpoint>/<zone>/<instanceid>
if c.selfAWSInstance.nodeName == nodeName {
@@ -1364,7 +1385,7 @@ func (c *Cloud) InstanceID(nodeName types.NodeName) (string, error) {
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
-func (c *Cloud) InstanceTypeByProviderID(providerID string) (string, error) {
+func (c *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID()
if err != nil {
return "", err
@@ -1379,7 +1400,7 @@ func (c *Cloud) InstanceTypeByProviderID(providerID string) (string, error) {
}

// InstanceType returns the type of the node with the specified nodeName.
-func (c *Cloud) InstanceType(nodeName types.NodeName) (string, error) {
+func (c *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) {
if c.selfAWSInstance.nodeName == nodeName {
return c.selfAWSInstance.instanceType, nil
}
@@ -1440,7 +1461,7 @@ func (c *Cloud) getCandidateZonesForDynamicVolume() (sets.String, error) {
}

// GetZone implements Zones.GetZone
-func (c *Cloud) GetZone() (cloudprovider.Zone, error) {
+func (c *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
return cloudprovider.Zone{
FailureDomain: c.selfAWSInstance.availabilityZone,
Region: c.region,
@@ -1450,7 +1471,7 @@ func (c *Cloud) GetZone() (cloudprovider.Zone, error) {
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
-func (c *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
+func (c *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID()
if err != nil {
return cloudprovider.Zone{}, err
@@ -1471,7 +1492,7 @@ func (c *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, erro
// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
-func (c *Cloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
+func (c *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
instance, err := c.getInstanceByNodeName(nodeName)
if err != nil {
return cloudprovider.Zone{}, err
@@ -1670,6 +1691,20 @@ func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) {
return disk, nil
}

+// Helper function for describeVolume callers. Tries to retype given error to AWS error
+// and returns true in case the AWS error is "InvalidVolume.NotFound", false otherwise
+func isAWSErrorVolumeNotFound(err error) bool {
+if err != nil {
+if awsError, ok := err.(awserr.Error); ok {
+// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+if awsError.Code() == "InvalidVolume.NotFound" {
+return true
+}
+}
+}
+return false
+}
+
// Gets the full information about this volume from the EC2 API
func (d *awsDisk) describeVolume() (*ec2.Volume, error) {
volumeID := d.awsID
@@ -1680,13 +1715,13 @@ func (d *awsDisk) describeVolume() (*ec2.Volume, error) {

volumes, err := d.ec2.DescribeVolumes(request)
if err != nil {
-return nil, fmt.Errorf("error querying ec2 for volume %q: %q", volumeID, err)
+return nil, err
}
if len(volumes) == 0 {
-return nil, fmt.Errorf("no volumes found for volume %q", volumeID)
+return nil, fmt.Errorf("no volumes found")
}
if len(volumes) > 1 {
-return nil, fmt.Errorf("multiple volumes found for volume %q", volumeID)
+return nil, fmt.Errorf("multiple volumes found")
}
return volumes[0], nil
}
@@ -1790,12 +1825,29 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment,
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := d.describeVolume()
if err != nil {
+// The VolumeNotFound error is special -- we don't need to wait for it to repeat
+if isAWSErrorVolumeNotFound(err) {
+if status == "detached" {
+// The disk doesn't exist, assume it's detached, log warning and stop waiting
+glog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID)
+stateStr := "detached"
+attachment = &ec2.VolumeAttachment{
+State: &stateStr,
+}
+return true, nil
+}
+if status == "attached" {
+// The disk doesn't exist, complain, give up waiting and report error
+glog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID)
+return false, err
+}
+}
describeErrorCount++
if describeErrorCount > volumeAttachmentStatusConsecutiveErrorLimit {
// report the error
return false, err
} else {
-glog.Warningf("Ignoring error from describe volume; will retry: %q", err)
+glog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err)
return false, nil
}
} else {
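The retry loop above is driven by wait.ExponentialBackoff from k8s.io/apimachinery. A minimal usage sketch wiring in the new constants; the condition func here is only a stand-in for the real describeVolume and attachment-state check:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 1 * time.Second, // volumeAttachmentStatusInitialDelay
		Factor:   1.8,             // volumeAttachmentStatusFactor
		Steps:    13,              // volumeAttachmentStatusSteps
	}

	attempts := 0
	// The condition func returns (done, err): done=true stops the retries,
	// a non-nil err aborts them, and (false, nil) means "sleep and try again".
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// Stand-in for the describeVolume call and attachment-state comparison.
		return attempts >= 3, nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}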
@@ -2022,8 +2074,14 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
// DetachDisk implements Volumes.DetachDisk
func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
-if diskInfo == nil {
-return "", err
+if err != nil {
+if isAWSErrorVolumeNotFound(err) {
+// Someone deleted the volume being detached; complain, but do nothing else and return success
+glog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName)
+return "", nil
+} else {
+return "", err
+}
}

if !attached && diskInfo.ec2Instance != nil {
@@ -2092,17 +2150,17 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er

var createAZ string
if !volumeOptions.ZonePresent && !volumeOptions.ZonesPresent {
-createAZ = volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName)
+createAZ = volumeutil.ChooseZoneForVolume(allZones, volumeOptions.PVCName)
}
if !volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
if adminSetOfZones, err := volumeutil.ZonesToSet(volumeOptions.AvailabilityZones); err != nil {
return "", err
} else {
-createAZ = volume.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName)
+createAZ = volumeutil.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName)
}
}
if volumeOptions.ZonePresent && !volumeOptions.ZonesPresent {
-if err := volume.ValidateZone(volumeOptions.AvailabilityZone); err != nil {
+if err := volumeutil.ValidateZone(volumeOptions.AvailabilityZone); err != nil {
return "", err
}
createAZ = volumeOptions.AvailabilityZone
@@ -2259,7 +2317,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string)
return true, nil
}

-func (c *Cloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error) {
+func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore any volumes that are being provisioned
if pv.Spec.AWSElasticBlockStore.VolumeID == volume.ProvisionedVolumeName {
return nil, nil
@@ -2318,9 +2376,15 @@ func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {

// DiskIsAttached implements Volumes.DiskIsAttached
func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
-diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
-if diskInfo == nil {
-return true, err
+_, attached, err := c.checkIfAttachedToNode(diskName, nodeName)
+if err != nil {
+if isAWSErrorVolumeNotFound(err) {
+// The disk doesn't exist, can't be attached
+glog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName)
+return false, nil
+} else {
+return true, err
+}
}

return attached, nil
@@ -2412,7 +2476,7 @@ func (c *Cloud) ResizeDisk(
}
requestBytes := newSize.Value()
// AWS resizes in chunks of GiB (not GB)
-requestGiB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
+requestGiB := volumeutil.RoundUpSize(requestBytes, 1024*1024*1024)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))

// If disk already if of greater or equal size than requested we return
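RoundUpSize is ceiling division, so a request expressed in decimal gigabytes lands on the next whole GiB before being re-encoded as an "<n>Gi" quantity; AWS never shrinks the request. A quick stand-alone check; roundUpSize below is a local re-implementation assumed to match the volumeutil helper:

package main

import "fmt"

// roundUpSize is ceiling division: the smallest number of allocation units
// that covers volumeSizeBytes (assumed equivalent to volumeutil.RoundUpSize).
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = 1024 * 1024 * 1024

	// A request for "10G" (10 * 10^9 bytes) becomes 10 GiB on AWS,
	// i.e. slightly more than was asked for, never less.
	requestBytes := int64(10 * 1000 * 1000 * 1000)
	requestGiB := roundUpSize(requestBytes, GiB)
	fmt.Printf("%d bytes -> %dGi\n", requestBytes, requestGiB)
}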
@@ -2824,8 +2888,9 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss

// Makes sure the security group exists.
// For multi-cluster isolation, name must be globally unique, for example derived from the service UUID.
+// Additional tags can be specified
// Returns the security group id or error
-func (c *Cloud) ensureSecurityGroup(name string, description string) (string, error) {
+func (c *Cloud) ensureSecurityGroup(name string, description string, additionalTags map[string]string) (string, error) {
groupID := ""
attempt := 0
for {
@@ -2891,7 +2956,7 @@ func (c *Cloud) ensureSecurityGroup(name string, description string) (string, er
return "", fmt.Errorf("created security group, but id was not returned: %s", name)
}

-err := c.tagging.createTags(c.ec2, groupID, ResourceLifecycleOwned, nil)
+err := c.tagging.createTags(c.ec2, groupID, ResourceLifecycleOwned, additionalTags)
if err != nil {
// If we retry, ensureClusterTags will recover from this - it
// will add the missing tags. We could delete the security
@@ -3105,9 +3170,10 @@ func getPortSets(annotation string) (ports *portSets) {

// buildELBSecurityGroupList returns list of SecurityGroups which should be
// attached to ELB created by a service. List always consist of at least
-// 1 member which is an SG created for this service or a SG from the Global config. Extra groups can be
-// specified via annotation
-func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName, annotation string) ([]string, error) {
+// 1 member which is an SG created for this service or a SG from the Global config.
+// Extra groups can be specified via annotation, as can extra tags for any
+// new groups.
+func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName string, annotations map[string]string) ([]string, error) {
var err error
var securityGroupID string

@@ -3117,7 +3183,7 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load
// Create a security group for the load balancer
sgName := "k8s-elb-" + loadBalancerName
sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
-securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription)
+securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations))
if err != nil {
glog.Errorf("Error creating load balancer security group: %q", err)
return nil, err
@@ -3125,7 +3191,7 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load
}
sgList := []string{securityGroupID}

-for _, extraSG := range strings.Split(annotation, ",") {
+for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups], ",") {
extraSG = strings.TrimSpace(extraSG)
if len(extraSG) > 0 {
sgList = append(sgList, extraSG)
@@ -3172,7 +3238,7 @@ func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts
}

// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
-func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
annotations := apiService.Annotations
glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)",
clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations)
@@ -3423,7 +3489,7 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n

loadBalancerName := cloudprovider.GetLoadBalancerName(apiService)
serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
-securityGroupIDs, err := c.buildELBSecurityGroupList(serviceName, loadBalancerName, annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups])
+securityGroupIDs, err := c.buildELBSecurityGroupList(serviceName, loadBalancerName, annotations)
if err != nil {
return nil, err
}
@@ -3543,7 +3609,7 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n
}

// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
-func (c *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
+func (c *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
loadBalancerName := cloudprovider.GetLoadBalancerName(service)

if isNLB(service.Annotations) {
@@ -3803,7 +3869,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer
}

// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
-func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
+func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(service)

if isNLB(service.Annotations) {
@@ -3907,12 +3973,22 @@ func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servic
}
}

-if len(v4rangesToRemove) > 0 || len(v6rangesToRemove) > 0 {
+// ipv4 and ipv6 removals cannot be included in the same permission
+if len(v4rangesToRemove) > 0 {
// create a new *IpPermission to not accidentally remove UserIdGroupPairs
removedPermission := &ec2.IpPermission{
FromPort: matchingGroups[i].IpPermissions[j].FromPort,
IpProtocol: matchingGroups[i].IpPermissions[j].IpProtocol,
IpRanges: v4rangesToRemove,
ToPort: matchingGroups[i].IpPermissions[j].ToPort,
}
removes = append(removes, removedPermission)
}
+if len(v6rangesToRemove) > 0 {
+// create a new *IpPermission to not accidentally remove UserIdGroupPairs
+removedPermission := &ec2.IpPermission{
+FromPort: matchingGroups[i].IpPermissions[j].FromPort,
+IpProtocol: matchingGroups[i].IpPermissions[j].IpProtocol,
+Ipv6Ranges: v6rangesToRemove,
+ToPort: matchingGroups[i].IpPermissions[j].ToPort,
+}
@@ -4036,7 +4112,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servic
}

// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer
-func (c *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
+func (c *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
instances, err := c.findInstancesForELB(nodes)
if err != nil {
return err
@@ -4051,7 +4127,7 @@ func (c *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, node
if lb == nil {
return fmt.Errorf("Load balancer not found")
}
-_, err = c.EnsureLoadBalancer(clusterName, service, nodes)
+_, err = c.EnsureLoadBalancer(ctx, clusterName, service, nodes)
return err
}
lb, err := c.describeLoadBalancer(loadBalancerName)
88  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go (generated, vendored)
@@ -662,7 +662,6 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se

if clientTraffic {
clientRuleAnnotation := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName)
-mtuRuleAnnotation := fmt.Sprintf("%s=%s", NLBMtuDiscoveryRuleDescription, lbName)
// Client Traffic
permission := &ec2.IpPermission{
FromPort: aws.Int64(port),
@@ -682,26 +681,6 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
} else {
removes = append(removes, permission)
}

-// MTU discovery
-permission = &ec2.IpPermission{
-IpProtocol: aws.String("icmp"),
-FromPort: aws.Int64(3),
-ToPort: aws.Int64(4),
-}
-ranges = []*ec2.IpRange{}
-for _, cidr := range clientCidrs {
-ranges = append(ranges, &ec2.IpRange{
-CidrIp: aws.String(cidr),
-Description: aws.String(mtuRuleAnnotation),
-})
-}
-permission.IpRanges = ranges
-if add {
-adds = append(adds, permission)
-} else {
-removes = append(removes, permission)
-}
} else {
healthRuleAnnotation := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName)
@@ -725,8 +704,8 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
removes = append(removes, permission)
}
}

}

if len(adds) > 0 {
changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, adds)
if err != nil {
@@ -736,6 +715,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
}
}

if len(removes) > 0 {
changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, removes)
if err != nil {
@@ -745,6 +725,70 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se
glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
}
}

+if clientTraffic {
+// MTU discovery
+mtuRuleAnnotation := fmt.Sprintf("%s=%s", NLBMtuDiscoveryRuleDescription, lbName)
+mtuPermission := &ec2.IpPermission{
+IpProtocol: aws.String("icmp"),
+FromPort: aws.Int64(3),
+ToPort: aws.Int64(4),
+}
+ranges := []*ec2.IpRange{}
+for _, cidr := range clientCidrs {
+ranges = append(ranges, &ec2.IpRange{
+CidrIp: aws.String(cidr),
+Description: aws.String(mtuRuleAnnotation),
+})
+}
+mtuPermission.IpRanges = ranges
+
+group, err := c.findSecurityGroup(instanceSecurityGroupID)
+if err != nil {
+glog.Warningf("Error retrieving security group: %q", err)
+return err
+}
+
+if group == nil {
+glog.Warning("Security group not found: ", instanceSecurityGroupID)
+return nil
+}
+
+icmpExists := false
+permCount := 0
+for _, perm := range group.IpPermissions {
+if *perm.IpProtocol == "icmp" {
+icmpExists = true
+continue
+}
+
+if perm.FromPort != nil {
+permCount++
+}
+}
+
+if !icmpExists && permCount > 0 {
+// the icmp permission is missing
+changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission})
+if err != nil {
+glog.Warningf("Error adding MTU permission to security group: %q", err)
+return err
+}
+if !changed {
+glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
+}
+} else if icmpExists && permCount == 0 {
+// there is no additional permissions, remove icmp
+changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission})
+if err != nil {
+glog.Warningf("Error removing MTU permission to security group: %q", err)
+return err
+}
+if !changed {
+glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID)
+}
+}
+}
+}
return nil
}
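The block added above wires up path-MTU discovery for NLB client traffic: for IpProtocol "icmp", EC2 reuses FromPort and ToPort as ICMP type and code, and type 3 / code 4 is "Destination Unreachable: fragmentation needed", the message PMTUD depends on. A stripped-down sketch that only builds the permission (no API call); the CIDR and description strings are illustrative, not the provider's real values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// For IpProtocol "icmp", EC2 interprets FromPort as the ICMP type and
	// ToPort as the ICMP code: 3/4 is fragmentation-needed, required for PMTUD.
	mtuPermission := &ec2.IpPermission{
		IpProtocol: aws.String("icmp"),
		FromPort:   aws.Int64(3),
		ToPort:     aws.Int64(4),
		IpRanges: []*ec2.IpRange{{
			CidrIp:      aws.String("0.0.0.0/0"),                // example CIDR; the real code uses the LB's client CIDRs
			Description: aws.String("mtu discovery rule for my-lb"), // illustrative; the real code formats NLBMtuDiscoveryRuleDescription=<lbName>
		}},
	}
	fmt.Println(mtuPermission)
}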
7  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
package aws

import (
+"context"
"fmt"

"github.com/aws/aws-sdk-go/aws"
@@ -65,7 +66,7 @@ func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {

// ListRoutes implements Routes.ListRoutes
// List all routes that match the filter
-func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
+func (c *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
table, err := c.findRouteTable(clusterName)
if err != nil {
return nil, err
@@ -138,7 +139,7 @@ func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCh

// CreateRoute implements Routes.CreateRoute
// Create the described route
-func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
+func (c *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error {
instance, err := c.getInstanceByNodeName(route.TargetNode)
if err != nil {
return err
@@ -198,7 +199,7 @@ func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudpro

// DeleteRoute implements Routes.DeleteRoute
// Delete the specified route
-func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
+func (c *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
table, err := c.findRouteTable(clusterName)
if err != nil {
return err
101  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_test.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
package aws

import (
+"context"
"fmt"
"io"
"reflect"
@@ -159,7 +160,10 @@ func TestReadAWSCloudConfig(t *testing.T) {
if test.aws != nil {
metadata, _ = test.aws.Metadata()
}
-cfg, err := readAWSCloudConfig(test.reader, metadata)
+cfg, err := readAWSCloudConfig(test.reader)
+if err == nil {
+err = updateConfigZone(cfg, metadata)
+}
if test.expectError {
if err == nil {
t.Errorf("Should error for case %s (cfg=%v)", test.name, cfg)
@@ -212,7 +216,11 @@ func TestNewAWSCloud(t *testing.T) {

for _, test := range tests {
t.Logf("Running test case %s", test.name)
-c, err := newAWSCloud(test.reader, test.awsServices)
+cfg, err := readAWSCloudConfig(test.reader)
+var c *Cloud
+if err == nil {
+c, err = newAWSCloud(*cfg, test.awsServices)
+}
if test.expectError {
if err == nil {
t.Errorf("Should error for case %s", test.name)
@@ -232,7 +240,7 @@ func mockInstancesResp(selfInstance *ec2.Instance, instances []*ec2.Instance) (*
awsServices := newMockedFakeAWSServices(TestClusterId)
awsServices.instances = instances
awsServices.selfInstance = selfInstance
-awsCloud, err := newAWSCloud(nil, awsServices)
+awsCloud, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
panic(err)
}
@@ -241,7 +249,7 @@ func mockInstancesResp(selfInstance *ec2.Instance, instances []*ec2.Instance) (*

func mockAvailabilityZone(availabilityZone string) *Cloud {
awsServices := newMockedFakeAWSServices(TestClusterId).WithAz(availabilityZone)
-awsCloud, err := newAWSCloud(nil, awsServices)
+awsCloud, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
panic(err)
}
@@ -264,6 +272,12 @@ func TestNodeAddresses(t *testing.T) {
var instance1 ec2.Instance
var instance2 ec2.Instance

+// ClusterID needs to be set
+var tag ec2.Tag
+tag.Key = aws.String(TagNameKubernetesClusterLegacy)
+tag.Value = aws.String(TestClusterId)
+tags := []*ec2.Tag{&tag}
+
//0
instance0.InstanceId = aws.String("i-0")
instance0.PrivateDnsName = aws.String("instance-same.ec2.internal")
@@ -282,6 +296,7 @@ func TestNodeAddresses(t *testing.T) {
}
instance0.InstanceType = aws.String("c3.large")
instance0.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
+instance0.Tags = tags
state0 := ec2.InstanceState{
Name: aws.String("running"),
}
@@ -293,6 +308,7 @@ func TestNodeAddresses(t *testing.T) {
instance1.PrivateIpAddress = aws.String("192.168.0.2")
instance1.InstanceType = aws.String("c3.large")
instance1.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
+instance1.Tags = tags
state1 := ec2.InstanceState{
Name: aws.String("running"),
}
@@ -305,6 +321,7 @@ func TestNodeAddresses(t *testing.T) {
instance2.PublicIpAddress = aws.String("1.2.3.4")
instance2.InstanceType = aws.String("c3.large")
instance2.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
+instance2.Tags = tags
state2 := ec2.InstanceState{
Name: aws.String("running"),
}
@@ -313,13 +330,13 @@ func TestNodeAddresses(t *testing.T) {
instances := []*ec2.Instance{&instance0, &instance1, &instance2}

aws1, _ := mockInstancesResp(&instance0, []*ec2.Instance{&instance0})
-_, err1 := aws1.NodeAddresses("instance-mismatch.ec2.internal")
+_, err1 := aws1.NodeAddresses(context.TODO(), "instance-mismatch.ec2.internal")
if err1 == nil {
t.Errorf("Should error when no instance found")
}

aws2, _ := mockInstancesResp(&instance2, instances)
-_, err2 := aws2.NodeAddresses("instance-same.ec2.internal")
+_, err2 := aws2.NodeAddresses(context.TODO(), "instance-same.ec2.internal")
if err2 == nil {
t.Errorf("Should error when multiple instances found")
}
@@ -327,7 +344,7 @@ func TestNodeAddresses(t *testing.T) {
aws3, _ := mockInstancesResp(&instance0, instances[0:1])
// change node name so it uses the instance instead of metadata
aws3.selfAWSInstance.nodeName = "foo"
-addrs3, err3 := aws3.NodeAddresses("instance-same.ec2.internal")
+addrs3, err3 := aws3.NodeAddresses(context.TODO(), "instance-same.ec2.internal")
if err3 != nil {
t.Errorf("Should not error when instance found")
}
@@ -343,12 +360,19 @@ func TestNodeAddresses(t *testing.T) {
func TestNodeAddressesWithMetadata(t *testing.T) {
var instance ec2.Instance

+// ClusterID needs to be set
+var tag ec2.Tag
+tag.Key = aws.String(TagNameKubernetesClusterLegacy)
+tag.Value = aws.String(TestClusterId)
+tags := []*ec2.Tag{&tag}
+
instanceName := "instance.ec2.internal"
instance.InstanceId = aws.String("i-0")
instance.PrivateDnsName = &instanceName
instance.PublicIpAddress = aws.String("2.3.4.5")
instance.InstanceType = aws.String("c3.large")
instance.Placement = &ec2.Placement{AvailabilityZone: aws.String("us-east-1a")}
+instance.Tags = tags
state := ec2.InstanceState{
Name: aws.String("running"),
}
@@ -359,7 +383,7 @@ func TestNodeAddressesWithMetadata(t *testing.T) {

awsServices.networkInterfacesMacs = []string{"0a:26:89:f3:9c:f6", "0a:77:64:c4:6a:48"}
awsServices.networkInterfacesPrivateIPs = [][]string{{"192.168.0.1"}, {"192.168.0.2"}}
-addrs, err := awsCloud.NodeAddresses("")
+addrs, err := awsCloud.NodeAddresses(context.TODO(), "")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -374,7 +398,7 @@ func TestGetRegion(t *testing.T) {
if !ok {
t.Fatalf("Unexpected missing zones impl")
}
-zone, err := zones.GetZone()
+zone, err := zones.GetZone(context.TODO())
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@@ -388,7 +412,7 @@ func TestGetRegion(t *testing.T) {

func TestFindVPCID(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
-c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
+c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
@@ -462,7 +486,7 @@ func constructRouteTable(subnetID string, public bool) *ec2.RouteTable {

func TestSubnetIDsinVPC(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
-c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
+c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
@@ -797,7 +821,7 @@ func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
instances := []*ec2.Instance{&terminatedInstance, &runningInstance}
awsServices.instances = append(awsServices.instances, instances...)

-c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
+c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
@@ -817,7 +841,7 @@ func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {

func TestGetInstanceByNodeNameBatching(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
-c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
+c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
var tag ec2.Tag
tag.Key = aws.String(TagNameKubernetesClusterPrefix + TestClusterId)
@@ -844,7 +868,7 @@ func TestGetInstanceByNodeNameBatching(t *testing.T) {

func TestGetVolumeLabels(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
-c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
+c, err := newAWSCloud(CloudConfig{}, awsServices)
assert.Nil(t, err, "Error building aws cloud: %v", err)
volumeId := awsVolumeID("vol-VolumeId")
expectedVolumeRequest := &ec2.DescribeVolumesInput{VolumeIds: []*string{volumeId.awsString()}}
@@ -866,34 +890,34 @@ func TestGetVolumeLabels(t *testing.T) {

func TestDescribeLoadBalancerOnDelete(t *testing.T) {
awsServices := newMockedFakeAWSServices(TestClusterId)
-c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices)
+c, _ := newAWSCloud(CloudConfig{}, awsServices)
|
||||
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
|
||||
|
||||
c.EnsureLoadBalancerDeleted(TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}})
|
||||
c.EnsureLoadBalancerDeleted(context.TODO(), TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}})
|
||||
}
|
||||
|
||||
func TestDescribeLoadBalancerOnUpdate(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, _ := newAWSCloud(CloudConfig{}, awsServices)
|
||||
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
|
||||
|
||||
c.UpdateLoadBalancer(TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}}, []*v1.Node{})
|
||||
c.UpdateLoadBalancer(context.TODO(), TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}}, []*v1.Node{})
|
||||
}
|
||||
|
||||
func TestDescribeLoadBalancerOnGet(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, _ := newAWSCloud(CloudConfig{}, awsServices)
|
||||
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
|
||||
|
||||
c.GetLoadBalancer(TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}})
|
||||
c.GetLoadBalancer(context.TODO(), TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}})
|
||||
}
|
||||
|
||||
func TestDescribeLoadBalancerOnEnsure(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, _ := newAWSCloud(CloudConfig{}, awsServices)
|
||||
awsServices.elb.(*MockedFakeELB).expectDescribeLoadBalancers("aid")
|
||||
|
||||
c.EnsureLoadBalancer(TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}}, []*v1.Node{})
|
||||
c.EnsureLoadBalancer(context.TODO(), TestClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "myservice", UID: "id"}}, []*v1.Node{})
|
||||
}
|
||||
|
||||
func TestBuildListener(t *testing.T) {
|
||||
@ -1122,21 +1146,22 @@ func TestGetLoadBalancerAdditionalTags(t *testing.T) {
|
||||
|
||||
func TestLBExtraSecurityGroupsAnnotation(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, _ := newAWSCloud(CloudConfig{}, awsServices)
|
||||
|
||||
sg1 := "sg-000001"
|
||||
sg2 := "sg-000002"
|
||||
sg1 := map[string]string{ServiceAnnotationLoadBalancerExtraSecurityGroups: "sg-000001"}
|
||||
sg2 := map[string]string{ServiceAnnotationLoadBalancerExtraSecurityGroups: "sg-000002"}
|
||||
sg3 := map[string]string{ServiceAnnotationLoadBalancerExtraSecurityGroups: "sg-000001, sg-000002"}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
extraSGsAnnotation string
|
||||
expectedSGs []string
|
||||
annotations map[string]string
|
||||
expectedSGs []string
|
||||
}{
|
||||
{"No extra SG annotation", "", []string{}},
|
||||
{"Empty extra SGs specified", ", ,,", []string{}},
|
||||
{"SG specified", sg1, []string{sg1}},
|
||||
{"Multiple SGs specified", fmt.Sprintf("%s, %s", sg1, sg2), []string{sg1, sg2}},
|
||||
{"No extra SG annotation", map[string]string{}, []string{}},
|
||||
{"Empty extra SGs specified", map[string]string{ServiceAnnotationLoadBalancerExtraSecurityGroups: ", ,,"}, []string{}},
|
||||
{"SG specified", sg1, []string{sg1[ServiceAnnotationLoadBalancerExtraSecurityGroups]}},
|
||||
{"Multiple SGs specified", sg3, []string{sg1[ServiceAnnotationLoadBalancerExtraSecurityGroups], sg2[ServiceAnnotationLoadBalancerExtraSecurityGroups]}},
|
||||
}
|
||||
|
||||
awsServices.ec2.(*MockedFakeEC2).expectDescribeSecurityGroups(TestClusterId, "k8s-elb-aid", "cluster.test")
|
||||
@ -1145,7 +1170,7 @@ func TestLBExtraSecurityGroupsAnnotation(t *testing.T) {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
serviceName := types.NamespacedName{Namespace: "default", Name: "myservice"}
|
||||
|
||||
sgList, err := c.buildELBSecurityGroupList(serviceName, "aid", test.extraSGsAnnotation)
|
||||
sgList, err := c.buildELBSecurityGroupList(serviceName, "aid", test.annotations)
|
||||
assert.NoError(t, err, "buildELBSecurityGroupList failed")
|
||||
extraSGs := sgList[1:]
|
||||
assert.True(t, sets.NewString(test.expectedSGs...).Equal(sets.NewString(extraSGs...)),
|
||||
@ -1158,7 +1183,7 @@ func TestLBExtraSecurityGroupsAnnotation(t *testing.T) {
|
||||
func TestAddLoadBalancerTags(t *testing.T) {
|
||||
loadBalancerName := "test-elb"
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, _ := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, _ := newAWSCloud(CloudConfig{}, awsServices)
|
||||
|
||||
want := make(map[string]string)
|
||||
want["tag1"] = "val1"
|
||||
@ -1214,7 +1239,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
assert.Nil(t, err, "Error building aws cloud: %v", err)
|
||||
expectedHC := *defaultHC
|
||||
if test.overriddenFieldName != "" { // cater for test case with no overrides
|
||||
@ -1232,7 +1257,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
|
||||
|
||||
t.Run("does not make an API call if the current health check is the same", func(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
assert.Nil(t, err, "Error building aws cloud: %v", err)
|
||||
expectedHC := *defaultHC
|
||||
timeout := int64(3)
|
||||
@ -1254,7 +1279,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
|
||||
|
||||
t.Run("validates resulting expected health check before making an API call", func(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
assert.Nil(t, err, "Error building aws cloud: %v", err)
|
||||
expectedHC := *defaultHC
|
||||
invalidThreshold := int64(1)
|
||||
@ -1270,7 +1295,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
|
||||
|
||||
t.Run("handles invalid override values", func(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
assert.Nil(t, err, "Error building aws cloud: %v", err)
|
||||
annotations := map[string]string{ServiceAnnotationLoadBalancerHCTimeout: "3.3"}
|
||||
|
||||
@ -1282,7 +1307,7 @@ func TestEnsureLoadBalancerHealthCheck(t *testing.T) {
|
||||
|
||||
t.Run("returns error when updating the health check fails", func(t *testing.T) {
|
||||
awsServices := newMockedFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
assert.Nil(t, err, "Error building aws cloud: %v", err)
|
||||
returnErr := fmt.Errorf("throttling error")
|
||||
awsServices.elb.(*MockedFakeELB).expectConfigureHealthCheck(&lbName, defaultHC, returnErr)
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions_test.go
generated
vendored
@ -74,7 +74,7 @@ func TestRecognizesNewRegion(t *testing.T) {
|
||||
}
|
||||
|
||||
awsServices := NewFakeAWSServices(TestClusterId).WithAz(region + "a")
|
||||
_, err := newAWSCloud(nil, awsServices)
|
||||
_, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
if err != nil {
|
||||
t.Errorf("error building AWS cloud: %v", err)
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go
generated
vendored
@ -75,7 +75,7 @@ func (t *awsTagging) init(legacyClusterID string, clusterID string) error {
|
||||
if clusterID != "" {
|
||||
glog.Infof("AWS cloud filtering on ClusterID: %v", clusterID)
|
||||
} else {
|
||||
glog.Warning("AWS cloud - no clusterID filtering applied for shared resources; do not run multiple clusters in this AZ.")
|
||||
return fmt.Errorf("AWS cloud failed to find ClusterID")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags_test.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags_test.go
generated
vendored
@ -19,13 +19,12 @@ package aws
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilterTags(t *testing.T) {
|
||||
awsServices := NewFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
c, err := newAWSCloud(CloudConfig{}, awsServices)
|
||||
if err != nil {
|
||||
t.Errorf("Error building aws cloud: %v", err)
|
||||
return
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go
generated
vendored
@ -119,10 +119,9 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
|
||||
info, err := disk.describeVolume()
|
||||
|
||||
if err != nil {
|
||||
describeError := fmt.Errorf("Error describing volume %s with %v", diskName, err)
|
||||
glog.Warning(describeError)
|
||||
glog.Warning("Error describing volume %s with %v", diskName, err)
|
||||
awsDiskInfo.volumeState = "unknown"
|
||||
return awsDiskInfo, false, describeError
|
||||
return awsDiskInfo, false, err
|
||||
}
|
||||
|
||||
awsDiskInfo.volumeState = aws.StringValue(info.State)
|
||||
|
42
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
42
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
@ -12,18 +12,25 @@ go_library(
|
||||
"azure.go",
|
||||
"azure_backoff.go",
|
||||
"azure_blobDiskController.go",
|
||||
"azure_controllerCommon.go",
|
||||
"azure_cache.go",
|
||||
"azure_client.go",
|
||||
"azure_controller_common.go",
|
||||
"azure_controller_standard.go",
|
||||
"azure_controller_vmss.go",
|
||||
"azure_fakes.go",
|
||||
"azure_file.go",
|
||||
"azure_instance_metadata.go",
|
||||
"azure_instances.go",
|
||||
"azure_loadbalancer.go",
|
||||
"azure_managedDiskController.go",
|
||||
"azure_metrics.go",
|
||||
"azure_routes.go",
|
||||
"azure_standard.go",
|
||||
"azure_storage.go",
|
||||
"azure_storageaccount.go",
|
||||
"azure_util.go",
|
||||
"azure_util_vmss.go",
|
||||
"azure_vmsets.go",
|
||||
"azure_vmss.go",
|
||||
"azure_vmss_cache.go",
|
||||
"azure_wrap.go",
|
||||
"azure_zones.go",
|
||||
],
|
||||
@ -31,6 +38,7 @@ go_library(
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
@ -38,6 +46,7 @@ go_library(
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
|
||||
@ -45,13 +54,15 @@ go_library(
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
@ -59,25 +70,35 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"azure_backoff_test.go",
|
||||
"azure_cache_test.go",
|
||||
"azure_loadbalancer_test.go",
|
||||
"azure_metrics_test.go",
|
||||
"azure_routes_test.go",
|
||||
"azure_standard_test.go",
|
||||
"azure_storage_test.go",
|
||||
"azure_storageaccount_test.go",
|
||||
"azure_test.go",
|
||||
"azure_util_test.go",
|
||||
"azure_vmss_cache_test.go",
|
||||
"azure_vmss_test.go",
|
||||
"azure_wrap_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -90,6 +111,9 @@ filegroup(
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/cloudprovider/providers/azure/auth:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
10
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
@ -1,6 +1,16 @@
|
||||
approvers:
|
||||
- andyzhangx
|
||||
- brendandburns
|
||||
- colemickens
|
||||
- feiskyer
|
||||
- jdumars
|
||||
- karataliu
|
||||
- khenidak
|
||||
reviewers:
|
||||
- andyzhangx
|
||||
- brendandburns
|
||||
- colemickens
|
||||
- feiskyer
|
||||
- jdumars
|
||||
- karataliu
|
||||
- khenidak
|
||||
|
28
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD
generated
vendored
Normal file
28
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["azure_auth.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go
generated
vendored
Normal file
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
)
|
||||
|
||||
// AzureAuthConfig holds auth related part of cloud config
|
||||
type AzureAuthConfig struct {
|
||||
// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
|
||||
Cloud string `json:"cloud" yaml:"cloud"`
|
||||
// The AAD Tenant ID for the Subscription that the cluster is deployed in
|
||||
TenantID string `json:"tenantId" yaml:"tenantId"`
|
||||
// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
|
||||
// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
|
||||
// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
|
||||
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
|
||||
// Use managed service identity for the virtual machine to access Azure ARM APIs
|
||||
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
|
||||
// The ID of the Azure Subscription that the cluster is deployed in
|
||||
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
|
||||
}
|
||||
|
||||
// GetServicePrincipalToken creates a new service principal token based on the configuration
|
||||
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating the OAuth config: %v", err)
|
||||
}
|
||||
|
||||
if config.UseManagedIdentityExtension {
|
||||
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
|
||||
msiEndpoint, err := adal.GetMSIVMEndpoint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromMSI(
|
||||
msiEndpoint,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientSecret) > 0 {
|
||||
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
|
||||
return adal.NewServicePrincipalToken(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
config.AADClientSecret,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
|
||||
glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
|
||||
certData, err := ioutil.ReadFile(config.AADClientCertPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
|
||||
}
|
||||
certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding the client certificate: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromCertificate(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
certificate,
|
||||
privateKey,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
|
||||
}
|
||||
|
||||
// ParseAzureEnvironment returns azure environment by name
|
||||
func ParseAzureEnvironment(cloudName string) (*azure.Environment, error) {
|
||||
var env azure.Environment
|
||||
var err error
|
||||
if cloudName == "" {
|
||||
env = azure.PublicCloud
|
||||
} else {
|
||||
env, err = azure.EnvironmentFromName(cloudName)
|
||||
}
|
||||
return &env, err
|
||||
}
|
||||
|
||||
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
|
||||
// the private RSA key
|
||||
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
privateKey, certificate, err := pkcs12.Decode(pkcs, password)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
|
||||
}
|
||||
rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
|
||||
if !isRsaKey {
|
||||
return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
|
||||
}
|
||||
|
||||
return certificate, rsaPrivateKey, nil
|
||||
}
|
424
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
424
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
@ -17,30 +17,23 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/disk"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -61,12 +54,8 @@ const (
|
||||
// Config holds the configuration parsed from the --cloud-config flag
|
||||
// All fields are required unless otherwise specified
|
||||
type Config struct {
|
||||
// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
|
||||
Cloud string `json:"cloud" yaml:"cloud"`
|
||||
// The AAD Tenant ID for the Subscription that the cluster is deployed in
|
||||
TenantID string `json:"tenantId" yaml:"tenantId"`
|
||||
// The ID of the Azure Subscription that the cluster is deployed in
|
||||
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
|
||||
auth.AzureAuthConfig
|
||||
|
||||
// The name of the resource group that the cluster is deployed in
|
||||
ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
|
||||
// The location of the resource group that the cluster is deployed in
|
||||
@ -87,7 +76,7 @@ type Config struct {
|
||||
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
|
||||
// In other words, if you use multiple agent pools (availability sets), you MUST set this field.
|
||||
PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
|
||||
// The type of azure nodes. Candidate valudes are: vmss and standard.
|
||||
// The type of azure nodes. Candidate values are: vmss and standard.
|
||||
// If not set, it will be default to standard.
|
||||
VMType string `json:"vmType" yaml:"vmType"`
|
||||
// The name of the scale set that should be used as the load balancer backend.
|
||||
@ -96,15 +85,6 @@ type Config struct {
|
||||
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
|
||||
// In other words, if you use multiple agent pools (scale sets), you MUST set this field.
|
||||
PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"`
|
||||
|
||||
// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
|
||||
// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
|
||||
// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
|
||||
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
|
||||
// Enable exponential backoff to manage resource request retries
|
||||
CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"`
|
||||
// Backoff retry limit
|
||||
@ -117,10 +97,14 @@ type Config struct {
|
||||
CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter" yaml:"cloudProviderBackoffJitter"`
|
||||
// Enable rate limiting
|
||||
CloudProviderRateLimit bool `json:"cloudProviderRateLimit" yaml:"cloudProviderRateLimit"`
|
||||
// Rate limit QPS
|
||||
// Rate limit QPS (Read)
|
||||
CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"`
|
||||
// Rate limit Bucket Size
|
||||
CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"`
|
||||
// Rate limit QPS (Write)
|
||||
CloudProviderRateLimitQPSWrite float32 `json:"cloudProviderRateLimitQPSWrite" yaml:"cloudProviderRateLimitQPSWrite"`
|
||||
// Rate limit Bucket Size
|
||||
CloudProviderRateLimitBucketWrite int `json:"cloudProviderRateLimitBucketWrite" yaml:"cloudProviderRateLimitBucketWrite"`
|
||||
|
||||
// Use instance metadata service where possible
|
||||
UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
|
||||
@ -132,76 +116,33 @@ type Config struct {
|
||||
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"`
|
||||
}
|
||||
|
||||
// VirtualMachinesClient defines needed functions for azure network.VirtualMachinesClient
|
||||
type VirtualMachinesClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error)
|
||||
Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error)
|
||||
List(resourceGroupName string) (result compute.VirtualMachineListResult, err error)
|
||||
ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error)
|
||||
}
|
||||
|
||||
// InterfacesClient defines needed functions for azure network.InterfacesClient
|
||||
type InterfacesClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error)
|
||||
Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error)
|
||||
GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error)
|
||||
}
|
||||
|
||||
// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient
|
||||
type LoadBalancersClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error)
|
||||
Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error)
|
||||
List(resourceGroupName string) (result network.LoadBalancerListResult, err error)
|
||||
ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error)
|
||||
}
|
||||
|
||||
// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient
|
||||
type PublicIPAddressesClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error)
|
||||
Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error)
|
||||
List(resourceGroupName string) (result network.PublicIPAddressListResult, err error)
|
||||
ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error)
|
||||
}
|
||||
|
||||
// SubnetsClient defines needed functions for azure network.SubnetsClient
|
||||
type SubnetsClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error)
|
||||
Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error)
|
||||
List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error)
|
||||
}
|
||||
|
||||
// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient
|
||||
type SecurityGroupsClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error)
|
||||
Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error)
|
||||
List(resourceGroupName string) (result network.SecurityGroupListResult, err error)
|
||||
}
|
||||
|
||||
// Cloud holds the config and clients
|
||||
type Cloud struct {
|
||||
Config
|
||||
Environment azure.Environment
|
||||
RoutesClient network.RoutesClient
|
||||
SubnetsClient SubnetsClient
|
||||
InterfacesClient InterfacesClient
|
||||
RouteTablesClient network.RouteTablesClient
|
||||
LoadBalancerClient LoadBalancersClient
|
||||
PublicIPAddressesClient PublicIPAddressesClient
|
||||
SecurityGroupsClient SecurityGroupsClient
|
||||
VirtualMachinesClient VirtualMachinesClient
|
||||
StorageAccountClient storage.AccountsClient
|
||||
DisksClient disk.DisksClient
|
||||
operationPollRateLimiter flowcontrol.RateLimiter
|
||||
resourceRequestBackoff wait.Backoff
|
||||
metadata *InstanceMetadata
|
||||
Environment azure.Environment
|
||||
RoutesClient RoutesClient
|
||||
SubnetsClient SubnetsClient
|
||||
InterfacesClient InterfacesClient
|
||||
RouteTablesClient RouteTablesClient
|
||||
LoadBalancerClient LoadBalancersClient
|
||||
PublicIPAddressesClient PublicIPAddressesClient
|
||||
SecurityGroupsClient SecurityGroupsClient
|
||||
VirtualMachinesClient VirtualMachinesClient
|
||||
StorageAccountClient StorageAccountClient
|
||||
DisksClient DisksClient
|
||||
FileClient FileClient
|
||||
resourceRequestBackoff wait.Backoff
|
||||
metadata *InstanceMetadata
|
||||
vmSet VMSet
|
||||
|
||||
// Clients for vmss.
|
||||
VirtualMachineScaleSetsClient compute.VirtualMachineScaleSetsClient
|
||||
VirtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient
|
||||
VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient
|
||||
VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient
|
||||
|
||||
vmCache *timedCache
|
||||
lbCache *timedCache
|
||||
nsgCache *timedCache
|
||||
rtCache *timedCache
|
||||
|
||||
*BlobDiskController
|
||||
*ManagedDiskController
|
||||
@ -212,179 +153,86 @@ func init() {
|
||||
cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
|
||||
}
|
||||
|
||||
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
|
||||
// the private RSA key
|
||||
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
privateKey, certificate, err := pkcs12.Decode(pkcs, password)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
|
||||
}
|
||||
rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
|
||||
if !isRsaKey {
|
||||
return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
|
||||
}
|
||||
|
||||
return certificate, rsaPrivateKey, nil
|
||||
}
|
||||
|
||||
// GetServicePrincipalToken creates a new service principal token based on the configuration
|
||||
func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating the OAuth config: %v", err)
|
||||
}
|
||||
|
||||
if config.UseManagedIdentityExtension {
|
||||
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
|
||||
msiEndpoint, err := adal.GetMSIVMEndpoint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromMSI(
|
||||
msiEndpoint,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientSecret) > 0 {
|
||||
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
|
||||
return adal.NewServicePrincipalToken(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
config.AADClientSecret,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
|
||||
glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
|
||||
certData, err := ioutil.ReadFile(config.AADClientCertPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
|
||||
}
|
||||
certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding the client certificate: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromCertificate(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
certificate,
|
||||
privateKey,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
|
||||
}
|
||||
|
||||
// NewCloud returns a Cloud with initialized clients
|
||||
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
config, env, err := ParseConfig(configReader)
|
||||
config, err := parseConfig(configReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
env, err := auth.ParseAzureEnvironment(config.Cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// operationPollRateLimiter.Accept() is a no-op if rate limits are configured off.
|
||||
operationPollRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
|
||||
operationPollRateLimiterWrite := flowcontrol.NewFakeAlwaysRateLimiter()
|
||||
|
||||
// If reader is provided (and no writer) we will
|
||||
// use the same value for both.
|
||||
if config.CloudProviderRateLimit {
|
||||
// Assign rate limit defaults if no configuration was passed in
|
||||
if config.CloudProviderRateLimitQPS == 0 {
|
||||
config.CloudProviderRateLimitQPS = rateLimitQPSDefault
|
||||
}
|
||||
if config.CloudProviderRateLimitBucket == 0 {
|
||||
config.CloudProviderRateLimitBucket = rateLimitBucketDefault
|
||||
}
|
||||
if config.CloudProviderRateLimitQPSWrite == 0 {
|
||||
config.CloudProviderRateLimitQPSWrite = rateLimitQPSDefault
|
||||
}
|
||||
if config.CloudProviderRateLimitBucketWrite == 0 {
|
||||
config.CloudProviderRateLimitBucketWrite = rateLimitBucketDefault
|
||||
}
|
||||
|
||||
operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
|
||||
config.CloudProviderRateLimitQPS,
|
||||
config.CloudProviderRateLimitBucket)
|
||||
|
||||
operationPollRateLimiterWrite = flowcontrol.NewTokenBucketRateLimiter(
|
||||
config.CloudProviderRateLimitQPSWrite,
|
||||
config.CloudProviderRateLimitBucketWrite)
|
||||
|
||||
glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
|
||||
config.CloudProviderRateLimitQPS,
|
||||
config.CloudProviderRateLimitBucket)
|
||||
|
||||
glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
|
||||
config.CloudProviderRateLimitQPSWrite,
|
||||
config.CloudProviderRateLimitBucketWrite)
|
||||
|
||||
}
|
||||
|
||||
azClientConfig := &azClientConfig{
|
||||
subscriptionID: config.SubscriptionID,
|
||||
resourceManagerEndpoint: env.ResourceManagerEndpoint,
|
||||
servicePrincipalToken: servicePrincipalToken,
|
||||
rateLimiterReader: operationPollRateLimiter,
|
||||
rateLimiterWriter: operationPollRateLimiterWrite,
|
||||
}
|
||||
az := Cloud{
|
||||
Config: *config,
|
||||
Environment: *env,
|
||||
}
|
||||
|
||||
servicePrincipalToken, err := GetServicePrincipalToken(config, env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subnetsClient := network.NewSubnetsClient(az.SubscriptionID)
|
||||
subnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
subnetsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&subnetsClient.Client)
|
||||
az.SubnetsClient = subnetsClient
|
||||
|
||||
az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID)
|
||||
az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.RouteTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.RouteTablesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&az.RouteTablesClient.Client)
|
||||
|
||||
az.RoutesClient = network.NewRoutesClient(az.SubscriptionID)
|
||||
az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.RoutesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.RoutesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&az.RoutesClient.Client)
|
||||
|
||||
interfacesClient := network.NewInterfacesClient(az.SubscriptionID)
|
||||
interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
interfacesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&interfacesClient.Client)
|
||||
az.InterfacesClient = interfacesClient
|
||||
|
||||
loadBalancerClient := network.NewLoadBalancersClient(az.SubscriptionID)
|
||||
loadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
loadBalancerClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&loadBalancerClient.Client)
|
||||
az.LoadBalancerClient = loadBalancerClient
|
||||
|
||||
virtualMachinesClient := compute.NewVirtualMachinesClient(az.SubscriptionID)
|
||||
virtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
virtualMachinesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&virtualMachinesClient.Client)
|
||||
az.VirtualMachinesClient = virtualMachinesClient
|
||||
|
||||
publicIPAddressClient := network.NewPublicIPAddressesClient(az.SubscriptionID)
|
||||
publicIPAddressClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
publicIPAddressClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&publicIPAddressClient.Client)
|
||||
az.PublicIPAddressesClient = publicIPAddressClient
|
||||
|
||||
securityGroupsClient := network.NewSecurityGroupsClient(az.SubscriptionID)
|
||||
securityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
securityGroupsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&securityGroupsClient.Client)
|
||||
az.SecurityGroupsClient = securityGroupsClient
|
||||
|
||||
virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID)
|
||||
az.VirtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.VirtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.VirtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&virtualMachineScaleSetVMsClient.Client)
|
||||
az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
|
||||
|
||||
virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID)
|
||||
az.VirtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.VirtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.VirtualMachineScaleSetsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&virtualMachineScaleSetsClient.Client)
|
||||
az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
|
||||
|
||||
az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
|
||||
az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
configureUserAgent(&az.StorageAccountClient.Client)
|
||||
|
||||
az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
|
||||
az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
configureUserAgent(&az.DisksClient.Client)
|
||||
|
||||
// Conditionally configure rate limits
|
||||
if az.CloudProviderRateLimit {
|
||||
// Assign rate limit defaults if no configuration was passed in
|
||||
if az.CloudProviderRateLimitQPS == 0 {
|
||||
az.CloudProviderRateLimitQPS = rateLimitQPSDefault
|
||||
}
|
||||
if az.CloudProviderRateLimitBucket == 0 {
|
||||
az.CloudProviderRateLimitBucket = rateLimitBucketDefault
|
||||
}
|
||||
az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
|
||||
az.CloudProviderRateLimitQPS,
|
||||
az.CloudProviderRateLimitBucket)
|
||||
glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%g, bucket=%d",
|
||||
az.CloudProviderRateLimitQPS,
|
||||
az.CloudProviderRateLimitBucket)
|
||||
} else {
|
||||
// if rate limits are configured off, az.operationPollRateLimiter.Accept() is a no-op
|
||||
az.operationPollRateLimiter = flowcontrol.NewFakeAlwaysRateLimiter()
|
||||
DisksClient: newAzDisksClient(azClientConfig),
|
||||
RoutesClient: newAzRoutesClient(azClientConfig),
|
||||
SubnetsClient: newAzSubnetsClient(azClientConfig),
|
||||
InterfacesClient: newAzInterfacesClient(azClientConfig),
|
||||
RouteTablesClient: newAzRouteTablesClient(azClientConfig),
|
||||
LoadBalancerClient: newAzLoadBalancersClient(azClientConfig),
|
||||
SecurityGroupsClient: newAzSecurityGroupsClient(azClientConfig),
|
||||
StorageAccountClient: newAzStorageAccountClient(azClientConfig),
|
||||
VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig),
|
||||
PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig),
|
||||
VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig),
|
||||
VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
|
||||
FileClient: &azureFileClient{env: *env},
|
||||
}
|
||||
|
||||
// Conditionally configure resource request backoff
|
||||
@ -421,44 +269,59 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
|
||||
}
|
||||
|
||||
if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
|
||||
az.vmSet, err = newScaleSet(&az)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
az.vmSet = newAvailabilitySet(&az)
|
||||
}
|
||||
|
||||
az.vmCache, err = az.newVMCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.lbCache, err = az.newLBCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.nsgCache, err = az.newNSGCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.rtCache, err = az.newRouteTableCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := initDiskControllers(&az); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &az, nil
|
||||
}
|
||||
|
||||
// ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file
|
||||
func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) {
|
||||
// parseConfig returns a parsed configuration for an Azure cloudprovider config file
|
||||
func parseConfig(configReader io.Reader) (*Config, error) {
|
||||
var config Config
|
||||
var env azure.Environment
|
||||
|
||||
if configReader == nil {
|
||||
return &config, &env, nil
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
configContents, err := ioutil.ReadAll(configReader)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(configContents, &config)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.Cloud == "" {
|
||||
env = azure.PublicCloud
|
||||
} else {
|
||||
env, err = azure.EnvironmentFromName(config.Cloud)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if config.VMType != "" {
|
||||
config.VMType = strings.ToLower(config.VMType)
|
||||
}
|
||||
|
||||
return &config, &env, nil
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
|
||||
@ -489,11 +352,6 @@ func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
|
||||
func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID
|
||||
func (az *Cloud) HasClusterID() bool {
|
||||
return true
|
||||
@ -518,15 +376,9 @@ func initDiskControllers(az *Cloud) error {
|
||||
// needed by both blob disk and managed disk controllers
|
||||
|
||||
common := &controllerCommon{
|
||||
aadResourceEndPoint: az.Environment.ServiceManagementEndpoint,
|
||||
clientID: az.AADClientID,
|
||||
clientSecret: az.AADClientSecret,
|
||||
location: az.Location,
|
||||
storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
|
||||
managementEndpoint: az.Environment.ResourceManagerEndpoint,
|
||||
resourceGroup: az.ResourceGroup,
|
||||
tenantID: az.TenantID,
|
||||
tokenEndPoint: az.Environment.ActiveDirectoryEndpoint,
|
||||
subscriptionID: az.SubscriptionID,
|
||||
cloud: az,
|
||||
}
|
||||
|
193
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
193
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
@ -17,13 +17,17 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
// requestBackoff if backoff is disabled in cloud provider it
|
||||
@ -42,53 +46,22 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
|
||||
}
|
||||
|
||||
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
|
||||
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, bool, error) {
|
||||
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
|
||||
var machine compute.VirtualMachine
|
||||
var exists bool
|
||||
var retryErr error
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
machine, exists, retryErr = az.getVirtualMachine(name)
|
||||
machine, retryErr = az.getVirtualMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("backoff: success")
|
||||
glog.V(2).Info("backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return machine, exists, err
|
||||
}
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = retryErr
|
||||
}
|
||||
|
||||
// GetScaleSetsVMWithRetry invokes az.getScaleSetsVM with exponential backoff retry
|
||||
func (az *Cloud) GetScaleSetsVMWithRetry(name types.NodeName) (compute.VirtualMachineScaleSetVM, bool, error) {
|
||||
var machine compute.VirtualMachineScaleSetVM
|
||||
var exists bool
|
||||
err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) {
|
||||
var retryErr error
|
||||
machine, exists, retryErr = az.getVmssVirtualMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("GetScaleSetsVMWithRetry backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(10).Infof("GetScaleSetsVMWithRetry backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return machine, exists, err
|
||||
}

// VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry
func (az *Cloud) VirtualMachineClientGetWithRetry(resourceGroup, vmName string, types compute.InstanceViewTypes) (compute.VirtualMachine, error) {
	var machine compute.VirtualMachine
	err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		var retryErr error
		az.operationPollRateLimiter.Accept()
		machine, retryErr = az.VirtualMachinesClient.Get(resourceGroup, vmName, types)
		if retryErr != nil {
			glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
			return false, nil
		}
		glog.V(2).Infof("backoff: success")
		return true, nil
	})
	return machine, err
}
|
||||
|
||||
@ -98,10 +71,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine,
|
||||
var result compute.VirtualMachineListResult
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("VirtualMachinesClient.List(%v): start", az.ResourceGroup)
|
||||
result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup)
|
||||
glog.V(10).Infof("VirtualMachinesClient.List(%v): end", az.ResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
@ -123,10 +93,7 @@ func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine,
|
||||
if result.NextLink != nil {
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): start", az.ResourceGroup)
|
||||
result, retryErr = az.VirtualMachinesClient.ListNextResults(result)
|
||||
glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): end", az.ResourceGroup)
|
||||
result, retryErr = az.VirtualMachinesClient.ListNextResults(az.ResourceGroup, result)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup, retryErr)
|
||||
@ -155,7 +122,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("backoff: success")
|
||||
glog.V(2).Info("backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return ip, err
|
||||
@ -164,26 +131,32 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
|
||||
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): start", *sg.Name)
|
||||
respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
done, err := processRetryResponse(resp.Response, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after updating
|
||||
az.nsgCache.Delete(*sg.Name)
|
||||
}
|
||||
return done, err
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): start", *lb.Name)
|
||||
respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
done, err := processRetryResponse(resp.Response, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after updating
|
||||
az.lbCache.Delete(*lb.Name)
|
||||
}
|
||||
return done, err
|
||||
})
|
||||
}
|
||||
|
||||
@ -194,10 +167,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
|
||||
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.List(%v): start", az.ResourceGroup)
|
||||
result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup)
|
||||
glog.V(10).Infof("LoadBalancerClient.List(%v): end", az.ResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
@ -220,10 +190,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
|
||||
if result.NextLink != nil {
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): start", az.ResourceGroup)
|
||||
result, retryErr = az.LoadBalancerClient.ListNextResults(result)
|
||||
glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): end", az.ResourceGroup)
|
||||
result, retryErr = az.LoadBalancerClient.ListNextResults(az.ResourceGroup, result)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
@ -243,23 +210,20 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
|
||||
return allLBs, nil
|
||||
}
|
||||
|
||||
// ListPIPWithRetry list the PIP resources in az.ResourceGroup
|
||||
func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
|
||||
// ListPIPWithRetry list the PIP resources in the given resource group
|
||||
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
|
||||
allPIPs := []network.PublicIPAddress{}
|
||||
var result network.PublicIPAddressListResult
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", az.ResourceGroup)
|
||||
result, retryErr = az.PublicIPAddressesClient.List(az.ResourceGroup)
|
||||
glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", az.ResourceGroup)
|
||||
result, retryErr = az.PublicIPAddressesClient.List(pipResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
pipResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", az.ResourceGroup)
|
||||
glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
@ -271,21 +235,18 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
|
||||
allPIPs = append(allPIPs, *result.Value...)
|
||||
appendResults = false
|
||||
|
||||
// follow the next link to get all the vms for resource group
|
||||
// follow the next link to get all the pip resources for resource group
|
||||
if result.NextLink != nil {
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", az.ResourceGroup)
|
||||
result, retryErr = az.PublicIPAddressesClient.ListNextResults(result)
|
||||
glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", az.ResourceGroup)
|
||||
result, retryErr = az.PublicIPAddressesClient.ListNextResults(az.ResourceGroup, result)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
pipResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", az.ResourceGroup)
|
||||
glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", pipResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
@ -299,14 +260,12 @@ func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
|
||||
}
|
||||
|
||||
// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error {
|
||||
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): start", *pip.Name)
|
||||
respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil)
|
||||
respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(pipResourceGroup, *pip.Name, pip, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): end", *pip.Name)
|
||||
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
})
|
||||
}
|
||||
@ -314,8 +273,6 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error {
|
||||
// CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): start", *nic.Name)
|
||||
respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
@ -325,14 +282,11 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
|
||||
}
|
||||
|
||||
// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeletePublicIPWithRetry(pipName string) error {
|
||||
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): start", pipName)
|
||||
respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil)
|
||||
respChan, errChan := az.PublicIPAddressesClient.Delete(pipResourceGroup, pipName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): end", pipName)
|
||||
return processRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
@ -340,25 +294,24 @@ func (az *Cloud) DeletePublicIPWithRetry(pipName string) error {
|
||||
// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.Delete(%s): start", lbName)
|
||||
respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("LoadBalancerClient.Delete(%s): end", lbName)
|
||||
return processRetryResponse(resp, err)
|
||||
done, err := processRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after deleting
|
||||
az.lbCache.Delete(lbName)
|
||||
}
|
||||
return done, err
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", *routeTable.Name)
|
||||
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", *routeTable.Name)
|
||||
return processRetryResponse(resp.Response, err)
|
||||
})
|
||||
}
|
||||
@ -366,8 +319,6 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable
|
||||
// CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): start", *route.Name)
|
||||
respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
@ -379,8 +330,6 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
|
||||
// DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RoutesClient.Delete(%s): start", az.RouteTableName)
|
||||
respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
@ -392,8 +341,6 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
|
||||
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): start", vmName)
|
||||
respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
@ -402,6 +349,15 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualM
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters computepreview.VirtualMachineScaleSetVM) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
		glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
		return processHTTPRetryResponse(resp, err)
	})
}

// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
	if isSuccessHTTPResponse(resp) {
@ -413,8 +369,8 @@ func processRetryResponse(resp autorest.Response, err error) (bool, error) {
		// suppress the error object so that backoff process continues
		return false, nil
	}
	// Fall-through: stop periodic backoff, return error object from most recent request
	return true, err
	// Fall-through: stop periodic backoff
	return true, nil
}

// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
@ -437,3 +393,36 @@ func isSuccessHTTPResponse(resp autorest.Response) bool {
	}
	return false
}

func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
	if err != nil {
		return true
	}

	if resp != nil {
		// HTTP 4xx or 5xx suggests we should retry
		if 399 < resp.StatusCode && resp.StatusCode < 600 {
			return true
		}
	}

	return false
}

func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
	if resp != nil {
		// HTTP 2xx suggests a successful response
		if 199 < resp.StatusCode && resp.StatusCode < 300 {
			return true, nil
		}
	}

	if shouldRetryHTTPRequest(resp, err) {
		glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
		// suppress the error object so that backoff process continues
		return false, nil
	}

	// Fall-through: stop periodic backoff
	return true, nil
}
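processHTTPRetryResponse treats any 2xx as a terminal success, a transport error or a 4xx/5xx status as retriable (false, nil), and everything else, for example a 3xx, as a terminal stop with a nil error. A small sketch that exercises this classification, in the spirit of the tests in azure_backoff_test.go below; the classify function is a hypothetical illustration, not part of the vendored file (it assumes fmt and net/http are imported):

func classify() {
	cases := []struct {
		code int
		err  error
	}{
		{code: http.StatusOK},                                // 2xx           -> done, nil
		{code: http.StatusInternalServerError},               // 5xx           -> retry
		{code: http.StatusBadRequest},                        // 4xx           -> retry
		{code: http.StatusSeeOther, err: fmt.Errorf("boom")}, // transport err -> retry
		{code: http.StatusSeeOther},                          // other         -> done, nil
	}
	for _, c := range cases {
		done, err := processHTTPRetryResponse(&http.Response{StatusCode: c.code}, c.err)
		fmt.Printf("code=%d err=%v -> done=%v err=%v\n", c.code, c.err, done, err)
	}
}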
|
||||
|
148 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff_test.go generated vendored Normal file
@ -0,0 +1,148 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
func TestShouldRetry(t *testing.T) {
|
||||
tests := []struct {
|
||||
code int
|
||||
err error
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
code: http.StatusBadRequest,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusInternalServerError,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
err: fmt.Errorf("some error"),
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
code: 399,
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resp := autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: test.code,
|
||||
},
|
||||
}
|
||||
res := shouldRetryAPIRequest(resp, test.err)
|
||||
if res != test.expected {
|
||||
t.Errorf("expected: %v, saw: %v", test.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSuccessResponse(t *testing.T) {
|
||||
tests := []struct {
|
||||
code int
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
code: http.StatusNotFound,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusInternalServerError,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resp := autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: test.code,
|
||||
},
|
||||
}
|
||||
res := isSuccessHTTPResponse(resp)
|
||||
if res != test.expected {
|
||||
t.Errorf("expected: %v, saw: %v", test.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRetryResponse(t *testing.T) {
|
||||
tests := []struct {
|
||||
code int
|
||||
err error
|
||||
stop bool
|
||||
}{
|
||||
{
|
||||
code: http.StatusBadRequest,
|
||||
stop: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusInternalServerError,
|
||||
stop: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusSeeOther,
|
||||
err: fmt.Errorf("some error"),
|
||||
stop: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusSeeOther,
|
||||
stop: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
stop: true,
|
||||
},
|
||||
{
|
||||
code: 399,
|
||||
stop: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resp := autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: test.code,
|
||||
},
|
||||
}
|
||||
res, err := processRetryResponse(resp, test.err)
|
||||
if res != test.stop {
|
||||
t.Errorf("expected: %v, saw: %v", test.stop, res)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
186 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go generated vendored
@ -22,10 +22,8 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
@ -60,15 +58,11 @@ type BlobDiskController struct {
|
||||
}
|
||||
|
||||
var (
|
||||
defaultContainerName = ""
|
||||
storageAccountNamePrefix = ""
|
||||
storageAccountNameMatch = ""
|
||||
accountsLock = &sync.Mutex{}
|
||||
accountsLock = &sync.Mutex{}
|
||||
)
|
||||
|
||||
func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
|
||||
c := BlobDiskController{common: common}
|
||||
c.setUniqueStrings()
|
||||
|
||||
// get accounts
|
||||
accounts, err := c.getAllStorageAccounts()
|
||||
@ -84,46 +78,26 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
|
||||
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
|
||||
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
|
||||
// fits storage type and location.
|
||||
func (c *BlobDiskController) CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) {
|
||||
var err error
|
||||
accounts := []accountWithLocation{}
|
||||
if len(storageAccount) > 0 {
|
||||
accounts = append(accounts, accountWithLocation{Name: storageAccount})
|
||||
} else {
|
||||
// find a storage account
|
||||
accounts, err = c.common.cloud.getStorageAccounts()
|
||||
if err != nil {
|
||||
// TODO: create a storage account and container
|
||||
return "", "", 0, err
|
||||
}
|
||||
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
|
||||
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
|
||||
if err != nil {
|
||||
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
for _, account := range accounts {
|
||||
glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
|
||||
if (storageAccountType == "" || account.StorageType == storageAccountType) && (location == "" || account.Location == location) || len(storageAccount) > 0 {
|
||||
// find the access key with this account
|
||||
key, err := c.common.cloud.getStorageAccesskey(account.Name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("no key found for storage account %s", account.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
client, err := azstorage.NewBasicClientOnSovereignCloud(account.Name, key, c.common.cloud.Environment)
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
blobClient := client.GetBlobService()
|
||||
|
||||
// create a page blob in this account's vhd container
|
||||
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account.Name, name, vhdContainerName, int64(requestGB))
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
|
||||
return diskName, diskURI, requestGB, err
|
||||
}
|
||||
client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment)
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
return "", "", 0, fmt.Errorf("failed to find a matching storage account")
|
||||
blobClient := client.GetBlobService()
|
||||
|
||||
// create a page blob in this account's vhd container
|
||||
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB))
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
|
||||
return diskName, diskURI, requestGB, err
|
||||
}
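A caller-side sketch of the reworked CreateVolume signature; the receiver, blob name, storage account name, SKU string and location below are placeholder values for illustration, not values taken from this commit:

	diskName, diskURI, sizeGB, err := c.CreateVolume("pvc-1234.vhd", "examplestorageacct", "Standard_LRS", "eastus", 10)
	if err != nil {
		return err
	}
	glog.V(2).Infof("azureDisk - provisioned %s (%d GB) at %s", diskName, sizeGB, diskURI)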
|
||||
|
||||
// DeleteVolume deletes a VHD blob
|
||||
@ -252,7 +226,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB))
|
||||
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -281,9 +255,9 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName)
|
||||
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
|
||||
|
||||
container := blobSvc.GetContainerReference(defaultContainerName)
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
blob := container.GetBlobReference(vhdName)
|
||||
_, err = blob.DeleteIfExists(nil)
|
||||
|
||||
@ -292,26 +266,13 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
c.accounts[storageAccountName].diskCount = int32(diskCount)
|
||||
} else {
|
||||
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
|
||||
return nil // we have failed to aquire a new count. not an error condition
|
||||
return nil // we have failed to acquire a new count. not an error condition
|
||||
}
|
||||
}
|
||||
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
|
||||
return err
|
||||
}
|
||||
|
||||
//Sets unique strings to be used as accountnames && || blob containers names
|
||||
func (c *BlobDiskController) setUniqueStrings() {
|
||||
uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID
|
||||
hash := MakeCRC32(uniqueString)
|
||||
//used to generate a unqie container name used by this cluster PVC
|
||||
defaultContainerName = hash
|
||||
|
||||
storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash)
|
||||
// Used to filter relevant accounts (accounts used by shared PVC)
|
||||
storageAccountNameMatch = storageAccountNamePrefix
|
||||
// Used as a template to create new names for relevant accounts
|
||||
storageAccountNamePrefix = storageAccountNamePrefix + "%s"
|
||||
}
|
||||
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
|
||||
if account, exists := c.accounts[SAName]; exists && account.key != "" {
|
||||
return c.accounts[SAName].key, nil
|
||||
@ -359,13 +320,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
var err error
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
|
||||
// short circut the check via local cache
|
||||
// short circuit the check via local cache
|
||||
// we are forgiving the fact that account may not be in cache yet
|
||||
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
|
||||
return nil
|
||||
}
|
||||
|
||||
// not cached, check existance and readiness
|
||||
// not cached, check existence and readiness
|
||||
bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)
|
||||
|
||||
// account does not exist
|
||||
@ -383,7 +344,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
counter = counter + 1
|
||||
// check if we passed the max sleep
|
||||
if counter >= 20 {
|
||||
return fmt.Errorf("azureDisk - timeout waiting to aquire lock to validate account:%s readiness", storageAccountName)
|
||||
return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -392,7 +353,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
c.accounts[storageAccountName].isValidating = 0
|
||||
}()
|
||||
|
||||
// short circut the check again.
|
||||
// short circuit the check again.
|
||||
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
|
||||
return nil
|
||||
}
|
||||
@ -426,13 +387,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
|
||||
return err
|
||||
}
|
||||
|
||||
container := blobSvc.GetContainerReference(defaultContainerName)
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bCreated {
|
||||
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName)
|
||||
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
|
||||
}
|
||||
|
||||
// flag so we no longer have to check on ARM
|
||||
@ -459,7 +420,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
|
||||
}
|
||||
params := azstorage.ListBlobsParameters{}
|
||||
|
||||
container := blobSvc.GetContainerReference(defaultContainerName)
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
response, err := container.ListBlobs(params)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@ -471,7 +432,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
|
||||
accountListResult, err := c.common.cloud.StorageAccountClient.List()
|
||||
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(c.common.resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -481,11 +442,11 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount
|
||||
|
||||
accounts := make(map[string]*storageAccountState)
|
||||
for _, v := range *accountListResult.Value {
|
||||
if strings.Index(*v.Name, storageAccountNameMatch) != 0 {
|
||||
if v.Name == nil || v.Sku == nil {
|
||||
glog.Info("azureDisk - accountListResult Name or Sku is nil")
|
||||
continue
|
||||
}
|
||||
if v.Name == nil || v.Sku == nil {
|
||||
glog.Infof("azureDisk - accountListResult Name or Sku is nil")
|
||||
if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
|
||||
continue
|
||||
}
|
||||
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
|
||||
@ -519,7 +480,7 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto
|
||||
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType))
|
||||
glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
|
||||
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storageAccountType},
|
||||
@ -555,13 +516,13 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
|
||||
countAccounts := 0 // account of this type.
|
||||
for _, v := range c.accounts {
|
||||
// filter out any stand-alone disks/accounts
|
||||
if strings.Index(v.name, storageAccountNameMatch) != 0 {
|
||||
if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
// note: we compute avge stratified by type.
|
||||
// this to enable user to grow per SA type to avoid low
|
||||
//avg utilization on one account type skewing all data.
|
||||
// note: we compute avg stratified by type.
|
||||
// this is to enable user to grow per SA type to avoid low
|
||||
// avg utilization on one account type skewing all data.
|
||||
|
||||
if v.saType == storageAccountType {
|
||||
// compute average
|
||||
@ -574,7 +535,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
|
||||
// empty account
|
||||
if dCount == 0 {
|
||||
glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
|
||||
return v.name, nil // shortcircut, avg is good and no need to adjust
|
||||
return v.name, nil // short circuit, avg is good and no need to adjust
|
||||
}
|
||||
// if this account is less allocated
|
||||
if dCount < maxDiskCount {
|
||||
@ -587,7 +548,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
|
||||
// if we failed to find storageaccount
|
||||
if SAName == "" {
|
||||
glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
|
||||
SAName = getAccountNameForNum(c.getNextAccountNum())
|
||||
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
|
||||
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -600,10 +561,10 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
|
||||
avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
|
||||
aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
|
||||
|
||||
// avg are not create and we should craete more accounts if we can
|
||||
// avg are not create and we should create more accounts if we can
|
||||
if aboveAvg && countAccounts < maxStorageAccounts {
|
||||
glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
|
||||
SAName = getAccountNameForNum(c.getNextAccountNum())
|
||||
glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
|
||||
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
|
||||
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -611,43 +572,15 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
|
||||
return SAName, nil
|
||||
}
|
||||
|
||||
// avergates are not ok and we are at capacity(max storage accounts allowed)
|
||||
// averages are not ok and we are at capacity (max storage accounts allowed)
|
||||
if aboveAvg && countAccounts == maxStorageAccounts {
|
||||
glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
|
||||
glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
|
||||
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
|
||||
}
|
||||
|
||||
// we found a storage accounts && [ avg are ok || we reached max sa count ]
|
||||
return SAName, nil
|
||||
}
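The growth decision in findSANameForDisk comes down to a single ratio; a worked sketch using the constants defined in azure_controller_common.go (maxDisksPerStorageAccounts = 60, storageAccountUtilizationBeforeGrowing = 0.5), with the disk counts chosen purely as an example:

func exampleGrowthCheck() bool {
	disksAfter := 65   // two shared accounts holding 64 disks, plus the one being created
	countAccounts := 2 // shared accounts currently in use
	avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts) // 65/120 ≈ 0.54
	return avgUtilization > storageAccountUtilizationBeforeGrowing                            // 0.54 > 0.5, so a new shared account is created if countAccounts < maxStorageAccounts
}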
|
||||
func (c *BlobDiskController) getNextAccountNum() int {
|
||||
max := 0
|
||||
|
||||
for k := range c.accounts {
|
||||
// filter out accounts that are for standalone
|
||||
if strings.Index(k, storageAccountNameMatch) != 0 {
|
||||
continue
|
||||
}
|
||||
num := getAccountNumFromName(k)
|
||||
if num > max {
|
||||
max = num
|
||||
}
|
||||
}
|
||||
|
||||
return max + 1
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error {
|
||||
resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err)
|
||||
}
|
||||
|
||||
c.removeAccountState(storageAccountName)
|
||||
|
||||
glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName)
|
||||
return nil
|
||||
}
|
||||
|
||||
//Gets storage account exist, provisionStatus, Error if any
|
||||
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
|
||||
@ -667,33 +600,6 @@ func (c *BlobDiskController) addAccountState(key string, state *storageAccountSt
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) removeAccountState(key string) {
|
||||
accountsLock.Lock()
|
||||
defer accountsLock.Unlock()
|
||||
delete(c.accounts, key)
|
||||
}
|
||||
|
||||
// pads account num with zeros as needed
|
||||
func getAccountNameForNum(num int) string {
|
||||
sNum := strconv.Itoa(num)
|
||||
missingZeros := 3 - len(sNum)
|
||||
strZero := ""
|
||||
for missingZeros > 0 {
|
||||
strZero = strZero + "0"
|
||||
missingZeros = missingZeros - 1
|
||||
}
|
||||
|
||||
sNum = strZero + sNum
|
||||
return fmt.Sprintf(storageAccountNamePrefix, sNum)
|
||||
}
|
||||
|
||||
func getAccountNumFromName(accountName string) int {
|
||||
nameLen := len(accountName)
|
||||
num, _ := strconv.Atoi(accountName[nameLen-3:])
|
||||
|
||||
return num
|
||||
}
|
||||
|
||||
func createVHDHeader(size uint64) ([]byte, error) {
|
||||
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
|
||||
b := new(bytes.Buffer)
|
||||
|
124 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache.go generated vendored Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// getFunc defines a getter function for timedCache.
|
||||
type getFunc func(key string) (interface{}, error)
|
||||
|
||||
// cacheEntry is the internal structure stores inside TTLStore.
|
||||
type cacheEntry struct {
|
||||
key string
|
||||
data interface{}
|
||||
|
||||
// The lock to ensure not updating same entry simultaneously.
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// cacheKeyFunc defines the key function required in TTLStore.
|
||||
func cacheKeyFunc(obj interface{}) (string, error) {
|
||||
return obj.(*cacheEntry).key, nil
|
||||
}
|
||||
|
||||
// timedCache is a cache with TTL.
|
||||
type timedCache struct {
|
||||
store cache.Store
|
||||
lock sync.Mutex
|
||||
getter getFunc
|
||||
}
|
||||
|
||||
// newTimedcache creates a new timedCache.
|
||||
func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {
|
||||
if getter == nil {
|
||||
return nil, fmt.Errorf("getter is not provided")
|
||||
}
|
||||
|
||||
return &timedCache{
|
||||
getter: getter,
|
||||
store: cache.NewTTLStore(cacheKeyFunc, ttl),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getInternal returns cacheEntry by key. If the key is not cached yet,
|
||||
// it returns a cacheEntry with nil data.
|
||||
func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
|
||||
entry, exists, err := t.store.GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists {
|
||||
return entry.(*cacheEntry), nil
|
||||
}
|
||||
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
entry, exists, err = t.store.GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists {
|
||||
return entry.(*cacheEntry), nil
|
||||
}
|
||||
|
||||
// Still not found, add new entry with nil data.
|
||||
// Note the data will be filled later by getter.
|
||||
newEntry := &cacheEntry{
|
||||
key: key,
|
||||
data: nil,
|
||||
}
|
||||
t.store.Add(newEntry)
|
||||
return newEntry, nil
|
||||
}
|
||||
|
||||
// Get returns the requested item by key.
|
||||
func (t *timedCache) Get(key string) (interface{}, error) {
|
||||
entry, err := t.getInternal(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Data is still not cached yet, cache it by getter.
|
||||
if entry.data == nil {
|
||||
entry.lock.Lock()
|
||||
defer entry.lock.Unlock()
|
||||
|
||||
if entry.data == nil {
|
||||
data, err := t.getter(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entry.data = data
|
||||
}
|
||||
}
|
||||
|
||||
return entry.data, nil
|
||||
}
|
||||
|
||||
// Delete removes an item from the cache.
|
||||
func (t *timedCache) Delete(key string) error {
|
||||
return t.store.Delete(&cacheEntry{
|
||||
key: key,
|
||||
})
|
||||
}
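A minimal usage sketch for timedCache: Get serves from the TTL store and only falls back to the getter on a miss, and Delete forces the next Get to refetch. The lookupSecurityGroup call and the two-minute TTL are placeholders; the real instances are the lbCache and nsgCache fields invalidated from azure_backoff.go above:

func exampleNSGCacheUsage() error {
	getter := func(key string) (interface{}, error) {
		return lookupSecurityGroup(key) // placeholder for the real ARM GET
	}
	nsgCache, err := newTimedcache(2*time.Minute, getter)
	if err != nil {
		return err
	}
	if _, err := nsgCache.Get("cluster-nsg"); err != nil { // miss: invokes the getter, then caches the result
		return err
	}
	// After a successful CreateOrUpdate, drop the entry so the next Get refetches fresh state.
	return nsgCache.Delete("cluster-nsg")
}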
|
160 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache_test.go generated vendored Normal file
@ -0,0 +1,160 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
fakeCacheTTL = 2 * time.Second
|
||||
)
|
||||
|
||||
type fakeDataObj struct{}
|
||||
|
||||
type fakeDataSource struct {
|
||||
called int
|
||||
data map[string]*fakeDataObj
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func (fake *fakeDataSource) get(key string) (interface{}, error) {
|
||||
fake.lock.Lock()
|
||||
defer fake.lock.Unlock()
|
||||
|
||||
fake.called = fake.called + 1
|
||||
if v, ok := fake.data[key]; ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fake *fakeDataSource) set(data map[string]*fakeDataObj) {
|
||||
fake.lock.Lock()
|
||||
defer fake.lock.Unlock()
|
||||
|
||||
fake.data = data
|
||||
fake.called = 0
|
||||
}
|
||||
|
||||
func newFakeCache(t *testing.T) (*fakeDataSource, *timedCache) {
|
||||
dataSource := &fakeDataSource{
|
||||
data: make(map[string]*fakeDataObj),
|
||||
}
|
||||
getter := dataSource.get
|
||||
cache, err := newTimedcache(fakeCacheTTL, getter)
|
||||
assert.NoError(t, err)
|
||||
return dataSource, cache
|
||||
}
|
||||
|
||||
func TestCacheGet(t *testing.T) {
|
||||
val := &fakeDataObj{}
|
||||
cases := []struct {
|
||||
name string
|
||||
data map[string]*fakeDataObj
|
||||
key string
|
||||
expected interface{}
|
||||
}{
|
||||
{
|
||||
name: "cache should return nil for empty data source",
|
||||
key: "key1",
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "cache should return nil for non exist key",
|
||||
data: map[string]*fakeDataObj{"key2": val},
|
||||
key: "key1",
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "cache should return data for existing key",
|
||||
data: map[string]*fakeDataObj{"key1": val},
|
||||
key: "key1",
|
||||
expected: val,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
dataSource, cache := newFakeCache(t)
|
||||
dataSource.set(c.data)
|
||||
val, err := cache.Get(c.key)
|
||||
assert.NoError(t, err, c.name)
|
||||
assert.Equal(t, c.expected, val, c.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheGetError(t *testing.T) {
|
||||
getError := fmt.Errorf("getError")
|
||||
getter := func(key string) (interface{}, error) {
|
||||
return nil, getError
|
||||
}
|
||||
cache, err := newTimedcache(fakeCacheTTL, getter)
|
||||
assert.NoError(t, err)
|
||||
|
||||
val, err := cache.Get("key")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, getError, err)
|
||||
assert.Nil(t, val)
|
||||
}
|
||||
|
||||
func TestCacheDelete(t *testing.T) {
|
||||
key := "key1"
|
||||
val := &fakeDataObj{}
|
||||
data := map[string]*fakeDataObj{
|
||||
key: val,
|
||||
}
|
||||
dataSource, cache := newFakeCache(t)
|
||||
dataSource.set(data)
|
||||
|
||||
v, err := cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, val, v, "cache should get correct data")
|
||||
|
||||
dataSource.set(nil)
|
||||
cache.Delete(key)
|
||||
v, err = cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, dataSource.called)
|
||||
assert.Equal(t, nil, v, "cache should get nil after data is removed")
|
||||
}
|
||||
|
||||
func TestCacheExpired(t *testing.T) {
|
||||
key := "key1"
|
||||
val := &fakeDataObj{}
|
||||
data := map[string]*fakeDataObj{
|
||||
key: val,
|
||||
}
|
||||
dataSource, cache := newFakeCache(t)
|
||||
dataSource.set(data)
|
||||
|
||||
v, err := cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, dataSource.called)
|
||||
assert.Equal(t, val, v, "cache should get correct data")
|
||||
|
||||
time.Sleep(fakeCacheTTL)
|
||||
v, err = cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, dataSource.called)
|
||||
assert.Equal(t, val, v, "cache should get correct data even after expired")
|
||||
}
|
1316 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go generated vendored Normal file
File diff suppressed because it is too large
192 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go generated vendored Normal file
@ -0,0 +1,192 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
const (
|
||||
storageAccountNameTemplate = "pvc%s"
|
||||
|
||||
// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
|
||||
maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
|
||||
maxDisksPerStorageAccounts = 60
|
||||
storageAccountUtilizationBeforeGrowing = 0.5
|
||||
|
||||
maxLUN = 64 // max number of LUNs per VM
|
||||
errLeaseFailed = "AcquireDiskLeaseFailed"
|
||||
errLeaseIDMissing = "LeaseIdMissing"
|
||||
errContainerNotFound = "ContainerNotFound"
|
||||
errDiskBlobNotFound = "DiskBlobNotFound"
|
||||
)
|
||||
|
||||
var defaultBackOff = kwait.Backoff{
|
||||
Steps: 20,
|
||||
Duration: 2 * time.Second,
|
||||
Factor: 1.5,
|
||||
Jitter: 0.0,
|
||||
}
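With Steps: 20, Duration: 2s, Factor: 1.5 and no jitter, the per-attempt delay grows from two seconds to well over an hour, and the cumulative wait before defaultBackOff gives up is on the order of a few hours. A small sketch that prints the resulting schedule; the exact number of sleeps depends on wait.ExponentialBackoff internals, so treat it as an approximation (assumes fmt and time are imported):

func printDefaultBackOffSchedule() {
	delay := 2 * time.Second
	total := time.Duration(0)
	for step := 0; step < 20; step++ {
		total += delay
		fmt.Printf("step %2d: delay %v, cumulative %v\n", step, delay, total)
		delay = time.Duration(float64(delay) * 1.5) // multiply by Factor after each attempt
	}
}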
|
||||
|
||||
type controllerCommon struct {
|
||||
subscriptionID string
|
||||
location string
|
||||
storageEndpointSuffix string
|
||||
resourceGroup string
|
||||
cloud *Cloud
|
||||
}
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
// 1. vmType is standard, attach with availabilitySet.AttachDisk.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then attach with availabilitySet.AttachDisk.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, attach with scaleSet.AttachDisk.
|
||||
return ss.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
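The same four-step dispatch (standard vmType, cast to scaleSet, availability-set-managed node, plain vmss node) repeats below in DetachDiskByName, GetDiskLun, GetNextDiskLun and DisksAreAttached. A hypothetical sketch of how that selection could be factored into one helper, assuming the provider's VMSet interface; this helper is an illustration and not part of this commit:

func (c *controllerCommon) diskVMSet(nodeName types.NodeName) (VMSet, error) {
	if c.cloud.VMType == vmTypeStandard {
		return c.cloud.vmSet, nil
	}
	ss, ok := c.cloud.vmSet.(*scaleSet)
	if !ok {
		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
	}
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
	if err != nil {
		return nil, err
	}
	if managedByAS {
		return ss.availabilitySet, nil // the node sits in an availability set
	}
	return ss, nil // the node is a scale-set instance
}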
|
||||
|
||||
// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
|
||||
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
// 1. vmType is standard, detach with availabilitySet.DetachDiskByName.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then detach with availabilitySet.DetachDiskByName.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, detach with scaleSet.DetachDiskByName.
|
||||
return ss.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
// 1. vmType is standard, get with availabilitySet.GetDiskLun.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then get with availabilitySet.GetDiskLun.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, get with scaleSet.GetDiskLun.
|
||||
return ss.GetDiskLun(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
|
||||
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
// 1. vmType is standard, get with availabilitySet.GetNextDiskLun.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.GetNextDiskLun(nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then get with availabilitySet.GetNextDiskLun.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetNextDiskLun(nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, get with scaleSet.GetNextDiskLun.
|
||||
return ss.GetNextDiskLun(nodeName)
|
||||
}
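GetNextDiskLun ultimately scans the VM's attached data disks for the first unused LUN below maxLUN (64) and reports -1 when the VM is full. A minimal sketch of that scan; nextFreeLUN is a hypothetical helper shown for clarity, not the vendored implementation:

func nextFreeLUN(usedLUNs []int32) int32 {
	taken := make(map[int32]bool, len(usedLUNs))
	for _, lun := range usedLUNs {
		taken[lun] = true
	}
	for lun := int32(0); lun < maxLUN; lun++ {
		if !taken[lun] {
			return lun
		}
	}
	return -1 // every LUN on the VM is already in use
}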
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
// 1. vmType is standard, check with availabilitySet.DisksAreAttached.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then check with availabilitySet.DisksAreAttached.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss, check with scaleSet.DisksAreAttached.
|
||||
return ss.DisksAreAttached(diskNames, nodeName)
|
||||
}
@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -19,64 +19,22 @@ package azure
import (
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/types"
	kwait "k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
)

const (
	defaultDataDiskCount int = 16 // which will allow you to work with most medium size VMs (if not found in map)
	storageAccountNameTemplate   = "pvc%s"

	// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
	maxStorageAccounts                     = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
	maxDisksPerStorageAccounts             = 60
	storageAccountUtilizationBeforeGrowing = 0.5
	storageAccountsCountInit               = 2 // When the plug-in is init-ed, 2 storage accounts will be created to allow fast pvc create/attach/mount

	maxLUN               = 64 // max number of LUNs per VM
	errLeaseFailed       = "AcquireDiskLeaseFailed"
	errLeaseIDMissing    = "LeaseIdMissing"
	errContainerNotFound = "ContainerNotFound"
)

var defaultBackOff = kwait.Backoff{
	Steps:    20,
	Duration: 2 * time.Second,
	Factor:   1.5,
	Jitter:   0.0,
}

type controllerCommon struct {
	tenantID              string
	subscriptionID        string
	location              string
	storageEndpointSuffix string
	resourceGroup         string
	clientID              string
	clientSecret          string
	managementEndpoint    string
	tokenEndPoint         string
	aadResourceEndPoint   string
	aadToken              string
	expiresOn             time.Time
	cloud                 *Cloud
}

// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vm, err := as.getVirtualMachine(nodeName)
	if err != nil {
		return err
	} else if !exists {
		return cloudprovider.InstanceNotFound
	}

	disks := *vm.StorageProfile.DataDisks
	if isManagedDisk {
		disks = append(disks,
@ -111,38 +69,39 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName)
	c.cloud.operationPollRateLimiter.Accept()
	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
	respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
	if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
		retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure attach failed, err: %v", err)
		detail := err.Error()
		if strings.Contains(detail, errLeaseFailed) {
			// if lease cannot be acquired, immediately detach the disk and return the original error
			glog.Infof("azureDisk - failed to acquire disk lease, try detach")
			c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
		if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
			// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
			glog.Infof("azureDisk - err %s, try detach", detail)
			as.DetachDiskByName(diskName, diskURI, nodeName)
		}
	} else {
		glog.V(4).Infof("azureDisk - azure attach succeeded")
		glog.V(4).Info("azureDisk - azure attach succeeded")
		// Invalidate the cache right after updating
		as.cloud.vmCache.Delete(vmName)
	}
	return err
}

// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if err != nil || !exists {
func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	vm, err := as.getVirtualMachine(nodeName)
	if err != nil {
		// if host doesn't exist, no need to detach
		glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
		return nil
@ -175,34 +134,33 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName)
	c.cloud.operationPollRateLimiter.Accept()
	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
	respChan, errChan := as.VirtualMachinesClient.CreateOrUpdate(as.resourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
	if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
		retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.cloud.ResourceGroup, vmName)
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
	} else {
		glog.V(4).Infof("azureDisk - azure disk detach succeeded")
		glog.V(4).Info("azureDisk - azure disk detach succeeded")
		// Invalidate the cache right after updating
		as.cloud.vmCache.Delete(vmName)
	}
	return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
func (as *availabilitySet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	vm, err := as.getVirtualMachine(nodeName)
	if err != nil {
		return -1, err
	} else if !exists {
		return -1, cloudprovider.InstanceNotFound
	}
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
@ -219,12 +177,10 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
func (as *availabilitySet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	vm, err := as.getVirtualMachine(nodeName)
	if err != nil {
		return -1, err
	} else if !exists {
		return -1, cloudprovider.InstanceNotFound
	}
	used := make([]bool, maxLUN)
	disks := *vm.StorageProfile.DataDisks
@ -242,13 +198,13 @@ func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
func (as *availabilitySet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
	attached := make(map[string]bool)
	for _, diskName := range diskNames {
		attached[diskName] = false
	}
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if !exists {
	vm, err := as.getVirtualMachine(nodeName)
	if err == cloudprovider.InstanceNotFound {
		// if host doesn't exist, no need to detach
		glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
			nodeName, diskNames)
214
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
)

// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
	if err != nil {
		return err
	}

	disks := *vm.StorageProfile.DataDisks
	if isManagedDisk {
		disks = append(disks,
			computepreview.DataDisk{
				Name:         &diskName,
				Lun:          &lun,
				Caching:      computepreview.CachingTypes(cachingMode),
				CreateOption: "attach",
				ManagedDisk: &computepreview.ManagedDiskParameters{
					ID: &diskURI,
				},
			})
	} else {
		disks = append(disks,
			computepreview.DataDisk{
				Name: &diskName,
				Vhd: &computepreview.VirtualHardDisk{
					URI: &diskURI,
				},
				Lun:          &lun,
				Caching:      computepreview.CachingTypes(cachingMode),
				CreateOption: "attach",
			})
	}
	vm.StorageProfile.DataDisks = &disks

	ctx, cancel := getContextWithCancel()
	defer cancel()
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", ss.resourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
		}
	}
	if err != nil {
		detail := err.Error()
		if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
			// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
			glog.Infof("azureDisk - err %s, try detach", detail)
			ss.DetachDiskByName(diskName, diskURI, nodeName)
		}
	} else {
		glog.V(4).Info("azureDisk - azure attach succeeded")
		// Invalidate the cache right after updating
		ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
	}
	return err
}

// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
	if err != nil {
		return err
	}

	disks := *vm.StorageProfile.DataDisks
	bFoundDisk := false
	for i, disk := range disks {
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
			(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
			// found the disk
			glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
			disks = append(disks[:i], disks[i+1:]...)
			bFoundDisk = true
			break
		}
	}

	if !bFoundDisk {
		return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
	}

	vm.StorageProfile.DataDisks = &disks
	ctx, cancel := getContextWithCancel()
	defer cancel()
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", ss.resourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure disk detach %q from %s failed, err: %v", diskName, nodeName, err)
	} else {
		glog.V(4).Info("azureDisk - azure detach succeeded")
		// Invalidate the cache right after updating
		ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
	}

	return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	_, _, vm, err := ss.getVmssVM(string(nodeName))
	if err != nil {
		return -1, err
	}

	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
			(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
			// found the disk
			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
			return *disk.Lun, nil
		}
	}
	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (ss *scaleSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	_, _, vm, err := ss.getVmssVM(string(nodeName))
	if err != nil {
		return -1, err
	}

	used := make([]bool, maxLUN)
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		if disk.Lun != nil {
			used[*disk.Lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("All Luns are used")
}

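GetNextDiskLun above picks the lowest LUN, bounded by maxLUN (64), that no data disk currently occupies. The same scan, reduced to a self-contained helper purely for illustration (nextFreeLun and its inputs are invented, not part of this file):

// nextFreeLun returns the lowest LUN in [0, maxLUN) that does not appear in
// used, or -1 when every LUN is taken - the same scan GetNextDiskLun performs
// over vm.StorageProfile.DataDisks.
func nextFreeLun(used []int32, maxLUN int) int32 {
	taken := make([]bool, maxLUN)
	for _, lun := range used {
		if lun >= 0 && int(lun) < maxLUN {
			taken[lun] = true
		}
	}
	for lun, isTaken := range taken {
		if !isTaken {
			return int32(lun)
		}
	}
	return -1
}
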
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (ss *scaleSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
	attached := make(map[string]bool)
	for _, diskName := range diskNames {
		attached[diskName] = false
	}

	_, _, vm, err := ss.getVmssVM(string(nodeName))
	if err != nil {
		if err == cloudprovider.InstanceNotFound {
			// if host doesn't exist, no need to detach
			glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
				nodeName, diskNames)
			return attached, nil
		}

		return attached, err
	}

	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		for _, diskName := range diskNames {
			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
				attached[diskName] = true
			}
		}
	}

	return attached, nil
}
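DetachDiskByName and GetDiskLun above both identify a data disk by any of three keys: the disk name, the unmanaged VHD URI, or the managed-disk resource ID. Condensed into one predicate as an in-package sketch (the matchesDisk helper is invented; computepreview.DataDisk is the 2017-12-01 compute type used in this file):

// matchesDisk reports whether a scale-set data disk refers to diskName or
// diskURI, checking the name, the unmanaged VHD URI, and the managed-disk ID,
// mirroring the condition used in DetachDiskByName and GetDiskLun above.
func matchesDisk(disk computepreview.DataDisk, diskName, diskURI string) bool {
	if disk.Name != nil && diskName != "" && *disk.Name == diskName {
		return true
	}
	if disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI {
		return true
	}
	return disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI
}
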
638
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package azure

import (
	"context"
	"fmt"
	"math/rand"
	"net/http"
@ -24,11 +25,17 @@ import (
	"sync"
	"time"

	"github.com/Azure/go-autorest/autorest/to"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/Azure/azure-sdk-for-go/arm/disk"
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/azure-sdk-for-go/arm/storage"
	computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
)

type fakeAzureLBClient struct {
|
||||
@ -36,14 +43,14 @@ type fakeAzureLBClient struct {
|
||||
FakeStore map[string]map[string]network.LoadBalancer
|
||||
}
|
||||
|
||||
func newFakeAzureLBClient() fakeAzureLBClient {
|
||||
fLBC := fakeAzureLBClient{}
|
||||
func newFakeAzureLBClient() *fakeAzureLBClient {
|
||||
fLBC := &fakeAzureLBClient{}
|
||||
fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer)
|
||||
fLBC.mutex = &sync.Mutex{}
|
||||
return fLBC
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) {
|
||||
func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
resultChan := make(chan network.LoadBalancer, 1)
|
||||
@ -65,7 +72,7 @@ func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalan
|
||||
for idx, config := range *parameters.FrontendIPConfigurations {
|
||||
if config.PrivateIPAllocationMethod == network.Dynamic {
|
||||
// Here we randomly assign an ip as private ip
|
||||
// It dosen't smart enough to know whether it is in the subnet's range
|
||||
// It doesn't smart enough to know whether it is in the subnet's range
|
||||
(*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
|
||||
}
|
||||
}
|
||||
@ -79,7 +86,7 @@ func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalan
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
func (fLBC *fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
respChan := make(chan autorest.Response, 1)
|
||||
@ -112,7 +119,7 @@ func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName
|
||||
return respChan, errChan
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
|
||||
func (fLBC *fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
|
||||
@ -126,7 +133,7 @@ func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName str
|
||||
}
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) {
|
||||
func (fLBC *fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
var value []network.LoadBalancer
|
||||
@ -143,7 +150,7 @@ func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.Loa
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) {
|
||||
func (fLBC *fakeAzureLBClient) ListNextResults(resourceGroupName string, lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
result.Response.Response = &http.Response{
|
||||
@ -171,15 +178,15 @@ func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName stri
|
||||
pipName)
|
||||
}
|
||||
|
||||
func newFakeAzurePIPClient(subscriptionID string) fakeAzurePIPClient {
|
||||
fAPC := fakeAzurePIPClient{}
|
||||
func newFakeAzurePIPClient(subscriptionID string) *fakeAzurePIPClient {
|
||||
fAPC := &fakeAzurePIPClient{}
|
||||
fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress)
|
||||
fAPC.SubscriptionID = subscriptionID
|
||||
fAPC.mutex = &sync.Mutex{}
|
||||
return fAPC
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) {
|
||||
func (fAPC *fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
resultChan := make(chan network.PublicIPAddress, 1)
|
||||
@ -216,7 +223,7 @@ func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIP
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
func (fAPC *fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
respChan := make(chan autorest.Response, 1)
|
||||
@ -249,7 +256,7 @@ func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressN
|
||||
return respChan, errChan
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
|
||||
func (fAPC *fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
|
||||
@ -263,13 +270,13 @@ func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName
|
||||
}
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) {
|
||||
func (fAPC *fakeAzurePIPClient) ListNextResults(resourceGroupName string, lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
return network.PublicIPAddressListResult{}, nil
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) {
|
||||
func (fAPC *fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
var value []network.PublicIPAddress
|
||||
@ -291,15 +298,15 @@ type fakeAzureInterfacesClient struct {
|
||||
FakeStore map[string]map[string]network.Interface
|
||||
}
|
||||
|
||||
func newFakeAzureInterfacesClient() fakeAzureInterfacesClient {
|
||||
fIC := fakeAzureInterfacesClient{}
|
||||
func newFakeAzureInterfacesClient() *fakeAzureInterfacesClient {
|
||||
fIC := &fakeAzureInterfacesClient{}
|
||||
fIC.FakeStore = make(map[string]map[string]network.Interface)
|
||||
fIC.mutex = &sync.Mutex{}
|
||||
|
||||
return fIC
|
||||
}
|
||||
|
||||
func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) {
|
||||
func (fIC *fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) {
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
resultChan := make(chan network.Interface, 1)
|
||||
@ -325,7 +332,7 @@ func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, ne
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
func (fIC *fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
|
||||
@ -339,7 +346,7 @@ func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterf
|
||||
}
|
||||
}
|
||||
|
||||
func (fIC fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@ -348,14 +355,14 @@ type fakeAzureVirtualMachinesClient struct {
|
||||
FakeStore map[string]map[string]compute.VirtualMachine
|
||||
}
|
||||
|
||||
func newFakeAzureVirtualMachinesClient() fakeAzureVirtualMachinesClient {
|
||||
fVMC := fakeAzureVirtualMachinesClient{}
|
||||
func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient {
|
||||
fVMC := &fakeAzureVirtualMachinesClient{}
|
||||
fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine)
|
||||
fVMC.mutex = &sync.Mutex{}
|
||||
return fVMC
|
||||
}
|
||||
|
||||
func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) {
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
resultChan := make(chan compute.VirtualMachine, 1)
|
||||
@ -380,7 +387,7 @@ func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName stri
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
@ -394,7 +401,7 @@ func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) {
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
var value []compute.VirtualMachine
|
||||
@ -410,7 +417,7 @@ func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (resul
|
||||
result.Value = &value
|
||||
return result, nil
|
||||
}
|
||||
func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) ListNextResults(resourceGroupName string, lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
return compute.VirtualMachineListResult{}, nil
|
||||
@ -421,14 +428,14 @@ type fakeAzureSubnetsClient struct {
|
||||
FakeStore map[string]map[string]network.Subnet
|
||||
}
|
||||
|
||||
func newFakeAzureSubnetsClient() fakeAzureSubnetsClient {
|
||||
fASC := fakeAzureSubnetsClient{}
|
||||
func newFakeAzureSubnetsClient() *fakeAzureSubnetsClient {
|
||||
fASC := &fakeAzureSubnetsClient{}
|
||||
fASC.FakeStore = make(map[string]map[string]network.Subnet)
|
||||
fASC.mutex = &sync.Mutex{}
|
||||
return fASC
|
||||
}
|
||||
|
||||
func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) {
|
||||
func (fASC *fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
resultChan := make(chan network.Subnet, 1)
|
||||
@ -454,7 +461,7 @@ func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virt
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
func (fASC *fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
respChan := make(chan autorest.Response, 1)
|
||||
@ -488,7 +495,7 @@ func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetwo
|
||||
}
|
||||
return respChan, errChan
|
||||
}
|
||||
func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
|
||||
func (fASC *fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
@ -502,7 +509,7 @@ func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkN
|
||||
Message: "Not such Subnet",
|
||||
}
|
||||
}
|
||||
func (fASC fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) {
|
||||
func (fASC *fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
@ -525,14 +532,14 @@ type fakeAzureNSGClient struct {
|
||||
FakeStore map[string]map[string]network.SecurityGroup
|
||||
}
|
||||
|
||||
func newFakeAzureNSGClient() fakeAzureNSGClient {
|
||||
fNSG := fakeAzureNSGClient{}
|
||||
func newFakeAzureNSGClient() *fakeAzureNSGClient {
|
||||
fNSG := &fakeAzureNSGClient{}
|
||||
fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup)
|
||||
fNSG.mutex = &sync.Mutex{}
|
||||
return fNSG
|
||||
}
|
||||
|
||||
func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) {
|
||||
func (fNSG *fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
resultChan := make(chan network.SecurityGroup, 1)
|
||||
@ -557,7 +564,7 @@ func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkS
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
func (fNSG *fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
respChan := make(chan autorest.Response, 1)
|
||||
@ -590,7 +597,7 @@ func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityG
|
||||
return respChan, errChan
|
||||
}
|
||||
|
||||
func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
|
||||
func (fNSG *fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
|
||||
@ -604,7 +611,7 @@ func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGrou
|
||||
}
|
||||
}
|
||||
|
||||
func (fNSG fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) {
|
||||
func (fNSG *fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
var value []network.SecurityGroup
|
||||
@ -625,3 +632,556 @@ func getRandomIPPtr() *string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)))
|
||||
}
|
||||
|
||||
type fakeVirtualMachineScaleSetVMsClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]computepreview.VirtualMachineScaleSetVM
|
||||
}
|
||||
|
||||
func newFakeVirtualMachineScaleSetVMsClient() *fakeVirtualMachineScaleSetVMsClient {
|
||||
fVMC := &fakeVirtualMachineScaleSetVMsClient{}
|
||||
fVMC.FakeStore = make(map[string]map[string]computepreview.VirtualMachineScaleSetVM)
|
||||
fVMC.mutex = &sync.Mutex{}
|
||||
|
||||
return fVMC
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]map[string]computepreview.VirtualMachineScaleSetVM) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
fVMC.FakeStore = store
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
result = []computepreview.VirtualMachineScaleSetVM{}
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fVMC.FakeStore[resourceGroupName] {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
|
||||
if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := scaleSetMap[vmKey]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "No such VirtualMachineScaleSetVM",
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result computepreview.VirtualMachineScaleSetVMInstanceView, err error) {
|
||||
_, err = fVMC.Get(ctx, resourceGroupName, VMScaleSetName, instanceID)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters computepreview.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
|
||||
if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := scaleSetMap[vmKey]; ok {
|
||||
scaleSetMap[vmKey] = parameters
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type fakeVirtualMachineScaleSetsClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]computepreview.VirtualMachineScaleSet
|
||||
}
|
||||
|
||||
func newFakeVirtualMachineScaleSetsClient() *fakeVirtualMachineScaleSetsClient {
|
||||
fVMSSC := &fakeVirtualMachineScaleSetsClient{}
|
||||
fVMSSC.FakeStore = make(map[string]map[string]computepreview.VirtualMachineScaleSet)
|
||||
fVMSSC.mutex = &sync.Mutex{}
|
||||
|
||||
return fVMSSC
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) setFakeStore(store map[string]map[string]computepreview.VirtualMachineScaleSet) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
fVMSSC.FakeStore = store
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters computepreview.VirtualMachineScaleSet) (resp *http.Response, err error) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
if _, ok := fVMSSC.FakeStore[resourceGroupName]; !ok {
|
||||
fVMSSC.FakeStore[resourceGroupName] = make(map[string]computepreview.VirtualMachineScaleSet)
|
||||
}
|
||||
fVMSSC.FakeStore[resourceGroupName][VMScaleSetName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result computepreview.VirtualMachineScaleSet, err error) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
if scaleSetMap, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := scaleSetMap[VMScaleSetName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "No such ScaleSet",
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []computepreview.VirtualMachineScaleSet, err error) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
result = []computepreview.VirtualMachineScaleSet{}
|
||||
if _, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fVMSSC.FakeStore[resourceGroupName] {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type fakeRoutesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.Route
|
||||
}
|
||||
|
||||
func newFakeRoutesClient() *fakeRoutesClient {
|
||||
fRC := &fakeRoutesClient{}
|
||||
fRC.FakeStore = make(map[string]map[string]network.Route)
|
||||
fRC.mutex = &sync.Mutex{}
|
||||
return fRC
|
||||
}
|
||||
|
||||
func (fRC *fakeRoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, cancel <-chan struct{}) (<-chan network.Route, <-chan error) {
|
||||
fRC.mutex.Lock()
|
||||
defer fRC.mutex.Unlock()
|
||||
|
||||
resultChan := make(chan network.Route, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var result network.Route
|
||||
var err error
|
||||
defer func() {
|
||||
resultChan <- result
|
||||
errChan <- err
|
||||
close(resultChan)
|
||||
close(errChan)
|
||||
}()
|
||||
|
||||
if _, ok := fRC.FakeStore[routeTableName]; !ok {
|
||||
fRC.FakeStore[routeTableName] = make(map[string]network.Route)
|
||||
}
|
||||
fRC.FakeStore[routeTableName][routeName] = routeParameters
|
||||
result = fRC.FakeStore[routeTableName][routeName]
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
err = nil
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fRC *fakeRoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
|
||||
fRC.mutex.Lock()
|
||||
defer fRC.mutex.Unlock()
|
||||
|
||||
respChan := make(chan autorest.Response, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var resp autorest.Response
|
||||
var err error
|
||||
defer func() {
|
||||
respChan <- resp
|
||||
errChan <- err
|
||||
close(respChan)
|
||||
close(errChan)
|
||||
}()
|
||||
if routes, ok := fRC.FakeStore[routeTableName]; ok {
|
||||
if _, ok := routes[routeName]; ok {
|
||||
delete(routes, routeName)
|
||||
resp.Response = &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
}
|
||||
|
||||
err = nil
|
||||
return respChan, errChan
|
||||
}
|
||||
}
|
||||
resp.Response = &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}
|
||||
err = autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Route",
|
||||
}
|
||||
return respChan, errChan
|
||||
}
|
||||
|
||||
type fakeRouteTablesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.RouteTable
|
||||
Calls []string
|
||||
}
|
||||
|
||||
func newFakeRouteTablesClient() *fakeRouteTablesClient {
|
||||
fRTC := &fakeRouteTablesClient{}
|
||||
fRTC.FakeStore = make(map[string]map[string]network.RouteTable)
|
||||
fRTC.mutex = &sync.Mutex{}
|
||||
return fRTC
|
||||
}
|
||||
|
||||
func (fRTC *fakeRouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters network.RouteTable, cancel <-chan struct{}) (<-chan network.RouteTable, <-chan error) {
|
||||
fRTC.mutex.Lock()
|
||||
defer fRTC.mutex.Unlock()
|
||||
|
||||
fRTC.Calls = append(fRTC.Calls, "CreateOrUpdate")
|
||||
|
||||
resultChan := make(chan network.RouteTable, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var result network.RouteTable
|
||||
var err error
|
||||
defer func() {
|
||||
resultChan <- result
|
||||
errChan <- err
|
||||
close(resultChan)
|
||||
close(errChan)
|
||||
}()
|
||||
|
||||
if _, ok := fRTC.FakeStore[resourceGroupName]; !ok {
|
||||
fRTC.FakeStore[resourceGroupName] = make(map[string]network.RouteTable)
|
||||
}
|
||||
fRTC.FakeStore[resourceGroupName][routeTableName] = parameters
|
||||
result = fRTC.FakeStore[resourceGroupName][routeTableName]
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
err = nil
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fRTC *fakeRouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
|
||||
fRTC.mutex.Lock()
|
||||
defer fRTC.mutex.Unlock()
|
||||
|
||||
fRTC.Calls = append(fRTC.Calls, "Get")
|
||||
|
||||
if _, ok := fRTC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fRTC.FakeStore[resourceGroupName][routeTableName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such RouteTable",
|
||||
}
|
||||
}
|
||||
|
||||
type fakeFileClient struct {
|
||||
}
|
||||
|
||||
func (fFC *fakeFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fFC *fakeFileClient) deleteFileShare(accountName, accountKey, name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fFC *fakeFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeStorageAccountClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]storage.Account
|
||||
Keys storage.AccountListKeysResult
|
||||
Accounts storage.AccountListResult
|
||||
Err error
|
||||
}
|
||||
|
||||
func newFakeStorageAccountClient() *fakeStorageAccountClient {
|
||||
fSAC := &fakeStorageAccountClient{}
|
||||
fSAC.FakeStore = make(map[string]map[string]storage.Account)
|
||||
fSAC.mutex = &sync.Mutex{}
|
||||
return fSAC
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) Create(resourceGroupName string, accountName string, parameters storage.AccountCreateParameters, cancel <-chan struct{}) (<-chan storage.Account, <-chan error) {
|
||||
fSAC.mutex.Lock()
|
||||
defer fSAC.mutex.Unlock()
|
||||
|
||||
resultChan := make(chan storage.Account, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var result storage.Account
|
||||
var err error
|
||||
defer func() {
|
||||
resultChan <- result
|
||||
errChan <- err
|
||||
close(resultChan)
|
||||
close(errChan)
|
||||
}()
|
||||
|
||||
if _, ok := fSAC.FakeStore[resourceGroupName]; !ok {
|
||||
fSAC.FakeStore[resourceGroupName] = make(map[string]storage.Account)
|
||||
}
|
||||
fSAC.FakeStore[resourceGroupName][accountName] = storage.Account{
|
||||
Name: &accountName,
|
||||
Sku: parameters.Sku,
|
||||
Kind: parameters.Kind,
|
||||
Location: parameters.Location,
|
||||
Identity: parameters.Identity,
|
||||
Tags: parameters.Tags,
|
||||
AccountProperties: &storage.AccountProperties{},
|
||||
}
|
||||
result = fSAC.FakeStore[resourceGroupName][accountName]
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
err = nil
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) {
|
||||
fSAC.mutex.Lock()
|
||||
defer fSAC.mutex.Unlock()
|
||||
|
||||
if rgAccounts, ok := fSAC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgAccounts[accountName]; ok {
|
||||
delete(rgAccounts, accountName)
|
||||
result.Response = &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
result.Response = &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}
|
||||
err = autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such StorageAccount",
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) ListKeys(resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
|
||||
return fSAC.Keys, fSAC.Err
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) ListByResourceGroup(resourceGroupName string) (result storage.AccountListResult, err error) {
|
||||
return fSAC.Accounts, fSAC.Err
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) GetProperties(resourceGroupName string, accountName string) (result storage.Account, err error) {
|
||||
fSAC.mutex.Lock()
|
||||
defer fSAC.mutex.Unlock()
|
||||
|
||||
if _, ok := fSAC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fSAC.FakeStore[resourceGroupName][accountName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such StorageAccount",
|
||||
}
|
||||
}
|
||||
|
||||
type fakeDisksClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]disk.Model
|
||||
}
|
||||
|
||||
func newFakeDisksClient() *fakeDisksClient {
|
||||
fDC := &fakeDisksClient{}
|
||||
fDC.FakeStore = make(map[string]map[string]disk.Model)
|
||||
fDC.mutex = &sync.Mutex{}
|
||||
return fDC
|
||||
}
|
||||
|
||||
func (fDC *fakeDisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter disk.Model, cancel <-chan struct{}) (<-chan disk.Model, <-chan error) {
|
||||
fDC.mutex.Lock()
|
||||
defer fDC.mutex.Unlock()
|
||||
|
||||
resultChan := make(chan disk.Model, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var result disk.Model
|
||||
var err error
|
||||
defer func() {
|
||||
resultChan <- result
|
||||
errChan <- err
|
||||
close(resultChan)
|
||||
close(errChan)
|
||||
}()
|
||||
|
||||
if _, ok := fDC.FakeStore[resourceGroupName]; !ok {
|
||||
fDC.FakeStore[resourceGroupName] = make(map[string]disk.Model)
|
||||
}
|
||||
fDC.FakeStore[resourceGroupName][diskName] = diskParameter
|
||||
result = fDC.FakeStore[resourceGroupName][diskName]
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
err = nil
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
func (fDC *fakeDisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan disk.OperationStatusResponse, <-chan error) {
|
||||
fDC.mutex.Lock()
|
||||
defer fDC.mutex.Unlock()
|
||||
|
||||
respChan := make(chan disk.OperationStatusResponse, 1)
|
||||
errChan := make(chan error, 1)
|
||||
var resp disk.OperationStatusResponse
|
||||
var err error
|
||||
defer func() {
|
||||
respChan <- resp
|
||||
errChan <- err
|
||||
close(respChan)
|
||||
close(errChan)
|
||||
}()
|
||||
if rgDisks, ok := fDC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgDisks[diskName]; ok {
|
||||
delete(rgDisks, diskName)
|
||||
resp.Response = autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
},
|
||||
}
|
||||
|
||||
err = nil
|
||||
return respChan, errChan
|
||||
}
|
||||
}
|
||||
resp.Response = autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
},
|
||||
}
|
||||
err = autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Disk",
|
||||
}
|
||||
return respChan, errChan
|
||||
}
|
||||
|
||||
func (fDC *fakeDisksClient) Get(resourceGroupName string, diskName string) (result disk.Model, err error) {
|
||||
fDC.mutex.Lock()
|
||||
defer fDC.mutex.Unlock()
|
||||
|
||||
if _, ok := fDC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fDC.FakeStore[resourceGroupName][diskName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Disk",
|
||||
}
|
||||
}
|
||||
|
||||
type fakeVMSet struct {
|
||||
NodeToIP map[string]map[string]string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
return "", fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
return "", fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetIPByNodeName(name, vmSetName string) (string, error) {
|
||||
nodes, found := f.NodeToIP[vmSetName]
|
||||
if !found {
|
||||
return "", fmt.Errorf("not found")
|
||||
}
|
||||
ip, found := nodes[name]
|
||||
if !found {
|
||||
return "", fmt.Errorf("not found")
|
||||
}
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
return network.Interface{}, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
return types.NodeName(""), fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetPrimaryVMSetName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
|
||||
return nil, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
return -1, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
return -1, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
return nil, fmt.Errorf("unimplemented")
|
||||
}
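A small sketch of how the NodeToIP map drives GetIPByNodeName in tests; the VM set and node names are assumed for illustration:

func exampleFakeVMSetLookup() {
	f := &fakeVMSet{
		NodeToIP: map[string]map[string]string{
			"as1": {"node-1": "10.240.0.4"},
		},
	}
	// Both the VM set name and the node name must be present, otherwise "not found" is returned.
	if ip, err := f.GetIPByNodeName("node-1", "as1"); err == nil {
		fmt.Printf("node-1 has IP %s\n", ip)
	}
}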
|
||||
|
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
azs "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
@ -27,9 +28,33 @@ const (
|
||||
useHTTPS = true
|
||||
)
|
||||
|
||||
// FileClient is the interface for creating file shares, interface for test
|
||||
// injection.
|
||||
type FileClient interface {
|
||||
createFileShare(accountName, accountKey, name string, sizeGiB int) error
|
||||
deleteFileShare(accountName, accountKey, name string) error
|
||||
resizeFileShare(accountName, accountKey, name string, sizeGiB int) error
|
||||
}
|
||||
|
||||
// create file share
|
||||
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB int) error {
|
||||
fileClient, err := az.getFileSvcClient(accountName, accountKey)
|
||||
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return az.FileClient.createFileShare(accountName, accountKey, name, sizeGiB)
|
||||
}
|
||||
|
||||
func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
|
||||
return az.FileClient.deleteFileShare(accountName, accountKey, name)
|
||||
}
|
||||
|
||||
func (az *Cloud) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return az.FileClient.resizeFileShare(accountName, accountKey, name, sizeGiB)
|
||||
}
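Because the Cloud methods above only delegate to az.FileClient, unit tests can plug in a stand-in that satisfies the same interface. The following in-memory fake is a sketch under that assumption (the type and field names are not from the vendored code):

type fakeFileClient struct {
	shares map[string]int // share name -> quota in GiB
}

func (f *fakeFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
	if f.shares == nil {
		f.shares = map[string]int{}
	}
	f.shares[name] = sizeGiB
	return nil
}

func (f *fakeFileClient) deleteFileShare(accountName, accountKey, name string) error {
	delete(f.shares, name)
	return nil
}

func (f *fakeFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
	if f.shares == nil {
		f.shares = map[string]int{}
	}
	f.shares[name] = sizeGiB
	return nil
}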
|
||||
|
||||
type azureFileClient struct {
|
||||
env azure.Environment
|
||||
}
|
||||
|
||||
func (f *azureFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
fileClient, err := f.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -42,7 +67,7 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in
|
||||
if err = share.Create(nil); err != nil {
|
||||
return fmt.Errorf("failed to create file share, err: %v", err)
|
||||
}
|
||||
share.Properties.Quota = sizeGB
|
||||
share.Properties.Quota = sizeGiB
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
if err := share.Delete(nil); err != nil {
|
||||
glog.Errorf("Error deleting share: %v", err)
|
||||
@ -53,20 +78,38 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in
|
||||
}
|
||||
|
||||
// delete a file share
|
||||
func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
|
||||
fileClient, err := az.getFileSvcClient(accountName, accountKey)
|
||||
if err == nil {
|
||||
share := fileClient.GetShareReference(name)
|
||||
return share.Delete(nil)
|
||||
func (f *azureFileClient) deleteFileShare(accountName, accountKey, name string) error {
|
||||
fileClient, err := f.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fileClient.GetShareReference(name).Delete(nil)
|
||||
}
|
||||
|
||||
func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
fileClient, err := f.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
share := fileClient.GetShareReference(name)
|
||||
if share.Properties.Quota >= sizeGiB {
|
||||
glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
|
||||
share.Properties.Quota, sizeGiB, accountName, name)
|
||||
return nil
|
||||
}
|
||||
share.Properties.Quota = sizeGiB
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
|
||||
}
|
||||
glog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
|
||||
client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
|
||||
func (f *azureFileClient) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
|
||||
fileClient, err := azs.NewClient(accountName, accountKey, f.env.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating azure client: %v", err)
|
||||
}
|
||||
f := client.GetFileService()
|
||||
return &f, nil
|
||||
fc := fileClient.GetFileService()
|
||||
return &fc, nil
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
@ -17,18 +17,20 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
if az.UseInstanceMetadata {
|
||||
ipAddress := IPAddress{}
|
||||
err := az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
|
||||
@ -48,6 +50,7 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
ip, err := az.GetIPForMachineWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("NodeAddresses(%s) abort backoff", name)
|
||||
@ -63,29 +66,29 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return az.NodeAddresses(name)
|
||||
return az.NodeAddresses(ctx, name)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (az *Cloud) ExternalID(name types.NodeName) (string, error) {
|
||||
return az.InstanceID(name)
|
||||
func (az *Cloud) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return az.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
_, err = az.InstanceID(name)
|
||||
_, err = az.InstanceID(ctx, name)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return false, nil
|
||||
@ -99,101 +102,76 @@ func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if az.VMType == vmTypeVMSS {
|
||||
// The VMSS vmName is not the same as the hostname, so use the hostname instead.
|
||||
metadataName, err = os.Hostname()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
metadataName = strings.ToLower(metadataName)
|
||||
return (metadataName == nodeName), err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
func (az *Cloud) InstanceID(name types.NodeName) (string, error) {
|
||||
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if isLocalInstance {
|
||||
externalInstanceID, err := az.metadata.Text("instance/compute/vmId")
|
||||
if err == nil {
|
||||
return externalInstanceID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if az.Config.VMType == vmTypeVMSS {
|
||||
id, err := az.getVmssInstanceID(name)
|
||||
if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
|
||||
// Retry with standard type because master nodes may not belong to any vmss.
|
||||
return az.getStandardInstanceID(name)
|
||||
// Not local instance, get instanceID from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
return id, err
|
||||
}
|
||||
// Compose instanceID based on nodeName for standard instance.
|
||||
if az.VMType == vmTypeStandard {
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
}
|
||||
|
||||
return az.getStandardInstanceID(name)
|
||||
}
|
||||
|
||||
func (az *Cloud) getVmssInstanceID(name types.NodeName) (string, error) {
|
||||
var machine compute.VirtualMachineScaleSetVM
|
||||
var exists bool
|
||||
var err error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, exists, err = az.getVmssVirtualMachine(name)
|
||||
if err != nil {
|
||||
if az.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
machine, exists, err = az.GetScaleSetsVMWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
// Get scale set name and instanceID from vmName for vmss.
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return *machine.ID, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getStandardInstanceID(name types.NodeName) (string, error) {
|
||||
var machine compute.VirtualMachine
|
||||
var exists bool
|
||||
var err error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, exists, err = az.getVirtualMachine(name)
|
||||
if err != nil {
|
||||
if az.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
machine, exists, err = az.GetVirtualMachineWithRetry(name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
ssName, instanceID, err := extractVmssVMName(metadataName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
// Compose instanceID based on ssName and instanceID for vmss instance.
|
||||
return az.getVmssMachineID(ssName, instanceID), nil
|
||||
}
|
||||
return *machine.ID, nil
|
||||
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) InstanceTypeByProviderID(providerID string) (string, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return az.InstanceType(name)
|
||||
return az.InstanceType(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
|
||||
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
|
||||
func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
|
||||
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
@ -207,57 +185,18 @@ func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if az.Config.VMType == vmTypeVMSS {
|
||||
machineType, err := az.getVmssInstanceType(name)
|
||||
if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
|
||||
// Retry with standard type because master nodes may not belong to any vmss.
|
||||
return az.getStandardInstanceType(name)
|
||||
}
|
||||
|
||||
return machineType, err
|
||||
}
|
||||
|
||||
return az.getStandardInstanceType(name)
|
||||
}
|
||||
|
||||
// getVmssInstanceType gets instance with type vmss.
|
||||
func (az *Cloud) getVmssInstanceType(name types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVmssVirtualMachine(name)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.InstanceType(%s), az.getVmssVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
if machine.Sku.Name != nil {
|
||||
return *machine.Sku.Name, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("instance type is not set")
|
||||
}
|
||||
|
||||
// getStandardInstanceType gets instance with standard type.
|
||||
func (az *Cloud) getStandardInstanceType(name types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVirtualMachine(name)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.InstanceType(%s), az.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return string(machine.HardwareProfile.VMSize), nil
|
||||
return az.vmSet.GetInstanceTypeByNodeName(string(name))
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
|
||||
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
|
||||
func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return fmt.Errorf("not supported")
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on
|
||||
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
|
||||
func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
// On Azure this is the hostname, so we just return the hostname.
|
||||
func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
@ -266,9 +205,3 @@ func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
func mapNodeNameToVMName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
||||
|
||||
// mapVMNameToNodeName maps an Azure VM Name to a k8s NodeName
|
||||
// This is a simple string cast.
|
||||
func mapVMNameToNodeName(vmName string) types.NodeName {
|
||||
return types.NodeName(vmName)
|
||||
}
|
||||
|
349
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
@ -17,72 +17,77 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
|
||||
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal"
|
||||
const (
|
||||
// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
|
||||
ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal"
|
||||
|
||||
// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service
|
||||
// to specify what subnet it is exposed on
|
||||
const ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet"
|
||||
// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service
|
||||
// to specify what subnet it is exposed on
|
||||
ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet"
|
||||
|
||||
// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the
|
||||
// Azure load balancer selection based on availability sets
|
||||
// There are currently three possible load balancer selection modes :
|
||||
// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode")
|
||||
// In this case the Loadbalancer of the primary Availability set is selected
|
||||
// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set
|
||||
// is selected which has the miinimum rules associated with it.
|
||||
// 3. "as1,as2" mode - this is when the laod balancer from the specified availability sets is selected that has the
|
||||
// miinimum rules associated with it.
|
||||
const ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode"
|
||||
// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the
|
||||
// Azure load balancer selection based on availability sets
|
||||
// There are currently three possible load balancer selection modes :
|
||||
// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode")
|
||||
// In this case the Loadbalancer of the primary Availability set is selected
|
||||
// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set
|
||||
// is selected which has the minimum rules associated with it.
|
||||
// 3. "as1,as2" mode - this is when the load balancer from the specified availability sets is selected that has the
|
||||
// minimum rules associated with it.
|
||||
ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode"
|
||||
|
||||
// ServiceAnnotationLoadBalancerAutoModeValue the annotation used on the service to specify the
|
||||
// Azure load balancer auto selection from the availability sets
|
||||
const ServiceAnnotationLoadBalancerAutoModeValue = "__auto__"
|
||||
// ServiceAnnotationLoadBalancerAutoModeValue is the annotation used on the service to specify the
|
||||
// Azure load balancer auto selection from the availability sets
|
||||
ServiceAnnotationLoadBalancerAutoModeValue = "__auto__"
|
||||
|
||||
// ServiceAnnotationDNSLabelName annotation speficying the DNS label name for the service.
|
||||
const ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name"
|
||||
// ServiceAnnotationDNSLabelName is the annotation used on the service
|
||||
// to specify the DNS label name for the service.
|
||||
ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name"
|
||||
|
||||
// ServiceAnnotationSharedSecurityRule is the annotation used on the service
|
||||
// to specify that the service should be exposed using an Azure security rule
|
||||
// that may be shared with other service, trading specificity of rules for an
|
||||
// increase in the number of services that can be exposed. This relies on the
|
||||
// Azure "augmented security rules" feature which at the time of writing is in
|
||||
// preview and available only in certain regions.
|
||||
const ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"
|
||||
// ServiceAnnotationSharedSecurityRule is the annotation used on the service
|
||||
// to specify that the service should be exposed using an Azure security rule
|
||||
// that may be shared with other service, trading specificity of rules for an
|
||||
// increase in the number of services that can be exposed. This relies on the
|
||||
// Azure "augmented security rules" feature which at the time of writing is in
|
||||
// preview and available only in certain regions.
|
||||
ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule"
|
||||
)
|
||||
|
||||
// ServiceAnnotationLoadBalancerResourceGroup is the annotation used on the service
|
||||
// to specify the resource group of load balancer objects that are not in the same resource group as the cluster.
|
||||
const ServiceAnnotationLoadBalancerResourceGroup = "service.beta.kubernetes.io/azure-load-balancer-resource-group"
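These annotation keys are read from the Service object's metadata. A sketch of a Service carrying them, where the values, the object names, and the metav1 alias for k8s.io/apimachinery/pkg/apis/meta/v1 are assumptions for illustration:

func exampleAnnotatedService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "frontend",
			Namespace: "default",
			Annotations: map[string]string{
				ServiceAnnotationLoadBalancerInternal:       "true",
				ServiceAnnotationLoadBalancerInternalSubnet: "apps-subnet",
				ServiceAnnotationDNSLabelName:               "myapp",
				ServiceAnnotationLoadBalancerResourceGroup:  "pips-rg",
			},
		},
		Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
	}
}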
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and
|
||||
// if so, what its status is.
|
||||
func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
|
||||
func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
|
||||
_, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if exists == false {
|
||||
if !exists {
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s)- IP doesn't exist in any of the lbs", clusterName, serviceName)
|
||||
return nil, false, fmt.Errorf("Service(%s) - Loadbalancer not found", serviceName)
|
||||
glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
|
||||
return nil, false, nil
|
||||
}
|
||||
return status, true, nil
|
||||
}
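With this change a missing load balancer is reported as exists == false with a nil error instead of an error. A brief sketch of how a caller can rely on that, where the cluster name is an assumed value:

func exampleCheckLoadBalancer(ctx context.Context, az *Cloud, service *v1.Service) {
	if _, exists, err := az.GetLoadBalancer(ctx, "kubernetes", service); err == nil && !exists {
		// Nothing has been provisioned for this service yet; no teardown needed.
		glog.V(4).Infof("no load balancer found for service %s", getServiceName(service))
	}
}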
|
||||
|
||||
func getPublicIPLabel(service *v1.Service) string {
|
||||
func getPublicIPDomainNameLabel(service *v1.Service) string {
|
||||
if labelName, found := service.Annotations[ServiceAnnotationDNSLabelName]; found {
|
||||
return labelName
|
||||
}
|
||||
@ -90,7 +95,7 @@ func getPublicIPLabel(service *v1.Service) string {
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
|
||||
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
// When a client updates the internal load balancer annotation,
|
||||
// the service may be switched from an internal LB to a public one, or vice versa.
|
||||
// Here we first ensure the service does not lie in the opposite LB.
|
||||
@ -128,8 +133,8 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
|
||||
}
|
||||
|
||||
// UpdateLoadBalancer updates hosts under the specified load balancer.
|
||||
func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
_, err := az.EnsureLoadBalancer(clusterName, service, nodes)
|
||||
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
_, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -139,12 +144,12 @@ func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nod
|
||||
// This construction is useful because many cloud providers' load balancers
|
||||
// have multiple underlying components, meaning a Get could say that the LB
|
||||
// doesn't exist even if some part of it is still laying around.
|
||||
func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
|
||||
func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName)
|
||||
|
||||
serviceIPToCleanup, err := az.findServiceIPAddress(clusterName, service, isInternal)
|
||||
serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -166,15 +171,16 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi
|
||||
return nil
|
||||
}
|
||||
|
||||
// getServiceLoadBalancer gets the loadbalancer for the service if it already exists
|
||||
// If wantLb is TRUE then -it selects a new load balancer
|
||||
// In case the selected load balancer does not exists it returns network.LoadBalancer struct
|
||||
// with added metadata (such as name, location) and existsLB set to FALSE
|
||||
// By default - cluster default LB is returned
|
||||
// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
|
||||
// If wantLb is TRUE then it selects a new load balancer.
|
||||
// In case the selected load balancer does not exist it returns network.LoadBalancer struct
|
||||
// with added metadata (such as name, location) and existsLB set to FALSE.
|
||||
// By default - cluster default LB is returned.
|
||||
func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
var defaultLB *network.LoadBalancer
|
||||
defaultLBName := az.getLoadBalancerName(clusterName, az.Config.PrimaryAvailabilitySetName, isInternal)
|
||||
primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
|
||||
defaultLBName := az.getLoadBalancerName(clusterName, primaryVMSetName, isInternal)
|
||||
|
||||
existingLBs, err := az.ListLBWithRetry()
|
||||
if err != nil {
|
||||
@ -226,26 +232,27 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
return defaultLB, nil, false, nil
|
||||
}
|
||||
|
||||
// select load balancer for the service in the cluster
|
||||
// the selection algorithm selectes the the load balancer with currently has
|
||||
// the minimum lb rules, there there are multiple LB's with same number of rules
|
||||
// it selects the first one (sorted based on name)
|
||||
// selectLoadBalancer selects load balancer for the service in the cluster.
|
||||
// The selection algorithm selects the load balancer which currently has
|
||||
// the minimum lb rules. If there are multiple LBs with same number of rules,
|
||||
// then selects the first one (sorted based on name).
|
||||
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal)
|
||||
availabilitySetNames, err := az.getLoadBalancerAvailabilitySetNames(service, nodes)
|
||||
vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.getLoadBalancerAvailabilitySetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
|
||||
glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
|
||||
return nil, false, err
|
||||
}
|
||||
glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - availabilitysetsnames %v", clusterName, serviceName, isInternal, *availabilitySetNames)
|
||||
glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)
|
||||
|
||||
mapExistingLBs := map[string]network.LoadBalancer{}
|
||||
for _, lb := range *existingLBs {
|
||||
mapExistingLBs[*lb.Name] = lb
|
||||
}
|
||||
selectedLBRuleCount := math.MaxInt32
|
||||
for _, currASName := range *availabilitySetNames {
|
||||
for _, currASName := range *vmSetNames {
|
||||
currLBName := az.getLoadBalancerName(clusterName, currASName, isInternal)
|
||||
lb, exists := mapExistingLBs[currLBName]
|
||||
if !exists {
|
||||
@ -272,13 +279,13 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
|
||||
}
|
||||
|
||||
if selectedLB == nil {
|
||||
err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected availability sets %v", clusterName, serviceName, isInternal, *availabilitySetNames)
|
||||
err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames)
|
||||
glog.Error(err)
|
||||
return nil, false, err
|
||||
}
|
||||
// validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount
|
||||
if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount {
|
||||
err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, availabilitysetnames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *availabilitySetNames)
|
||||
err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames)
|
||||
glog.Error(err)
|
||||
return selectedLB, existsLb, err
|
||||
}
|
||||
@ -288,7 +295,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
|
||||
|
||||
func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
|
||||
if lb == nil {
|
||||
glog.V(10).Infof("getServiceLoadBalancerStatus lb is nil")
|
||||
glog.V(10).Info("getServiceLoadBalancerStatus lb is nil")
|
||||
return nil, nil
|
||||
}
|
||||
if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
|
||||
@ -314,7 +321,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID)
|
||||
}
|
||||
pip, existsPip, err := az.getPublicIPAddress(pipName)
|
||||
pip, existsPip, err := az.getPublicIPAddress(az.getPublicIPAddressResourceGroup(service), pipName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -336,7 +343,9 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service)
|
||||
return getPublicIPName(clusterName, service), nil
|
||||
}
|
||||
|
||||
pips, err := az.ListPIPWithRetry()
|
||||
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
|
||||
|
||||
pips, err := az.ListPIPWithRetry(pipResourceGroup)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -347,7 +356,7 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service)
|
||||
return *pip.Name, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("user supplied IP Address %s was not found", loadBalancerIP)
|
||||
return "", fmt.Errorf("user supplied IP Address %s was not found in resource group %s", loadBalancerIP, pipResourceGroup)
|
||||
}
|
||||
|
||||
func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
|
||||
@ -365,27 +374,30 @@ func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
|
||||
return copyService
|
||||
}
|
||||
|
||||
func (az *Cloud) findServiceIPAddress(clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
|
||||
func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
|
||||
if len(service.Spec.LoadBalancerIP) > 0 {
|
||||
return service.Spec.LoadBalancerIP, nil
|
||||
}
|
||||
|
||||
lbStatus, existsLb, err := az.GetLoadBalancer(clusterName, service)
|
||||
lbStatus, existsLb, err := az.GetLoadBalancer(ctx, clusterName, service)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !existsLb {
|
||||
return "", fmt.Errorf("Expected to find an IP address for service %s but did not", service.Name)
|
||||
glog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name)
|
||||
return "", nil
|
||||
}
|
||||
if len(lbStatus.Ingress) < 1 {
|
||||
return "", fmt.Errorf("Expected to find an IP address for service %s but it had no ingresses", service.Name)
|
||||
glog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return lbStatus.Ingress[0].IP, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel string) (*network.PublicIPAddress, error) {
|
||||
pip, existsPip, err := az.getPublicIPAddress(pipName)
|
||||
func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel string) (*network.PublicIPAddress, error) {
|
||||
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
|
||||
pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -393,6 +405,7 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri
|
||||
return &pip, nil
|
||||
}
|
||||
|
||||
serviceName := getServiceName(service)
|
||||
pip.Name = to.StringPtr(pipName)
|
||||
pip.Location = to.StringPtr(az.Location)
|
||||
pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
|
||||
@ -405,19 +418,15 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel stri
|
||||
}
|
||||
pip.Tags = &map[string]*string{"service": &serviceName}
|
||||
glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): start", *pip.Name)
|
||||
err = az.CreateOrUpdatePIPWithRetry(pip)
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
|
||||
err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): end", *pip.Name)
|
||||
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name)
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("PublicIPAddressesClient.Get(%q): start", *pip.Name)
|
||||
pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, *pip.Name, "")
|
||||
glog.V(10).Infof("PublicIPAddressesClient.Get(%q): end", *pip.Name)
|
||||
pip, err = az.PublicIPAddressesClient.Get(pipResourceGroup, *pip.Name, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -544,8 +553,8 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
domainNameLabel := getPublicIPLabel(service)
|
||||
pip, err := az.ensurePublicIPExists(serviceName, pipName, domainNameLabel)
|
||||
domainNameLabel := getPublicIPDomainNameLabel(service)
|
||||
pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -741,14 +750,24 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
|
||||
glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName)
|
||||
err := az.DeleteLBWithRetry(lbName)
|
||||
// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
|
||||
err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)
|
||||
|
||||
// Remove the LB.
|
||||
glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName)
|
||||
err = az.DeleteLBWithRetry(lbName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(10).Infof("LoadBalancerClient.Delete(%q): end", lbName)
|
||||
glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
|
||||
} else {
|
||||
glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
|
||||
err := az.CreateOrUpdateLBWithRetry(*lb)
|
||||
@ -756,28 +775,28 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isInternal {
|
||||
// Refresh updated lb which will be used later in other places.
|
||||
newLB, exist, err := az.getAzureLoadBalancer(lbName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err)
|
||||
return nil, err
|
||||
}
|
||||
if !exist {
|
||||
return nil, fmt.Errorf("load balancer %q not found", lbName)
|
||||
}
|
||||
lb = &newLB
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if wantLb && nodes != nil {
|
||||
// Add the machines to the backend pool if they're not already
|
||||
availabilitySetName := az.mapLoadBalancerNameToAvailabilitySet(lbName, clusterName)
|
||||
hostUpdates := make([]func() error, len(nodes))
|
||||
for i, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
f := func() error {
|
||||
err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID, availabilitySetName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
hostUpdates[i] = f
|
||||
}
|
||||
|
||||
errs := utilerrors.AggregateGoroutines(hostUpdates...)
|
||||
if errs != nil {
|
||||
return nil, utilerrors.Flatten(errs)
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@ -800,10 +819,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
ports = []v1.ServicePort{}
|
||||
}
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("SecurityGroupsClient.Get(%q): start", az.SecurityGroupName)
|
||||
sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
|
||||
glog.V(10).Infof("SecurityGroupsClient.Get(%q): end", az.SecurityGroupName)
|
||||
sg, err := az.getSecurityGroup()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -972,7 +988,6 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
|
||||
if dirtySg {
|
||||
sg.SecurityRules = &updatedRules
|
||||
glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
|
||||
err := az.CreateOrUpdateSGWithRetry(sg)
|
||||
if err != nil {
|
||||
@ -1143,7 +1158,9 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
}
|
||||
}
|
||||
|
||||
pips, err := az.ListPIPWithRetry()
|
||||
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
|
||||
|
||||
pips, err := az.ListPIPWithRetry(pipResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1159,15 +1176,14 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
// Public IP resource with a matching service tag
|
||||
} else {
|
||||
glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("DeletePublicIPWithRetry(%q): start", pipName)
|
||||
err = az.DeletePublicIPWithRetry(pipName)
|
||||
glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
|
||||
err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName)
|
||||
// We let err to pass through
|
||||
// It may be ignorable
|
||||
}
|
||||
glog.V(10).Infof("DeletePublicIPWithRetry(%q): end", pipName) // response not read yet...
|
||||
glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) // response not read yet...
|
||||
|
||||
err = ignoreStatusNotFoundFromError(err)
|
||||
if err != nil {
|
||||
@ -1182,8 +1198,8 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
|
||||
if !isInternal && wantLb {
|
||||
// Confirm desired public ip resource exists
|
||||
var pip *network.PublicIPAddress
|
||||
domainNameLabel := getPublicIPLabel(service)
|
||||
if pip, err = az.ensurePublicIPExists(serviceName, desiredPipName, domainNameLabel); err != nil {
|
||||
domainNameLabel := getPublicIPDomainNameLabel(service)
|
||||
if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pip, nil
|
||||
@ -1246,98 +1262,17 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b
|
||||
return false
|
||||
}
|
||||
|
||||
// This ensures the given VM's Primary NIC's Primary IP Configuration is
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, availabilitySetName string) error {
|
||||
var machine compute.VirtualMachine
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("VirtualMachinesClient.Get(%q): start", vmName)
|
||||
machine, err := az.VirtualMachineClientGetWithRetry(az.ResourceGroup, vmName, "")
|
||||
if err != nil {
|
||||
glog.V(2).Infof("ensureHostInPool(%s, %s, %s) abort backoff", serviceName, nodeName, backendPoolID)
|
||||
return err
|
||||
}
|
||||
glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", vmName)
|
||||
|
||||
primaryNicID, err := getPrimaryInterfaceID(machine)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nicName, err := getLastSegment(primaryNicID)
|
||||
if err != nil {
|
||||
return err
|
||||
func (az *Cloud) getPublicIPAddressResourceGroup(service *v1.Service) string {
|
||||
if resourceGroup, found := service.Annotations[ServiceAnnotationLoadBalancerResourceGroup]; found {
|
||||
return resourceGroup
|
||||
}
|
||||
|
||||
// Check availability set
|
||||
if availabilitySetName != "" {
|
||||
expectedAvailabilitySetName := az.getAvailabilitySetID(availabilitySetName)
|
||||
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
"nicupdate(%s): skipping nic (%s) since it is not in the availabilitySet(%s)",
|
||||
serviceName, nicName, availabilitySetName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
|
||||
nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
|
||||
glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var primaryIPConfig *network.InterfaceIPConfiguration
|
||||
primaryIPConfig, err = getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
foundPool := false
|
||||
newBackendPools := []network.BackendAddressPool{}
|
||||
if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
|
||||
}
|
||||
for _, existingPool := range newBackendPools {
|
||||
if strings.EqualFold(backendPoolID, *existingPool.ID) {
|
||||
foundPool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
newBackendPools = append(newBackendPools,
|
||||
network.BackendAddressPool{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
|
||||
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
|
||||
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): start", *nic.Name)
|
||||
respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
|
||||
retryErr := az.CreateOrUpdateInterfaceWithRetry(nic)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return az.ResourceGroup
|
||||
}
|
||||
|
||||
// Check if service requires an internal load balancer.
|
||||
func requiresInternalLoadBalancer(service *v1.Service) bool {
|
||||
if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternal]; ok {
|
||||
if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternal]; found {
|
||||
return l == "true"
|
||||
}
|
||||
|
||||
@ -1346,7 +1281,7 @@ func requiresInternalLoadBalancer(service *v1.Service) bool {
|
||||
|
||||
func subnet(service *v1.Service) *string {
|
||||
if requiresInternalLoadBalancer(service) {
|
||||
if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; ok {
|
||||
if l, found := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; found {
|
||||
return &l
|
||||
}
|
||||
}
|
||||
@ -1354,28 +1289,28 @@ func subnet(service *v1.Service) *string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getServiceLoadBalancerMode parses the mode value
|
||||
// if the value is __auto__ it returns isAuto = TRUE
|
||||
// if anything else it returns the unique availability set names after triming spaces
|
||||
func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, availabilitySetNames []string) {
|
||||
// getServiceLoadBalancerMode parses the mode value.
|
||||
// if the value is __auto__ it returns isAuto = TRUE.
|
||||
// if anything else it returns the unique VM set names after trimming spaces.
|
||||
func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, vmSetNames []string) {
|
||||
mode, hasMode := service.Annotations[ServiceAnnotationLoadBalancerMode]
|
||||
mode = strings.TrimSpace(mode)
|
||||
isAuto = strings.EqualFold(mode, ServiceAnnotationLoadBalancerAutoModeValue)
|
||||
if !isAuto {
|
||||
// Break up list of "AS1,AS2"
|
||||
availabilitySetParsedList := strings.Split(mode, ",")
|
||||
vmSetParsedList := strings.Split(mode, ",")
|
||||
|
||||
// Trim the availability set names and remove duplicates
|
||||
// Trim the VM set names and remove duplicates
|
||||
// e.g. {"AS1"," AS2", "AS3", "AS3"} => {"AS1", "AS2", "AS3"}
|
||||
availabilitySetNameSet := sets.NewString()
|
||||
for _, v := range availabilitySetParsedList {
|
||||
availabilitySetNameSet.Insert(strings.TrimSpace(v))
|
||||
vmSetNameSet := sets.NewString()
|
||||
for _, v := range vmSetParsedList {
|
||||
vmSetNameSet.Insert(strings.TrimSpace(v))
|
||||
}
|
||||
|
||||
availabilitySetNames = availabilitySetNameSet.List()
|
||||
vmSetNames = vmSetNameSet.List()
|
||||
}
|
||||
|
||||
return hasMode, isAuto, availabilitySetNames
|
||||
return hasMode, isAuto, vmSetNames
|
||||
}
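A short illustration of the parsing behavior documented above; the annotation value is an assumed example:

func exampleLoadBalancerMode() {
	svc := &v1.Service{}
	svc.Annotations = map[string]string{ServiceAnnotationLoadBalancerMode: "as1, as2,as2"}
	// Spaces are trimmed and duplicates removed, so vmSetNames comes back as ["as1", "as2"];
	// the value "__auto__" would instead set isAuto to true and leave vmSetNames empty.
	hasMode, isAuto, vmSetNames := getServiceLoadBalancerMode(svc)
	glog.V(4).Infof("hasMode=%v isAuto=%v vmSetNames=%v", hasMode, isAuto, vmSetNames)
}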
|
||||
|
||||
func useSharedSecurityRule(service *v1.Service) bool {
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
@ -9,7 +9,7 @@ Despite the ideal philosophy above, we have to face the reality. NSG depends on
|
||||
Also, for Azure we cannot afford to have more than one worker of service_controller: different services can operate on the same LB, so concurrent execution could result in conflicts or unexpected results. AWS and GCE apparently don't have this problem; they use one LB per service, so there is no such conflict.
|
||||
|
||||
There are two load balancers per availability set: internal and external. There is a limit on the number of services that can be associated with a single load balancer.
|
||||
By default primary load balancer is selected. Services can be annotated to allow auto selection of available load balancers. Service annotations can also be used to provide specific availability sets that host the load balancers. Note that in case of auto selection or specific availability set selection, when the availability set is lost incase of downtime or cluster scale down the services are currently not auto assigned to an available load balancer.
|
||||
By default primary load balancer is selected. Services can be annotated to allow auto selection of available load balancers. Service annotations can also be used to provide specific availability sets that host the load balancers. Note that in case of auto selection or specific availability set selection, when the availability set is lost in case of downtime or cluster scale down the services are currently not auto assigned to an available load balancer.
|
||||
Service Annotation for Auto and specific load balancer mode
|
||||
|
||||
- service.beta.kubernetes.io/azure-load-balancer-mode (__auto__|as1,as2...)
|
||||
@ -40,7 +40,7 @@ Service Annotation for Auto and specific load balancer mode
|
||||
|
||||
- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
|
||||
- gets the loadbalancer for the service if it already exists
|
||||
- If wantLb is TRUE then -it selects a new load balancer, the selction helps distribute the services across load balancers
|
||||
- If wantLb is TRUE then it selects a new load balancer; the selection helps distribute the services across load balancers
|
||||
- In case the selected load balancer does not exist it returns a network.LoadBalancer struct with added metadata (such as name, location) and existsLB set to FALSE
|
||||
- By default - cluster default LB is returned
|
||||
|
||||
@ -74,4 +74,4 @@ Service Annotation for Auto and specific load balancer mode
|
||||
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
|
||||
- Call reconcileLoadBalancer(clusterName, service, nodes, false)
|
||||
- Reconcile Public IP, public IP needs related LB reconciled first
|
||||
- Call reconcilePublicIP(cluster, service, false)
|
||||
- Call reconcilePublicIP(cluster, service, false)
|
||||
|
@ -73,7 +73,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
diskID := ""
|
||||
|
||||
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
|
||||
provisonState, id, err := c.getDisk(diskName)
|
||||
provisionState, id, err := c.getDisk(diskName)
|
||||
diskID = id
|
||||
// We are waiting for provisioningState==Succeeded
|
||||
// We don't want to hand-off managed disks to k8s while they are
|
||||
@ -81,7 +81,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if strings.ToLower(provisonState) == "succeeded" {
|
||||
if strings.ToLower(provisionState) == "succeeded" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
@ -106,8 +106,8 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// We don't need poll here, k8s will immediatly stop referencing the disk
|
||||
// the disk will be evantually deleted - cleanly - by ARM
|
||||
// We don't need poll here, k8s will immediately stop referencing the disk
|
||||
// the disk will be eventually deleted - cleanly - by ARM
|
||||
|
||||
glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
|
||||
|
||||
|
82
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type apiCallMetrics struct {
|
||||
latency *prometheus.HistogramVec
|
||||
errors *prometheus.CounterVec
|
||||
}
|
||||
|
||||
var (
|
||||
metricLabels = []string{
|
||||
"request", // API function that is being invoked
|
||||
"resource_group", // Resource group of the resource being monitored
|
||||
"subscription_id", // Subscription ID of the resource being monitored
|
||||
}
|
||||
|
||||
apiMetrics = registerAPIMetrics(metricLabels...)
|
||||
)
|
||||
|
||||
type metricContext struct {
|
||||
start time.Time
|
||||
attributes []string
|
||||
}
|
||||
|
||||
func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
|
||||
return &metricContext{
|
||||
start: time.Now(),
|
||||
attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID},
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *metricContext) Observe(err error) {
|
||||
apiMetrics.latency.WithLabelValues(mc.attributes...).Observe(
|
||||
time.Since(mc.start).Seconds())
|
||||
if err != nil {
|
||||
apiMetrics.errors.WithLabelValues(mc.attributes...).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func registerAPIMetrics(attributes ...string) *apiCallMetrics {
|
||||
metrics := &apiCallMetrics{
|
||||
latency: prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cloudprovider_azure_api_request_duration_seconds",
|
||||
Help: "Latency of an Azure API call",
|
||||
},
|
||||
attributes,
|
||||
),
|
||||
errors: prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cloudprovider_azure_api_request_errors",
|
||||
Help: "Number of errors for an Azure API call",
|
||||
},
|
||||
attributes,
|
||||
),
|
||||
}
|
||||
|
||||
prometheus.MustRegister(metrics.latency)
|
||||
prometheus.MustRegister(metrics.errors)
|
||||
|
||||
return metrics
|
||||
}
|
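For context, a sketch of how a call site inside this package would typically use the metric helpers defined above; the client call, prefix and request labels are assumptions for illustration, not lines from the diff.

// Sketch only: instrument a single ARM request with the latency/error metrics above.
func (az *Cloud) getVirtualMachineWithMetrics(vmName string) (compute.VirtualMachine, error) {
	mc := newMetricContext("vm", "get", az.ResourceGroup, az.SubscriptionID)
	vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
	mc.Observe(err) // records the call latency and bumps the error counter on failure
	return vm, err
}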
39 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics_test.go generated vendored Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAzureMetricLabelCardinality(t *testing.T) {
|
||||
mc := newMetricContext("test", "create", "resource_group", "subscription_id")
|
||||
assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match")
|
||||
}
|
||||
|
||||
func TestAzureMetricLabelPrefix(t *testing.T) {
|
||||
mc := newMetricContext("prefix", "request", "resource_group", "subscription_id")
|
||||
found := false
|
||||
for _, attribute := range mc.attributes {
|
||||
if attribute == "prefix_request" {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "request label must be prefixed")
|
||||
}
|
98 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go generated vendored
@@ -17,6 +1,7 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
@@ -28,18 +29,23 @@ import (
|
||||
)
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
|
||||
func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route, err error) {
|
||||
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
|
||||
glog.V(10).Infof("list: START clusterName=%q", clusterName)
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
return processRoutes(routeTable, existsRouteTable, err)
|
||||
}
|
||||
|
||||
// Injectable for testing
|
||||
func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !existsRouteTable {
|
||||
if !exists {
|
||||
return []*cloudprovider.Route{}, nil
|
||||
}
|
||||
|
||||
var kubeRoutes []*cloudprovider.Route
|
||||
if routeTable.Routes != nil {
|
||||
if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil {
|
||||
kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
|
||||
for i, route := range *routeTable.Routes {
|
||||
instance := mapRouteNameToNodeName(*route.Name)
|
||||
@@ -58,51 +64,53 @@ func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route,
|
||||
return kubeRoutes, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
|
||||
glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return err
|
||||
} else if existsRouteTable {
|
||||
return nil
|
||||
}
|
||||
return az.createRouteTable()
|
||||
}
|
||||
|
||||
func (az *Cloud) createRouteTable() error {
|
||||
routeTable := network.RouteTable{
|
||||
Name: to.StringPtr(az.RouteTableName),
|
||||
Location: to.StringPtr(az.Location),
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Invalidate the cache right after updating
|
||||
az.rtCache.Delete(az.RouteTableName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateRoute creates the described managed route
|
||||
// route.Name will be ignored, although the cloud-provider may use nameHint
|
||||
// to create a more user-meaningful name.
|
||||
func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
|
||||
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
if err != nil {
|
||||
glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
|
||||
return err
|
||||
}
|
||||
if !existsRouteTable {
|
||||
routeTable = network.RouteTable{
|
||||
Name: to.StringPtr(az.RouteTableName),
|
||||
Location: to.StringPtr(az.Location),
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): start", az.RouteTableName)
|
||||
respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
|
||||
if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(10).Infof("RouteTablesClient.Get(%q): start", az.RouteTableName)
|
||||
routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
|
||||
glog.V(10).Infof("RouteTablesClient.Get(%q): end", az.RouteTableName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -119,8 +127,6 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): start", az.RouteTableName)
|
||||
respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
|
||||
resp := <-respChan
|
||||
err = <-errChan
|
||||
@@ -143,12 +149,10 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo
|
||||
|
||||
// DeleteRoute deletes the specified managed route
|
||||
// Route should be as returned by ListRoutes
|
||||
func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RoutesClient.Delete(%q): start", az.RouteTableName)
|
||||
respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
|
344 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go generated vendored Normal file
@@ -0,0 +1,344 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
|
||||
func TestDeleteRoute(t *testing.T) {
|
||||
fakeRoutes := newFakeRoutesClient()
|
||||
|
||||
cloud := &Cloud{
|
||||
RoutesClient: fakeRoutes,
|
||||
Config: Config{
|
||||
ResourceGroup: "foo",
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
}
|
||||
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
|
||||
routeName := mapNodeNameToRouteName(route.TargetNode)
|
||||
|
||||
fakeRoutes.FakeStore = map[string]map[string]network.Route{
|
||||
cloud.RouteTableName: {
|
||||
routeName: {},
|
||||
},
|
||||
}
|
||||
|
||||
err := cloud.DeleteRoute(context.TODO(), "cluster", &route)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error deleting route: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
mp, found := fakeRoutes.FakeStore[cloud.RouteTableName]
|
||||
if !found {
|
||||
t.Errorf("unexpected missing item for %s", cloud.RouteTableName)
|
||||
t.FailNow()
|
||||
}
|
||||
ob, found := mp[routeName]
|
||||
if found {
|
||||
t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRoute(t *testing.T) {
|
||||
fakeTable := newFakeRouteTablesClient()
|
||||
fakeVM := &fakeVMSet{}
|
||||
fakeRoutes := newFakeRoutesClient()
|
||||
|
||||
cloud := &Cloud{
|
||||
RouteTablesClient: fakeTable,
|
||||
RoutesClient: fakeRoutes,
|
||||
vmSet: fakeVM,
|
||||
Config: Config{
|
||||
ResourceGroup: "foo",
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
}
|
||||
cache, _ := cloud.newRouteTableCache()
|
||||
cloud.rtCache = cache
|
||||
|
||||
expectedTable := network.RouteTable{
|
||||
Name: &cloud.RouteTableName,
|
||||
Location: &cloud.Location,
|
||||
}
|
||||
fakeTable.FakeStore = map[string]map[string]network.RouteTable{}
|
||||
fakeTable.FakeStore[cloud.ResourceGroup] = map[string]network.RouteTable{
|
||||
cloud.RouteTableName: expectedTable,
|
||||
}
|
||||
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
|
||||
|
||||
nodeIP := "2.4.6.8"
|
||||
fakeVM.NodeToIP = map[string]map[string]string{
|
||||
"": {
|
||||
"node": nodeIP,
|
||||
},
|
||||
}
|
||||
|
||||
err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error create if not exists route table: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
if len(fakeTable.Calls) != 1 || fakeTable.Calls[0] != "Get" {
|
||||
t.Errorf("unexpected calls create if not exists, exists: %v", fakeTable.Calls)
|
||||
}
|
||||
|
||||
routeName := mapNodeNameToRouteName(route.TargetNode)
|
||||
routeInfo, found := fakeRoutes.FakeStore[cloud.RouteTableName][routeName]
|
||||
if !found {
|
||||
t.Errorf("could not find route: %v in %v", routeName, fakeRoutes.FakeStore)
|
||||
t.FailNow()
|
||||
}
|
||||
if *routeInfo.AddressPrefix != route.DestinationCIDR {
|
||||
t.Errorf("Expected cidr: %s, saw %s", *routeInfo.AddressPrefix, route.DestinationCIDR)
|
||||
}
|
||||
if routeInfo.NextHopType != network.RouteNextHopTypeVirtualAppliance {
|
||||
t.Errorf("Expected next hop: %v, saw %v", network.RouteNextHopTypeVirtualAppliance, routeInfo.NextHopType)
|
||||
}
|
||||
if *routeInfo.NextHopIPAddress != nodeIP {
|
||||
t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {
|
||||
fake := newFakeRouteTablesClient()
|
||||
cloud := &Cloud{
|
||||
RouteTablesClient: fake,
|
||||
Config: Config{
|
||||
ResourceGroup: "foo",
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
}
|
||||
cache, _ := cloud.newRouteTableCache()
|
||||
cloud.rtCache = cache
|
||||
|
||||
expectedTable := network.RouteTable{
|
||||
Name: &cloud.RouteTableName,
|
||||
Location: &cloud.Location,
|
||||
}
|
||||
fake.FakeStore = map[string]map[string]network.RouteTable{}
|
||||
fake.FakeStore[cloud.ResourceGroup] = map[string]network.RouteTable{
|
||||
cloud.RouteTableName: expectedTable,
|
||||
}
|
||||
err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error create if not exists route table: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
if len(fake.Calls) != 1 || fake.Calls[0] != "Get" {
|
||||
t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRouteTableIfNotExists_NotExists(t *testing.T) {
|
||||
fake := newFakeRouteTablesClient()
|
||||
cloud := &Cloud{
|
||||
RouteTablesClient: fake,
|
||||
Config: Config{
|
||||
ResourceGroup: "foo",
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
}
|
||||
cache, _ := cloud.newRouteTableCache()
|
||||
cloud.rtCache = cache
|
||||
|
||||
expectedTable := network.RouteTable{
|
||||
Name: &cloud.RouteTableName,
|
||||
Location: &cloud.Location,
|
||||
}
|
||||
|
||||
err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error create if not exists route table: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
table := fake.FakeStore[cloud.ResourceGroup][cloud.RouteTableName]
|
||||
if *table.Location != *expectedTable.Location {
|
||||
t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
|
||||
}
|
||||
if *table.Name != *expectedTable.Name {
|
||||
t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
|
||||
}
|
||||
if len(fake.Calls) != 2 || fake.Calls[0] != "Get" || fake.Calls[1] != "CreateOrUpdate" {
|
||||
t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateRouteTable(t *testing.T) {
|
||||
fake := newFakeRouteTablesClient()
|
||||
cloud := &Cloud{
|
||||
RouteTablesClient: fake,
|
||||
Config: Config{
|
||||
ResourceGroup: "foo",
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
}
|
||||
cache, _ := cloud.newRouteTableCache()
|
||||
cloud.rtCache = cache
|
||||
|
||||
expectedTable := network.RouteTable{
|
||||
Name: &cloud.RouteTableName,
|
||||
Location: &cloud.Location,
|
||||
}
|
||||
|
||||
err := cloud.createRouteTable()
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error in creating route table: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
table := fake.FakeStore["foo"]["bar"]
|
||||
if *table.Location != *expectedTable.Location {
|
||||
t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
|
||||
}
|
||||
if *table.Name != *expectedTable.Name {
|
||||
t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRoutes(t *testing.T) {
|
||||
tests := []struct {
|
||||
rt network.RouteTable
|
||||
exists bool
|
||||
err error
|
||||
expectErr bool
|
||||
expectedError string
|
||||
expectedRoute []cloudprovider.Route
|
||||
name string
|
||||
}{
|
||||
{
|
||||
err: fmt.Errorf("test error"),
|
||||
expectErr: true,
|
||||
expectedError: "test error",
|
||||
},
|
||||
{
|
||||
exists: false,
|
||||
name: "doesn't exist",
|
||||
},
|
||||
{
|
||||
rt: network.RouteTable{},
|
||||
exists: true,
|
||||
name: "nil routes",
|
||||
},
|
||||
{
|
||||
rt: network.RouteTable{
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
},
|
||||
exists: true,
|
||||
name: "no routes",
|
||||
},
|
||||
{
|
||||
rt: network.RouteTable{
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
|
||||
Routes: &[]network.Route{
|
||||
{
|
||||
Name: to.StringPtr("name"),
|
||||
RoutePropertiesFormat: &network.RoutePropertiesFormat{
|
||||
AddressPrefix: to.StringPtr("1.2.3.4/16"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
exists: true,
|
||||
expectedRoute: []cloudprovider.Route{
|
||||
{
|
||||
Name: "name",
|
||||
TargetNode: mapRouteNameToNodeName("name"),
|
||||
DestinationCIDR: "1.2.3.4/16",
|
||||
},
|
||||
},
|
||||
name: "one route",
|
||||
},
|
||||
{
|
||||
rt: network.RouteTable{
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
|
||||
Routes: &[]network.Route{
|
||||
{
|
||||
Name: to.StringPtr("name"),
|
||||
RoutePropertiesFormat: &network.RoutePropertiesFormat{
|
||||
AddressPrefix: to.StringPtr("1.2.3.4/16"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: to.StringPtr("name2"),
|
||||
RoutePropertiesFormat: &network.RoutePropertiesFormat{
|
||||
AddressPrefix: to.StringPtr("5.6.7.8/16"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
exists: true,
|
||||
expectedRoute: []cloudprovider.Route{
|
||||
{
|
||||
Name: "name",
|
||||
TargetNode: mapRouteNameToNodeName("name"),
|
||||
DestinationCIDR: "1.2.3.4/16",
|
||||
},
|
||||
{
|
||||
Name: "name2",
|
||||
TargetNode: mapRouteNameToNodeName("name2"),
|
||||
DestinationCIDR: "5.6.7.8/16",
|
||||
},
|
||||
},
|
||||
name: "more routes",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
routes, err := processRoutes(test.rt, test.exists, test.err)
|
||||
if test.expectErr {
|
||||
if err == nil {
|
||||
t.Errorf("%s: unexpected non-error", test.name)
|
||||
continue
|
||||
}
|
||||
if err.Error() != test.expectedError {
|
||||
t.Errorf("%s: Expected error: %v, saw error: %v", test.name, test.expectedError, err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !test.expectErr && err != nil {
|
||||
t.Errorf("%s; unexpected error: %v", test.name, err)
|
||||
continue
|
||||
}
|
||||
if len(routes) != len(test.expectedRoute) {
|
||||
t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, routes, test.expectedRoute)
|
||||
continue
|
||||
}
|
||||
for ix := range test.expectedRoute {
|
||||
if !reflect.DeepEqual(test.expectedRoute[ix], *routes[ix]) {
|
||||
t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, test.expectedRoute[ix], *routes[ix])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -30,9 +30,12 @@ import (
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -43,21 +46,22 @@ const (
|
||||
availabilitySetIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s"
|
||||
frontendIPConfigIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/%s"
|
||||
backendPoolIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
|
||||
loadBalancerRuleIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/%s"
|
||||
loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"
|
||||
securityRuleIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s/securityRules/%s"
|
||||
|
||||
// InternalLoadBalancerNameSuffix is the postfix appended to internal load balancer names
|
||||
InternalLoadBalancerNameSuffix = "-internal"
|
||||
|
||||
// nodeLabelRole specifies the role of a node
|
||||
nodeLabelRole = "kubernetes.io/role"
|
||||
|
||||
storageAccountNameMaxLength = 24
|
||||
)
|
||||
|
||||
var errNotInVMSet = errors.New("vm is not in the vmset")
|
||||
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
|
||||
|
||||
// returns the full identifier of a machine
|
||||
func (az *Cloud) getMachineID(machineName string) string {
|
||||
// getStandardMachineID returns the full identifier of a virtual machine.
|
||||
func (az *Cloud) getStandardMachineID(machineName string) string {
|
||||
return fmt.Sprintf(
|
||||
machineIDTemplate,
|
||||
az.SubscriptionID,
|
||||
@@ -94,16 +98,6 @@ func (az *Cloud) getBackendPoolID(lbName, backendPoolName string) string {
|
||||
backendPoolName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a loadbalancer rule.
|
||||
func (az *Cloud) getLoadBalancerRuleID(lbName, lbRuleName string) string {
|
||||
return fmt.Sprintf(
|
||||
loadBalancerRuleIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
lbRuleName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a loadbalancer probe.
|
||||
func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
|
||||
return fmt.Sprintf(
|
||||
@@ -114,134 +108,22 @@ func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
|
||||
lbRuleName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a network security group security rule.
|
||||
func (az *Cloud) getSecurityRuleID(securityRuleName string) string {
|
||||
return fmt.Sprintf(
|
||||
securityRuleIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
az.SecurityGroupName,
|
||||
securityRuleName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a publicIPAddress.
|
||||
func (az *Cloud) getpublicIPAddressID(pipName string) string {
|
||||
return fmt.Sprintf(
|
||||
publicIPAddressIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
pipName)
|
||||
}
|
||||
|
||||
// getLoadBalancerAvailabilitySetNames selects all possible availability sets for
|
||||
// service load balancer, if the service has no loadbalancer mode annotaion returns the
|
||||
// primary availability set if service annotation for loadbalancer availability set
|
||||
// exists then return the eligible a availability set
|
||||
func (az *Cloud) getLoadBalancerAvailabilitySetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
|
||||
hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
|
||||
if !hasMode {
|
||||
// no mode specified in service annotation default to PrimaryAvailabilitySetName
|
||||
availabilitySetNames = &[]string{az.Config.PrimaryAvailabilitySetName}
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
availabilitySetNames, err = az.getAgentPoolAvailabiliySets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("az.getLoadBalancerAvailabilitySetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*availabilitySetNames) == 0 {
|
||||
glog.Errorf("az.getLoadBalancerAvailabilitySetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
// sort the list to have deterministic selection
|
||||
sort.Strings(*availabilitySetNames)
|
||||
if !isAuto {
|
||||
if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
|
||||
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
|
||||
}
|
||||
// validate availability set exists
|
||||
var found bool
|
||||
for sasx := range serviceAvailabilitySetNames {
|
||||
for asx := range *availabilitySetNames {
|
||||
if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
|
||||
found = true
|
||||
serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("az.getLoadBalancerAvailabilitySetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
|
||||
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
|
||||
}
|
||||
}
|
||||
availabilitySetNames = &serviceAvailabilitySetNames
|
||||
func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
|
||||
vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
|
||||
if strings.EqualFold(clusterName, vmSetName) {
|
||||
vmSetName = az.vmSet.GetPrimaryVMSetName()
|
||||
}
|
||||
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
|
||||
// lists the virtual machines for for the resource group and then builds
|
||||
// a list of availability sets that match the nodes available to k8s
|
||||
func (az *Cloud) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
|
||||
vms, err := az.VirtualMachineClientListWithRetry()
|
||||
if err != nil {
|
||||
glog.Errorf("az.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
|
||||
for vmx := range vms {
|
||||
vm := vms[vmx]
|
||||
if vm.AvailabilitySet != nil {
|
||||
vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
|
||||
}
|
||||
}
|
||||
availabilitySetIDs := sets.NewString()
|
||||
agentPoolAvailabilitySets = &[]string{}
|
||||
for nx := range nodes {
|
||||
nodeName := (*nodes[nx]).Name
|
||||
if isMasterNode(nodes[nx]) {
|
||||
continue
|
||||
}
|
||||
asID, ok := vmNameToAvailabilitySetID[nodeName]
|
||||
if !ok {
|
||||
glog.Errorf("az.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
|
||||
return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
|
||||
}
|
||||
if availabilitySetIDs.Has(asID) {
|
||||
// already added in the list
|
||||
continue
|
||||
}
|
||||
asName, err := getLastSegment(asID)
|
||||
if err != nil {
|
||||
glog.Errorf("az.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
|
||||
return nil, err
|
||||
}
|
||||
// AvailabilitySet ID is currently upper cased in a indeterministic way
|
||||
// We want to keep it lower case, before the ID get fixed
|
||||
asName = strings.ToLower(asName)
|
||||
|
||||
*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
|
||||
}
|
||||
|
||||
return agentPoolAvailabilitySets, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) mapLoadBalancerNameToAvailabilitySet(lbName string, clusterName string) (availabilitySetName string) {
|
||||
availabilitySetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
|
||||
if strings.EqualFold(clusterName, lbName) {
|
||||
availabilitySetName = az.Config.PrimaryAvailabilitySetName
|
||||
}
|
||||
|
||||
return availabilitySetName
|
||||
return vmSetName
|
||||
}
|
||||
|
||||
// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress.
|
||||
// Thus Azure do not allow mixed type (public and internal) load balancer.
|
||||
// So we'd have a separate name for internal load balancer.
|
||||
// This would be the name for Azure LoadBalancer resource.
|
||||
func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName string, isInternal bool) string {
|
||||
lbNamePrefix := availabilitySetName
|
||||
if strings.EqualFold(availabilitySetName, az.Config.PrimaryAvailabilitySetName) {
|
||||
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
|
||||
lbNamePrefix := vmSetName
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) {
|
||||
lbNamePrefix = clusterName
|
||||
}
|
||||
if isInternal {
|
||||
@@ -250,7 +132,7 @@ func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName str
|
||||
return lbNamePrefix
|
||||
}
|
||||
|
||||
// isMasterNode returns returns true is the node has a master role label.
|
||||
// isMasterNode returns true if the node has a master role label.
|
||||
// The master role is determined by looking for:
|
||||
// * a kubernetes.io/role="master" label
|
||||
func isMasterNode(node *v1.Node) bool {
|
||||
@@ -311,6 +193,10 @@ func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
|
||||
}
|
||||
|
||||
func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
|
||||
if nic.IPConfigurations == nil {
|
||||
return nil, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
|
||||
}
|
||||
|
||||
if len(*nic.IPConfigurations) == 1 {
|
||||
return &((*nic.IPConfigurations)[0]), nil
|
||||
}
|
||||
@@ -321,7 +207,7 @@ func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguratio
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to determine the determine primary ipconfig. nicname=%q", *nic.Name)
|
||||
return nil, fmt.Errorf("failed to determine the primary ipconfig. nicname=%q", *nic.Name)
|
||||
}
|
||||
|
||||
func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
|
||||
@@ -402,67 +288,7 @@ outer:
|
||||
}
|
||||
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
|
||||
if az.Config.VMType == vmTypeVMSS {
|
||||
ip, err := az.getIPForVmssMachine(nodeName)
|
||||
if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
|
||||
return az.getIPForStandardMachine(nodeName)
|
||||
}
|
||||
|
||||
return ip, err
|
||||
}
|
||||
|
||||
return az.getIPForStandardMachine(nodeName)
|
||||
}
|
||||
|
||||
func (az *Cloud) getIPForStandardMachine(nodeName types.NodeName) (string, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, exists, err := az.getVirtualMachine(nodeName)
|
||||
if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForMachine(%s), az.getVirtualMachine(%s), err=%v", nodeName, nodeName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
nicID, err := getPrimaryInterfaceID(machine)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
nicName, err := getLastSegment(nicID)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
|
||||
nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
|
||||
glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForMachine(%s), az.InterfacesClient.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
}
|
||||
|
||||
// splitProviderID converts a providerID to a NodeName.
|
||||
func splitProviderID(providerID string) (types.NodeName, error) {
|
||||
matches := providerIDRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", errors.New("error splitting providerID")
|
||||
}
|
||||
return types.NodeName(matches[1]), nil
|
||||
return az.vmSet.GetIPByNodeName(string(nodeName), "")
|
||||
}
|
||||
|
||||
var polyTable = crc32.MakeTable(crc32.Koopman)
|
||||
@@ -519,3 +345,329 @@ func ExtractDiskData(diskData interface{}) (provisioningState string, diskState
|
||||
}
|
||||
return provisioningState, diskState, nil
|
||||
}
|
||||
|
||||
// availabilitySet implements VMSet interface for Azure availability sets.
|
||||
type availabilitySet struct {
|
||||
*Cloud
|
||||
}
|
||||
|
||||
// newAvailabilitySet creates a new availabilitySet.
|
||||
func newAvailabilitySet(az *Cloud) VMSet {
|
||||
return &availabilitySet{
|
||||
Cloud: az,
|
||||
}
|
||||
}
|
||||
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
|
||||
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
|
||||
// not exist or is no longer running.
|
||||
func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
var machine compute.VirtualMachine
|
||||
var err error
|
||||
|
||||
machine, err = as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
if as.CloudProviderBackoff {
|
||||
glog.V(2).Infof("InstanceID(%s) backing off", name)
|
||||
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("InstanceID(%s) abort backoff", name)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return *machine.ID, nil
|
||||
}
|
||||
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
// NodeName is part of providerID for standard instances.
|
||||
matches := providerIDRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", errors.New("error splitting providerID")
|
||||
}
|
||||
|
||||
return types.NodeName(matches[1]), nil
|
||||
}
|
||||
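A small example of the provider-ID format this parsing expects; the subscription, resource group and VM names below are placeholders, not values from the diff.

// Sketch only: providerIDRE captures the VM name (i.e. the node name) from a
// provider ID of the form azure:///subscriptions/.../virtualMachines/<vm>.
func exampleProviderIDParsing() {
	providerID := "azure:///subscriptions/sub0/resourceGroups/rg0/providers/Microsoft.Compute/virtualMachines/k8s-agent-0"
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) == 2 {
		fmt.Println(matches[1]) // prints "k8s-agent-0"
	}
}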
|
||||
// GetInstanceTypeByNodeName gets the instance type by node name.
|
||||
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
machine, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(machine.HardwareProfile.VMSize), nil
|
||||
}
|
||||
|
||||
// GetZoneByNodeName gets zone from instance view.
|
||||
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
vm, err := as.getVirtualMachine(types.NodeName(name))
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
|
||||
zone := cloudprovider.Zone{
|
||||
FailureDomain: failureDomain,
|
||||
Region: *(vm.Location),
|
||||
}
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
|
||||
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
|
||||
func (as *availabilitySet) GetPrimaryVMSetName() string {
|
||||
return as.Config.PrimaryAvailabilitySetName
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
func (as *availabilitySet) GetIPByNodeName(name, vmSetName string) (string, error) {
|
||||
nic, err := as.GetPrimaryInterface(name, vmSetName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
}
|
||||
|
||||
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
|
||||
// a list of availability sets that match the nodes available to k8s.
|
||||
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
|
||||
vms, err := as.VirtualMachineClientListWithRetry()
|
||||
if err != nil {
|
||||
glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
|
||||
for vmx := range vms {
|
||||
vm := vms[vmx]
|
||||
if vm.AvailabilitySet != nil {
|
||||
vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
|
||||
}
|
||||
}
|
||||
availabilitySetIDs := sets.NewString()
|
||||
agentPoolAvailabilitySets = &[]string{}
|
||||
for nx := range nodes {
|
||||
nodeName := (*nodes[nx]).Name
|
||||
if isMasterNode(nodes[nx]) {
|
||||
continue
|
||||
}
|
||||
asID, ok := vmNameToAvailabilitySetID[nodeName]
|
||||
if !ok {
|
||||
glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
|
||||
return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
|
||||
}
|
||||
if availabilitySetIDs.Has(asID) {
|
||||
// already added in the list
|
||||
continue
|
||||
}
|
||||
asName, err := getLastSegment(asID)
|
||||
if err != nil {
|
||||
glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
|
||||
return nil, err
|
||||
}
|
||||
// AvailabilitySet ID is currently upper cased in a indeterministic way
|
||||
// We want to keep it lower case, before the ID get fixed
|
||||
asName = strings.ToLower(asName)
|
||||
|
||||
*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
|
||||
}
|
||||
|
||||
return agentPoolAvailabilitySets, nil
|
||||
}
|
||||
|
||||
// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the
// service has no load balancer mode annotation, it returns the primary VMSet;
// if the annotation is set, it returns the eligible VMSets.
|
||||
func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
|
||||
hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
|
||||
if !hasMode {
|
||||
// no mode specified in service annotation default to PrimaryAvailabilitySetName
|
||||
availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*availabilitySetNames) == 0 {
|
||||
glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
// sort the list to have deterministic selection
|
||||
sort.Strings(*availabilitySetNames)
|
||||
if !isAuto {
|
||||
if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
|
||||
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
|
||||
}
|
||||
// validate availability set exists
|
||||
var found bool
|
||||
for sasx := range serviceAvailabilitySetNames {
|
||||
for asx := range *availabilitySetNames {
|
||||
if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
|
||||
found = true
|
||||
serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
|
||||
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
|
||||
}
|
||||
}
|
||||
availabilitySetNames = &serviceAvailabilitySetNames
|
||||
}
|
||||
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
var machine compute.VirtualMachine
|
||||
|
||||
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
primaryNicID, err := getPrimaryInterfaceID(machine)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
nicName, err := getLastSegment(primaryNicID)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check availability set
|
||||
if vmSetName != "" {
|
||||
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
|
||||
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
|
||||
return network.Interface{}, errNotInVMSet
|
||||
}
|
||||
}
|
||||
|
||||
nic, err := as.InterfacesClient.Get(as.ResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
return nic, nil
|
||||
}
|
||||
|
||||
// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string) error {
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
nic, err := as.GetPrimaryInterface(vmName, vmSetName)
|
||||
if err != nil {
|
||||
if err == errNotInVMSet {
|
||||
glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
var primaryIPConfig *network.InterfaceIPConfiguration
|
||||
primaryIPConfig, err = getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
foundPool := false
|
||||
newBackendPools := []network.BackendAddressPool{}
|
||||
if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
|
||||
}
|
||||
for _, existingPool := range newBackendPools {
|
||||
if strings.EqualFold(backendPoolID, *existingPool.ID) {
|
||||
foundPool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
newBackendPools = append(newBackendPools,
|
||||
network.BackendAddressPool{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
|
||||
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
|
||||
nicName := *nic.Name
|
||||
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
respChan, errChan := as.InterfacesClient.CreateOrUpdate(as.ResourceGroup, *nic.Name, nic, nil)
|
||||
resp := <-respChan
|
||||
err := <-errChan
|
||||
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
|
||||
if as.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
|
||||
retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
hostUpdates := make([]func() error, len(nodes))
|
||||
for i, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
f := func() error {
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
hostUpdates[i] = f
|
||||
}
|
||||
|
||||
errs := utilerrors.AggregateGoroutines(hostUpdates...)
|
||||
if errs != nil {
|
||||
return utilerrors.Flatten(errs)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
// Do nothing for availability set.
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateStorageAccountName generates a storage account name from the given prefix and a UUID suffix
|
||||
func generateStorageAccountName(accountNamePrefix string) string {
|
||||
uniqueID := strings.Replace(string(uuid.NewUUID()), "-", "", -1)
|
||||
accountName := strings.ToLower(accountNamePrefix + uniqueID)
|
||||
if len(accountName) > storageAccountNameMaxLength {
|
||||
return accountName[:storageAccountNameMaxLength-1]
|
||||
}
|
||||
return accountName
|
||||
}
|
166 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go generated vendored Normal file
@@ -0,0 +1,166 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestIsMasterNode(t *testing.T) {
|
||||
if isMasterNode(&v1.Node{}) {
|
||||
t.Errorf("Empty node should not be master!")
|
||||
}
|
||||
if isMasterNode(&v1.Node{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelRole: "worker",
|
||||
},
|
||||
},
|
||||
}) {
|
||||
t.Errorf("Node labelled 'worker' should not be master!")
|
||||
}
|
||||
if !isMasterNode(&v1.Node{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelRole: "master",
|
||||
},
|
||||
},
|
||||
}) {
|
||||
t.Errorf("Node should be master!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLastSegment(t *testing.T) {
|
||||
tests := []struct {
|
||||
ID string
|
||||
expected string
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
ID: "",
|
||||
expected: "",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
ID: "foo/",
|
||||
expected: "",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
ID: "foo/bar",
|
||||
expected: "bar",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
ID: "foo/bar/baz",
|
||||
expected: "baz",
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
s, e := getLastSegment(test.ID)
|
||||
if test.expectErr && e == nil {
|
||||
t.Errorf("Expected err, but it was nil")
|
||||
continue
|
||||
}
|
||||
if !test.expectErr && e != nil {
|
||||
t.Errorf("Unexpected error: %v", e)
|
||||
continue
|
||||
}
|
||||
if s != test.expected {
|
||||
t.Errorf("expected: %s, got %s", test.expected, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateStorageAccountName(t *testing.T) {
|
||||
tests := []struct {
|
||||
prefix string
|
||||
}{
|
||||
{
|
||||
prefix: "",
|
||||
},
|
||||
{
|
||||
prefix: "pvc",
|
||||
},
|
||||
{
|
||||
prefix: "1234512345123451234512345",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
accountName := generateStorageAccountName(test.prefix)
|
||||
if len(accountName) > storageAccountNameMaxLength || len(accountName) < 3 {
|
||||
t.Errorf("input prefix: %s, output account name: %s, length not in [3,%d]", test.prefix, accountName, storageAccountNameMaxLength)
|
||||
}
|
||||
|
||||
for _, char := range accountName {
|
||||
if (char < 'a' || char > 'z') && (char < '0' || char > '9') {
|
||||
t.Errorf("input prefix: %s, output account name: %s, there is non-digit or non-letter(%q)", test.prefix, accountName, char)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
az.PrimaryAvailabilitySetName = "primary"
|
||||
|
||||
cases := []struct {
|
||||
description string
|
||||
lbName string
|
||||
clusterName string
|
||||
expectedVMSet string
|
||||
}{
|
||||
{
|
||||
description: "default external LB should map to primary vmset",
|
||||
lbName: "azure",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "primary",
|
||||
},
|
||||
{
|
||||
description: "default internal LB should map to primary vmset",
|
||||
lbName: "azure-internal",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "primary",
|
||||
},
|
||||
{
|
||||
description: "non-default external LB should map to its own vmset",
|
||||
lbName: "azuretest-internal",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "azuretest",
|
||||
},
|
||||
{
|
||||
description: "non-default internal LB should map to its own vmset",
|
||||
lbName: "azuretest-internal",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "azuretest",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
|
||||
assert.Equal(t, c.expectedVMSet, vmset, c.description)
|
||||
}
|
||||
}
|
63 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go generated vendored
@@ -19,56 +19,41 @@ package azure
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultStorageAccountType = string(storage.StandardLRS)
|
||||
fileShareAccountNamePrefix = "f"
|
||||
sharedDiskAccountNamePrefix = "ds"
|
||||
dedicatedDiskAccountNamePrefix = "dd"
|
||||
)
|
||||
|
||||
// CreateFileShare creates a file share, using a matching storage account
|
||||
func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) {
|
||||
var err error
|
||||
accounts := []accountWithLocation{}
|
||||
if len(storageAccount) > 0 {
|
||||
accounts = append(accounts, accountWithLocation{Name: storageAccount})
|
||||
} else {
|
||||
// find a storage account
|
||||
accounts, err = az.getStorageAccounts()
|
||||
if err != nil {
|
||||
// TODO: create a storage account and container
|
||||
return "", "", err
|
||||
}
|
||||
}
|
||||
for _, account := range accounts {
|
||||
glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
|
||||
if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 {
|
||||
// find the access key with this account
|
||||
key, err := az.getStorageAccesskey(account.Name)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("could not get storage key for storage account %s: %v", account.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = az.createFileShare(account.Name, key, name, requestGB)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to create share %s in account %s: %v", name, account.Name, err)
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("created share %s in account %s", name, account.Name)
|
||||
return account.Name, key, err
|
||||
}
|
||||
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
|
||||
account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = fmt.Errorf("failed to find a matching storage account")
|
||||
if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
|
||||
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
|
||||
}
|
||||
return "", "", err
|
||||
glog.V(4).Infof("created share %s in account %s", shareName, account)
|
||||
return account, key, nil
|
||||
}
|
||||
|
||||
// DeleteFileShare deletes a file share using storage account name and key
|
||||
func (az *Cloud) DeleteFileShare(accountName, key, name string) error {
|
||||
err := az.deleteFileShare(accountName, key, name)
|
||||
if err != nil {
|
||||
func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) error {
|
||||
if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("share %s deleted", name)
|
||||
glog.V(4).Infof("share %s deleted", shareName)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// ResizeFileShare resizes a file share
|
||||
func (az *Cloud) ResizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return az.resizeFileShare(accountName, accountKey, name, sizeGiB)
|
||||
}
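For orientation, a compile-only sketch of how the three file-share helpers above fit together, assuming an already configured *azure.Cloud; the share name, SKU, location and sizes are illustrative values:

package sharedemo

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
)

// provisionShare shows the call order of the file-share helpers above:
// create (which also resolves or creates a matching storage account via
// ensureStorageAccount), optionally resize, and finally delete using the
// returned account name and key.
func provisionShare(az *azure.Cloud) error {
	account, key, err := az.CreateFileShare("share1", "", "Standard_LRS", "eastus", 5)
	if err != nil {
		return fmt.Errorf("create failed: %v", err)
	}
	if err := az.ResizeFileShare(account, key, "share1", 10); err != nil {
		return fmt.Errorf("resize failed: %v", err)
	}
	return az.DeleteFileShare(account, key, "share1")
}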
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
Normal file
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
)
|
||||
|
||||
func TestCreateFileShare(t *testing.T) {
|
||||
cloud := &Cloud{}
|
||||
fake := newFakeStorageAccountClient()
|
||||
cloud.StorageAccountClient = fake
|
||||
cloud.FileClient = &fakeFileClient{}
|
||||
|
||||
name := "baz"
|
||||
sku := "sku"
|
||||
location := "centralus"
|
||||
value := "foo key"
|
||||
bogus := "bogus"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
acct string
|
||||
acctType string
|
||||
loc string
|
||||
gb int
|
||||
accounts storage.AccountListResult
|
||||
keys storage.AccountListKeysResult
|
||||
err error
|
||||
|
||||
expectErr bool
|
||||
expectAcct string
|
||||
expectKey string
|
||||
}{
|
||||
{
|
||||
name: "foo",
|
||||
acct: "bar",
|
||||
acctType: "type",
|
||||
loc: "eastus",
|
||||
gb: 10,
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
acct: "",
|
||||
acctType: "type",
|
||||
loc: "eastus",
|
||||
gb: 10,
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
acct: "",
|
||||
acctType: sku,
|
||||
loc: location,
|
||||
gb: 10,
|
||||
accounts: storage.AccountListResult{
|
||||
Value: &[]storage.Account{
|
||||
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
|
||||
},
|
||||
},
|
||||
keys: storage.AccountListKeysResult{
|
||||
Keys: &[]storage.AccountKey{
|
||||
{Value: &value},
|
||||
},
|
||||
},
|
||||
expectAcct: "baz",
|
||||
expectKey: "key",
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
acct: "",
|
||||
acctType: sku,
|
||||
loc: location,
|
||||
gb: 10,
|
||||
accounts: storage.AccountListResult{
|
||||
Value: &[]storage.Account{
|
||||
{Name: &bogus, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "foo",
|
||||
acct: "",
|
||||
acctType: sku,
|
||||
loc: location,
|
||||
gb: 10,
|
||||
accounts: storage.AccountListResult{
|
||||
Value: &[]storage.Account{
|
||||
{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &bogus},
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
fake.Accounts = test.accounts
|
||||
fake.Keys = test.keys
|
||||
fake.Err = test.err
|
||||
|
||||
account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.loc, test.gb)
|
||||
if test.expectErr && err == nil {
|
||||
t.Errorf("unexpected non-error")
|
||||
continue
|
||||
}
|
||||
if !test.expectErr && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
continue
|
||||
}
|
||||
if test.expectAcct != account {
|
||||
t.Errorf("Expected: %s, got %s", test.expectAcct, account)
|
||||
}
|
||||
if test.expectKey != key {
|
||||
t.Errorf("Expected: %s, got %s", test.expectKey, key)
|
||||
}
|
||||
}
|
||||
}
81
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
81
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
@ -20,6 +20,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
@ -27,32 +29,29 @@ type accountWithLocation struct {
|
||||
Name, StorageType, Location string
|
||||
}
|
||||
|
||||
// getStorageAccounts gets the storage accounts' name, type, location in a resource group
|
||||
func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): start", az.ResourceGroup)
|
||||
// getStorageAccounts gets the name, type, and location of all storage accounts in a resource group that match matchingAccountType and matchingLocation
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
|
||||
glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): end", az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Value == nil {
|
||||
return nil, fmt.Errorf("no storage accounts from resource group %s", az.ResourceGroup)
|
||||
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", az.ResourceGroup)
|
||||
}
|
||||
|
||||
accounts := []accountWithLocation{}
|
||||
for _, acct := range *result.Value {
|
||||
if acct.Name != nil {
|
||||
name := *acct.Name
|
||||
loc := ""
|
||||
if acct.Location != nil {
|
||||
loc = *acct.Location
|
||||
if acct.Name != nil && acct.Location != nil && acct.Sku != nil {
|
||||
storageType := string((*acct.Sku).Name)
|
||||
if matchingAccountType != "" && !strings.EqualFold(matchingAccountType, storageType) {
|
||||
continue
|
||||
}
|
||||
storageType := ""
|
||||
if acct.Sku != nil {
|
||||
storageType = string((*acct.Sku).Name)
|
||||
|
||||
location := *acct.Location
|
||||
if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) {
|
||||
continue
|
||||
}
|
||||
accounts = append(accounts, accountWithLocation{Name: name, StorageType: storageType, Location: loc})
|
||||
accounts = append(accounts, accountWithLocation{Name: *acct.Name, StorageType: storageType, Location: location})
|
||||
}
|
||||
}
|
||||
|
||||
@ -61,10 +60,7 @@ func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) {
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("StorageAccountClient.ListKeys(%q): start", account)
|
||||
result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account)
|
||||
glog.V(10).Infof("StorageAccountClient.ListKeys(%q): end", account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -83,3 +79,52 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
}
|
||||
return "", fmt.Errorf("no valid keys")
|
||||
}
|
||||
|
||||
// ensureStorageAccount searches for a matching storage account, creates one
// (named with genAccountNamePrefix) if none is found, and returns the
// accountName and accountKey.
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
|
||||
if len(accountName) == 0 {
|
||||
// find a storage account that matches accountType
|
||||
accounts, err := az.getStorageAccounts(accountType, location)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
|
||||
}
|
||||
|
||||
if len(accounts) > 0 {
|
||||
accountName = accounts[0].Name
|
||||
glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
|
||||
}
|
||||
|
||||
if len(accountName) == 0 {
|
||||
// not found a matching account, now create a new account in current resource group
|
||||
accountName = generateStorageAccountName(genAccountNamePrefix)
|
||||
if location == "" {
|
||||
location = az.Location
|
||||
}
|
||||
if accountType == "" {
|
||||
accountType = defaultStorageAccountType
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
|
||||
accountName, az.ResourceGroup, location, accountType)
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
Tags: &map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Location: &location}
|
||||
cancel := make(chan struct{})
|
||||
|
||||
_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
|
||||
err := <-errchan
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// find the access key with this account
|
||||
accountKey, err := az.getStorageAccesskey(accountName)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
|
||||
return accountName, accountKey, nil
|
||||
}
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go
generated
vendored
Normal file
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
)
|
||||
|
||||
func TestGetStorageAccessKeys(t *testing.T) {
|
||||
cloud := &Cloud{}
|
||||
fake := newFakeStorageAccountClient()
|
||||
cloud.StorageAccountClient = fake
|
||||
value := "foo bar"
|
||||
|
||||
tests := []struct {
|
||||
results storage.AccountListKeysResult
|
||||
expectedKey string
|
||||
expectErr bool
|
||||
err error
|
||||
}{
|
||||
{storage.AccountListKeysResult{}, "", true, nil},
|
||||
{
|
||||
storage.AccountListKeysResult{
|
||||
Keys: &[]storage.AccountKey{
|
||||
{Value: &value},
|
||||
},
|
||||
},
|
||||
"bar",
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
storage.AccountListKeysResult{
|
||||
Keys: &[]storage.AccountKey{
|
||||
{},
|
||||
{Value: &value},
|
||||
},
|
||||
},
|
||||
"bar",
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{storage.AccountListKeysResult{}, "", true, fmt.Errorf("test error")},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
expectedKey := test.expectedKey
|
||||
fake.Keys = test.results
|
||||
fake.Err = test.err
|
||||
key, err := cloud.getStorageAccesskey("acct")
|
||||
if test.expectErr && err == nil {
|
||||
t.Errorf("Unexpected non-error")
|
||||
continue
|
||||
}
|
||||
if !test.expectErr && err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
continue
|
||||
}
|
||||
if key != expectedKey {
|
||||
t.Errorf("expected: %s, saw %s", expectedKey, key)
|
||||
}
|
||||
}
|
||||
}
|
139
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
139
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
@ -29,8 +30,8 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
@ -125,7 +126,7 @@ func testLoadBalancerServiceDefaultModeSelection(t *testing.T, isInternal bool)
|
||||
svc = getTestService(svcName, v1.ProtocolTCP, 8081)
|
||||
}
|
||||
|
||||
lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
|
||||
lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -178,7 +179,7 @@ func testLoadBalancerServiceAutoModeSelection(t *testing.T, isInternal bool) {
|
||||
svc = getTestService(svcName, v1.ProtocolTCP, 8081)
|
||||
}
|
||||
setLoadBalancerAutoModeAnnotation(&svc)
|
||||
lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
|
||||
lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -241,7 +242,7 @@ func testLoadBalancerServicesSpecifiedSelection(t *testing.T, isInternal bool) {
|
||||
lbMode := fmt.Sprintf("%s,%s", selectedAvailabilitySetName1, selectedAvailabilitySetName2)
|
||||
setLoadBalancerModeAnnotation(&svc, lbMode)
|
||||
|
||||
lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
|
||||
lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -279,7 +280,7 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
|
||||
svc = getTestService(svcName, v1.ProtocolTCP, 8081)
|
||||
}
|
||||
|
||||
lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
|
||||
lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -305,7 +306,7 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) {
|
||||
} else {
|
||||
svc = getTestService(svcName, v1.ProtocolTCP, 8081)
|
||||
}
|
||||
_, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
|
||||
_, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
|
||||
if err == nil {
|
||||
t.Errorf("Expect any new service to fail as max limit in lb has reached")
|
||||
} else {
|
||||
@ -336,7 +337,7 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo
|
||||
svc = getTestService(svcName, v1.ProtocolTCP, 8081)
|
||||
}
|
||||
setLoadBalancerAutoModeAnnotation(&svc)
|
||||
lbStatus, err := az.EnsureLoadBalancer(testClusterName, &svc, clusterResources.nodes)
|
||||
lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -365,7 +366,7 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo
|
||||
t.Errorf("Unexpected number of LB's: Expected (%d) Found (%d)", expectedNumOfLB, lbCount)
|
||||
}
|
||||
|
||||
err := az.EnsureLoadBalancerDeleted(testClusterName, &svc)
|
||||
err := az.EnsureLoadBalancerDeleted(context.TODO(), testClusterName, &svc)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
@ -850,8 +851,10 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) {
|
||||
func getTestCloud() (az *Cloud) {
|
||||
az = &Cloud{
|
||||
Config: Config{
|
||||
TenantID: "tenant",
|
||||
SubscriptionID: "subscription",
|
||||
AzureAuthConfig: auth.AzureAuthConfig{
|
||||
TenantID: "tenant",
|
||||
SubscriptionID: "subscription",
|
||||
},
|
||||
ResourceGroup: "rg",
|
||||
VnetResourceGroup: "rg",
|
||||
Location: "westus",
|
||||
@ -863,13 +866,22 @@ func getTestCloud() (az *Cloud) {
|
||||
MaximumLoadBalancerRuleCount: 250,
|
||||
},
|
||||
}
|
||||
az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 100)
|
||||
az.DisksClient = newFakeDisksClient()
|
||||
az.InterfacesClient = newFakeAzureInterfacesClient()
|
||||
az.LoadBalancerClient = newFakeAzureLBClient()
|
||||
az.PublicIPAddressesClient = newFakeAzurePIPClient(az.Config.SubscriptionID)
|
||||
az.SubnetsClient = newFakeAzureSubnetsClient()
|
||||
az.RoutesClient = newFakeRoutesClient()
|
||||
az.RouteTablesClient = newFakeRouteTablesClient()
|
||||
az.SecurityGroupsClient = newFakeAzureNSGClient()
|
||||
az.SubnetsClient = newFakeAzureSubnetsClient()
|
||||
az.VirtualMachineScaleSetsClient = newFakeVirtualMachineScaleSetsClient()
|
||||
az.VirtualMachineScaleSetVMsClient = newFakeVirtualMachineScaleSetVMsClient()
|
||||
az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient()
|
||||
az.InterfacesClient = newFakeAzureInterfacesClient()
|
||||
az.vmSet = newAvailabilitySet(az)
|
||||
az.vmCache, _ = az.newVMCache()
|
||||
az.lbCache, _ = az.newLBCache()
|
||||
az.nsgCache, _ = az.newNSGCache()
|
||||
az.rtCache, _ = az.newRouteTableCache()
|
||||
|
||||
return az
|
||||
}
|
||||
@ -1000,23 +1012,6 @@ func getBackendPort(port int32) int32 {
|
||||
return port + 10000
|
||||
}
|
||||
|
||||
func getTestPublicFipConfigurationProperties() network.FrontendIPConfigurationPropertiesFormat {
|
||||
return network.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")},
|
||||
}
|
||||
}
|
||||
|
||||
func getTestInternalFipConfigurationProperties(expectedSubnetName *string) network.FrontendIPConfigurationPropertiesFormat {
|
||||
var expectedSubnet *network.Subnet
|
||||
if expectedSubnetName != nil {
|
||||
expectedSubnet = &network.Subnet{Name: expectedSubnetName}
|
||||
}
|
||||
return network.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")},
|
||||
Subnet: expectedSubnet,
|
||||
}
|
||||
}
|
||||
|
||||
func getTestService(identifier string, proto v1.Protocol, requestedPorts ...int32) v1.Service {
|
||||
ports := []v1.ServicePort{}
|
||||
for _, port := range requestedPorts {
|
||||
@ -1056,39 +1051,6 @@ func setLoadBalancerAutoModeAnnotation(service *v1.Service) {
|
||||
setLoadBalancerModeAnnotation(service, ServiceAnnotationLoadBalancerAutoModeValue)
|
||||
}
|
||||
|
||||
func getTestLoadBalancer(services ...v1.Service) network.LoadBalancer {
|
||||
rules := []network.LoadBalancingRule{}
|
||||
probes := []network.Probe{}
|
||||
|
||||
for _, service := range services {
|
||||
for _, port := range service.Spec.Ports {
|
||||
ruleName := getLoadBalancerRuleName(&service, port, nil)
|
||||
rules = append(rules, network.LoadBalancingRule{
|
||||
Name: to.StringPtr(ruleName),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
FrontendPort: to.Int32Ptr(port.Port),
|
||||
BackendPort: to.Int32Ptr(port.Port),
|
||||
},
|
||||
})
|
||||
probes = append(probes, network.Probe{
|
||||
Name: to.StringPtr(ruleName),
|
||||
ProbePropertiesFormat: &network.ProbePropertiesFormat{
|
||||
Port: to.Int32Ptr(port.NodePort),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
lb := network.LoadBalancer{
|
||||
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
|
||||
LoadBalancingRules: &rules,
|
||||
Probes: &probes,
|
||||
},
|
||||
}
|
||||
|
||||
return lb
|
||||
}
|
||||
|
||||
func getServiceSourceRanges(service *v1.Service) []string {
|
||||
if len(service.Spec.LoadBalancerSourceRanges) == 0 {
|
||||
if !requiresInternalLoadBalancer(service) {
|
||||
@ -1285,7 +1247,7 @@ func validatePublicIP(t *testing.T, publicIP *network.PublicIPAddress, service *
|
||||
}
|
||||
// We cannot use service.Spec.LoadBalancerIP to compare with
|
||||
// Public IP's IPAddress
|
||||
// Becuase service properties are updated outside of cloudprovider code
|
||||
// Because service properties are updated outside of cloudprovider code
|
||||
}
|
||||
|
||||
func contains(ruleValues []string, targetValue string) bool {
|
||||
@ -1607,19 +1569,57 @@ func validateEmptyConfig(t *testing.T, config string) {
|
||||
if azureCloud.CloudProviderBackoff != false {
|
||||
t.Errorf("got incorrect value for CloudProviderBackoff")
|
||||
}
|
||||
|
||||
// rate limits should be disabled by default if not explicitly enabled in config
|
||||
if azureCloud.CloudProviderRateLimit != false {
|
||||
t.Errorf("got incorrect value for CloudProviderRateLimit")
|
||||
}
|
||||
}
|
||||
func TestGetZone(t *testing.T) {
|
||||
data := `{"ID":"_azdev","UD":"0","FD":"99"}`
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, data)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
cloud := &Cloud{}
|
||||
cloud.Location = "eastus"
|
||||
|
||||
zone, err := cloud.getZoneFromURL(ts.URL)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if zone.FailureDomain != "99" {
|
||||
t.Errorf("Unexpected value: %s, expected '99'", zone.FailureDomain)
|
||||
}
|
||||
if zone.Region != cloud.Location {
|
||||
t.Errorf("Expected: %s, saw: %s", cloud.Location, zone.Region)
|
||||
}
|
||||
}
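The metadata document used above is a small JSON object whose FD field becomes the zone's FailureDomain, while Region comes from the configured location. A runnable sketch that decodes the same payload (the struct and field names are assumptions based on the JSON in the test, not the vendored types):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// instanceInfo mirrors the fields of the fault-domain document returned by
// the instance metadata endpoint exercised above.
type instanceInfo struct {
	ID           string `json:"ID"`
	UpdateDomain string `json:"UD"`
	FaultDomain  string `json:"FD"`
}

func main() {
	payload := `{"ID":"_azdev","UD":"0","FD":"99"}`
	var info instanceInfo
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&info); err != nil {
		panic(err)
	}
	// The cloud provider reports the fault domain as the zone's FailureDomain
	// and uses the configured location as the Region.
	fmt.Printf("FailureDomain=%s Region=%s\n", info.FaultDomain, "eastus")
}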
|
||||
|
||||
func TestFetchFaultDomain(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, `{"ID":"_azdev","UD":"0","FD":"99"}`)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
faultDomain, err := fetchFaultDomain(ts.URL)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if faultDomain == nil {
|
||||
t.Errorf("Unexpected nil fault domain")
|
||||
}
|
||||
if *faultDomain != "99" {
|
||||
t.Errorf("Expected '99', saw '%s'", *faultDomain)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeInstanceInfo(t *testing.T) {
|
||||
response := `{"ID":"_azdev","UD":"0","FD":"99"}`
|
||||
|
||||
faultDomain, err := readFaultDomain(strings.NewReader(response))
|
||||
if err != nil {
|
||||
t.Error("Unexpected error in ReadFaultDomain")
|
||||
t.Errorf("Unexpected error in ReadFaultDomain: %v", err)
|
||||
}
|
||||
|
||||
if faultDomain == nil {
|
||||
@ -1631,7 +1631,8 @@ func TestDecodeInstanceInfo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitProviderID(t *testing.T) {
|
||||
func TestGetNodeNameByProviderID(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
providers := []struct {
|
||||
providerID string
|
||||
name types.NodeName
|
||||
@ -1666,7 +1667,7 @@ func TestSplitProviderID(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range providers {
name, err := splitProviderID(test.providerID)
name, err := az.vmSet.GetNodeNameByProviderID(test.providerID)
if (err != nil) != test.fail {
t.Errorf("Expected to fail=%t, with pattern %v", test.fail, test)
}
|
||||
@ -1717,7 +1718,7 @@ func TestMetadataParsing(t *testing.T) {
|
||||
"macAddress": "002248020E1E"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
network := NetworkMetadata{}
|
||||
|
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_test.go
generated
vendored
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_test.go
generated
vendored
@ -1,53 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetVmssInstanceID(t *testing.T) {
|
||||
tests := []struct {
|
||||
msg string
|
||||
machineName string
|
||||
expectError bool
|
||||
expectedInstanceID string
|
||||
}{{
|
||||
msg: "invalid vmss instance name",
|
||||
machineName: "vmvm",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
msg: "valid vmss instance name",
|
||||
machineName: "vm00000Z",
|
||||
expectError: false,
|
||||
expectedInstanceID: "35",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
instanceID, err := getVmssInstanceID(test.machineName)
|
||||
if test.expectError {
|
||||
assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
|
||||
} else {
|
||||
assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
|
||||
}
|
||||
}
|
||||
}
102
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go
generated
vendored
102
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go
generated
vendored
@ -1,102 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
func (az *Cloud) getIPForVmssMachine(nodeName types.NodeName) (string, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, exists, err := az.getVmssVirtualMachine(nodeName)
|
||||
if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForVmssMachine(%s), az.getVmssVirtualMachine(%s), err=%v", nodeName, nodeName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
nicID, err := getPrimaryInterfaceIDForVmssMachine(machine)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
nicName, err := getLastSegment(nicID)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForVmssMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
|
||||
nic, err := az.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(az.ResourceGroup, az.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "")
|
||||
glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForVmssMachine(%s), az.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func getPrimaryInterfaceIDForVmssMachine(machine compute.VirtualMachineScaleSetVM) (string, error) {
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
|
||||
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
|
||||
if *ref.Primary {
|
||||
return *ref.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
|
||||
}
|
||||
|
||||
// machineName is composed of computerNamePrefix and 36-based instanceID.
|
||||
// And instanceID part if in fixed length of 6 characters.
|
||||
// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
|
||||
func getVmssInstanceID(machineName string) (string, error) {
|
||||
nameLength := len(machineName)
|
||||
if nameLength < 6 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
|
||||
if err != nil {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
71
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
Normal file
71
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// VMSet defines the functions that all vmsets (including scale sets and
// availability sets) must implement.
type VMSet interface {
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
|
||||
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
|
||||
// not exist or is no longer running.
|
||||
GetInstanceIDByNodeName(name string) (string, error)
|
||||
// GetInstanceTypeByNodeName gets the instance type by node name.
|
||||
GetInstanceTypeByNodeName(name string) (string, error)
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
GetIPByNodeName(name, vmSetName string) (string, error)
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error)
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
GetNodeNameByProviderID(providerID string) (types.NodeName, error)
|
||||
|
||||
// GetZoneByNodeName gets cloudprovider.Zone by node name.
|
||||
GetZoneByNodeName(name string) (cloudprovider.Zone, error)
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
|
||||
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
|
||||
GetPrimaryVMSetName() string
|
||||
// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the
// service has no load balancer mode annotation, it returns the primary VMSet;
// if the annotation exists, it returns the eligible VMSets it names.
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
EnsureBackendPoolDeleted(poolID, vmSetName string) error
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
|
||||
// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
|
||||
DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error)
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
|
||||
GetNextDiskLun(nodeName types.NodeName) (int32, error)
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
|
||||
}
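A compile-only sketch of programming against the VMSet interface above, so callers stay agnostic of whether a node lives in a scale set or an availability set; the helper name and parameters are illustrative:

package vmsetsketch

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
)

// describeNode resolves a node's instance ID and zone purely through the
// VMSet interface, which is how the cloud provider keeps the scale set and
// availability set implementations interchangeable.
func describeNode(vmSet azure.VMSet, nodeName string) error {
	instanceID, err := vmSet.GetInstanceIDByNodeName(nodeName)
	if err != nil {
		return err
	}
	zone, err := vmSet.GetZoneByNodeName(nodeName)
	if err != nil {
		return err
	}
	fmt.Printf("node %s: instanceID=%s faultDomain=%s\n", nodeName, instanceID, zone.FailureDomain)
	return nil
}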
764
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
Normal file
764
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
Normal file
@ -0,0 +1,764 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
var (
// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
ErrorNotVmssInstance = errors.New("not a vmss instance")

scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
)
|
||||
|
||||
// scaleSet implements VMSet interface for Azure scale set.
|
||||
type scaleSet struct {
|
||||
*Cloud
|
||||
|
||||
// availabilitySet is also required for scaleSet because some instances
|
||||
// (e.g. master nodes) may not belong to any scale sets.
|
||||
availabilitySet VMSet
|
||||
|
||||
vmssCache *timedCache
|
||||
vmssVMCache *timedCache
|
||||
nodeNameToScaleSetMappingCache *timedCache
|
||||
availabilitySetNodesCache *timedCache
|
||||
}
|
||||
|
||||
// newScaleSet creates a new scaleSet.
|
||||
func newScaleSet(az *Cloud) (VMSet, error) {
|
||||
var err error
|
||||
ss := &scaleSet{
|
||||
Cloud: az,
|
||||
availabilitySet: newAvailabilitySet(az),
|
||||
}
|
||||
|
||||
ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss.vmssCache, err = ss.newVmssCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss.vmssVMCache, err = ss.newVmssVMCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
|
||||
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
|
||||
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
instanceID, err = getScaleSetVMInstanceID(nodeName)
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
}
|
||||
|
||||
ssName, err = ss.getScaleSetNameByNodeName(nodeName)
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
}
|
||||
|
||||
if ssName == "" {
|
||||
return "", "", vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
|
||||
cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
}
|
||||
|
||||
if cachedVM == nil {
|
||||
glog.Errorf("Can't find node (%q) in any scale sets", nodeName)
|
||||
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return ssName, instanceID, *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// getVmssVMByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of the scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm computepreview.VirtualMachineScaleSetVM, err error) {
|
||||
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
|
||||
cachedVM, err := ss.vmssVMCache.Get(vmName)
|
||||
if err != nil {
|
||||
return vm, err
|
||||
}
|
||||
|
||||
if cachedVM == nil {
glog.Errorf("couldn't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID)
return vm, cloudprovider.InstanceNotFound
}
|
||||
|
||||
return *(cachedVM.(*computepreview.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
|
||||
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
|
||||
// not exist or is no longer running.
|
||||
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return "", err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetInstanceIDByNodeName(name)
|
||||
}
|
||||
|
||||
_, _, vm, err := ss.getVmssVM(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return *vm.ID, nil
|
||||
}
|
||||
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
|
||||
func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
// NodeName is not part of providerID for vmss instances.
|
||||
scaleSetName, err := extractScaleSetNameByExternalID(providerID)
if err != nil {
glog.V(4).Infof("Cannot extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}

instanceID, err := getLastSegment(providerID)
if err != nil {
glog.V(4).Infof("Cannot extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err)
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
}
|
||||
|
||||
vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
|
||||
nodeName := strings.ToLower(*vm.OsProfile.ComputerName)
|
||||
return types.NodeName(nodeName), nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// GetInstanceTypeByNodeName gets the instance type by node name.
|
||||
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return "", err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetInstanceTypeByNodeName(name)
|
||||
}
|
||||
|
||||
_, _, vm, err := ss.getVmssVM(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if vm.Sku != nil && vm.Sku.Name != nil {
|
||||
return *vm.Sku.Name, nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// GetZoneByNodeName gets cloudprovider.Zone by node name.
|
||||
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetZoneByNodeName(name)
|
||||
}
|
||||
|
||||
_, _, vm, err := ss.getVmssVM(name)
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
|
||||
return cloudprovider.Zone{
|
||||
FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
|
||||
Region: *vm.Location,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return cloudprovider.Zone{}, nil
|
||||
}
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
|
||||
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
|
||||
func (ss *scaleSet) GetPrimaryVMSetName() string {
|
||||
return ss.Config.PrimaryScaleSetName
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine IP by node name.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName, vmSetName string) (string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName, vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q, %q), err=%v", nodeName, nodeName, vmSetName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine computepreview.VirtualMachineScaleSetVM) (string, error) {
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
|
||||
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
|
||||
if *ref.Primary {
|
||||
return *ref.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
|
||||
}
|
||||
|
||||
// machineName is composed of computerNamePrefix and a base-36 instanceID.
// The instanceID part is a fixed length of 6 characters.
// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
func getScaleSetVMInstanceID(machineName string) (string, error) {
|
||||
nameLength := len(machineName)
|
||||
if nameLength < 6 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
|
||||
if err != nil {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
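A worked example of the decoding above: the trailing six characters of "vm00000Z" parse as a base-36 integer, and 'Z' is 35, matching the "35" the earlier vmss instance ID test expects:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	machineName := "vm00000Z"
	// Take the trailing 6 characters and parse them as a base-36 integer,
	// exactly as getScaleSetVMInstanceID does.
	suffix := machineName[len(machineName)-6:]
	id, err := strconv.ParseUint(suffix, 36, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // 35
}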
|
||||
|
||||
// extractScaleSetNameByExternalID extracts the scaleset name by node's externalID.
|
||||
func extractScaleSetNameByExternalID(externalID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(externalID)
|
||||
if len(matches) != 2 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
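For illustration, applying scaleSetNameRE to a typical vmss provider ID extracts the name between the virtualMachineScaleSets and virtualMachines path segments; the subscription, resource group and scale set values below are made up:

package main

import (
	"fmt"
	"regexp"
)

var scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)

func main() {
	providerID := "azure:///subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/agentpool1-vmss/virtualMachines/3"
	matches := scaleSetNameRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		fmt.Println("not a vmss instance")
		return
	}
	fmt.Println(matches[1]) // agentpool1-vmss
}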
|
||||
|
||||
// listScaleSets lists all scale sets.
|
||||
func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, ss.ResourceGroup)
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ssNames := make([]string, len(allScaleSets))
|
||||
for i := range allScaleSets {
|
||||
ssNames[i] = *(allScaleSets[i].Name)
|
||||
}
|
||||
|
||||
return ssNames, nil
|
||||
}
|
||||
|
||||
// listScaleSetVMs lists VMs belonging to the specified scale set.
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]computepreview.VirtualMachineScaleSetVM, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(computepreview.InstanceView))
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allVMs, nil
|
||||
}
|
||||
|
||||
// getAgentPoolScaleSets lists the virtual machines for the resource group and then builds
|
||||
// a list of scale sets that match the nodes available to k8s.
|
||||
func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
|
||||
agentPoolScaleSets := &[]string{}
|
||||
for nx := range nodes {
|
||||
if isMasterNode(nodes[nx]) {
|
||||
continue
|
||||
}
|
||||
|
||||
nodeName := nodes[nx].Name
|
||||
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ssName == "" {
|
||||
glog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
|
||||
continue
|
||||
}
|
||||
|
||||
*agentPoolScaleSets = append(*agentPoolScaleSets, ssName)
|
||||
}
|
||||
|
||||
return agentPoolScaleSets, nil
|
||||
}
|
||||
|
||||
// GetVMSetNames selects all possible availability sets or scale sets
// (depending on the configured vmType) for the service load balancer. If the
// service has no load balancer mode annotation, it returns the primary VMSet;
// if the annotation exists, it returns the eligible VMSets it names.
func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
|
||||
hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
|
||||
if !hasMode {
|
||||
// no mode specified in service annotation default to PrimaryScaleSetName.
|
||||
scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName}
|
||||
return scaleSetNames, nil
|
||||
}
|
||||
|
||||
scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*scaleSetNames) == 0 {
|
||||
glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
|
||||
// sort the list to have deterministic selection
|
||||
sort.Strings(*scaleSetNames)
|
||||
|
||||
if !isAuto {
|
||||
if serviceVMSetNames == nil || len(serviceVMSetNames) == 0 {
|
||||
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
|
||||
}
|
||||
// validate scale set exists
|
||||
var found bool
|
||||
for sasx := range serviceVMSetNames {
|
||||
for asx := range *scaleSetNames {
|
||||
if strings.EqualFold((*scaleSetNames)[asx], serviceVMSetNames[sasx]) {
|
||||
found = true
|
||||
serviceVMSetNames[sasx] = (*scaleSetNames)[asx]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
|
||||
return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx])
|
||||
}
|
||||
}
|
||||
vmSetNames = &serviceVMSetNames
|
||||
}
|
||||
|
||||
return vmSetNames, nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName, "")
|
||||
}
|
||||
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check scale set name.
|
||||
if vmSetName != "" && !strings.EqualFold(ssName, vmSetName) {
|
||||
return network.Interface{}, errNotInVMSet
|
||||
}
|
||||
|
||||
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
nicName, err := getLastSegment(primaryInterfaceID)
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Fix interface's location, which is required when updating the interface.
|
||||
// TODO: is this a bug of azure SDK?
|
||||
if nic.Location == nil || *nic.Location == "" {
|
||||
nic.Location = vm.Location
|
||||
}
|
||||
|
||||
return nic, nil
|
||||
}
|
||||
|
||||
// getScaleSetWithRetry gets scale set with exponential backoff retry
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (computepreview.VirtualMachineScaleSet, bool, error) {
|
||||
var result computepreview.VirtualMachineScaleSet
|
||||
var exists bool
|
||||
|
||||
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
cached, retryErr := ss.vmssCache.Get(name)
if retryErr != nil {
glog.Errorf("backoff: failure for scale set %q, will retry, err=%v", name, retryErr)
return false, nil
}
glog.V(4).Infof("backoff: success for scale set %q", name)

if cached != nil {
exists = true
result = *(cached.(*computepreview.VirtualMachineScaleSet))
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
return result, exists, err
|
||||
}
|
||||
|
||||
// getPrimaryNetworkConfiguration gets primary network interface configuration for scale sets.
|
||||
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetNetworkConfiguration, error) {
|
||||
networkConfigurations := *networkConfigurationList
|
||||
if len(networkConfigurations) == 1 {
|
||||
return &networkConfigurations[0], nil
|
||||
}
|
||||
|
||||
for idx := range networkConfigurations {
|
||||
networkConfig := &networkConfigurations[idx]
|
||||
if networkConfig.Primary != nil && *networkConfig.Primary == true {
|
||||
return networkConfig, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName)
|
||||
}
|
||||
|
||||
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *computepreview.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*computepreview.VirtualMachineScaleSetIPConfiguration, error) {
|
||||
ipConfigurations := *config.IPConfigurations
|
||||
if len(ipConfigurations) == 1 {
|
||||
return &ipConfigurations[0], nil
|
||||
}
|
||||
|
||||
for idx := range ipConfigurations {
|
||||
ipConfig := &ipConfigurations[idx]
|
||||
if ipConfig.Primary != nil && *ipConfig.Primary == true {
|
||||
return ipConfig, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set %q", scaleSetName)
|
||||
}
|
||||
|
||||
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet computepreview.VirtualMachineScaleSet) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
errorMessage := fmt.Errorf("Scale set %q not found", vmSetName)
|
||||
glog.Errorf("%v", errorMessage)
|
||||
return errorMessage
|
||||
}
|
||||
|
||||
// Find primary network interface configuration.
|
||||
networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
|
||||
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find primary IP configuration.
|
||||
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update primary IP configuration's LoadBalancerBackendAddressPools.
|
||||
foundPool := false
|
||||
newBackendPools := []computepreview.SubResource{}
|
||||
if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
}
|
||||
for _, existingPool := range newBackendPools {
|
||||
if strings.EqualFold(backendPoolID, *existingPool.ID) {
|
||||
foundPool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
newBackendPools = append(newBackendPools,
|
||||
computepreview.SubResource{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Construct instanceIDs from nodes.
|
||||
instanceIDs := []string{}
|
||||
for _, curNode := range nodes {
|
||||
curScaleSetName, err := extractScaleSetNameByExternalID(curNode.Spec.ExternalID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Node %q is not belonging to any scale sets, omitting it", curNode.Name)
|
||||
continue
|
||||
}
|
||||
if curScaleSetName != vmSetName {
|
||||
glog.V(4).Infof("Node %q is not belonging to scale set %q, omitting it", curNode.Name, vmSetName)
|
||||
continue
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(curNode.Spec.ExternalID)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get last segment from %q: %v", curNode.Spec.ExternalID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
instanceIDs = append(instanceIDs, instanceID)
|
||||
}
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string) error {
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.EnsureBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, vmSetName, vmSetName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
glog.V(2).Infof("ss.EnsureBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, vmSetName, vmSetName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find primary network interface configuration.
|
||||
networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
|
||||
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find primary IP configuration.
|
||||
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration.
|
||||
if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 {
|
||||
return nil
|
||||
}
|
||||
existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
newBackendPools := []computepreview.SubResource{}
|
||||
foundPool := false
|
||||
for i := len(existingBackendPools) - 1; i >= 0; i-- {
|
||||
curPool := existingBackendPools[i]
|
||||
if strings.EqualFold(poolID, *curPool.ID) {
|
||||
glog.V(10).Infof("EnsureBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, vmSetName)
|
||||
foundPool = true
|
||||
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
// Pool not found, assume it has been already removed.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update scale set with backoff.
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", vmSetName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
instanceIDs := []string{"*"}
|
||||
vmInstanceIDs := computepreview.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", vmSetName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB.
|
||||
// TODO: remove this workaround when figuring out the root cause.
|
||||
if len(newBackendPools) == 0 {
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", vmSetName)
|
||||
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", vmSetName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", vmSetName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVmssMachineID returns the full identifier of a vmss virtual machine.
|
||||
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
|
||||
return fmt.Sprintf(
|
||||
vmssMachineIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
scaleSetName,
|
||||
instanceID)
|
||||
}
|
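The *WithRetry helpers above share one shape: a single Azure SDK call is wrapped in wait.ExponentialBackoff, and transient failures are reported as "not done yet" so the call is retried with growing delays. The sketch below shows that pattern in isolation; the flakyCreateOrUpdate function and the backoff parameters are illustrative assumptions, not values taken from this package.

// Minimal, self-contained sketch of the exponential-backoff retry pattern
// used by the *WithRetry helpers above. flakyCreateOrUpdate and the backoff
// values are hypothetical placeholders, not part of this file.
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// flakyCreateOrUpdate stands in for an Azure API call that fails transiently.
func flakyCreateOrUpdate(attempt int) error {
	if attempt < 3 {
		return errors.New("transient HTTP 429: too many requests")
	}
	return nil
}

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // initial delay
		Factor:   2.0,                    // multiply the delay on every retry
		Jitter:   0.1,                    // add randomness to avoid thundering herds
		Steps:    5,                      // give up after this many attempts
	}

	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		if callErr := flakyCreateOrUpdate(attempt); callErr != nil {
			// Returning (false, nil) tells ExponentialBackoff to retry.
			fmt.Printf("attempt %d failed, will retry: %v\n", attempt, callErr)
			return false, nil
		}
		// Returning (true, nil) stops the retry loop successfully.
		return true, nil
	})
	fmt.Printf("finished after %d attempts, err=%v\n", attempt, err)
}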
197  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go  (generated, vendored, new file)
@ -0,0 +1,197 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"strings"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/util/sets"
)

var (
	vmssNameSeparator = "_"

	nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
	availabilitySetNodesKey      = "k8sAvailabilitySetNodesKey"

	vmssCacheTTL                      = time.Minute
	vmssVMCacheTTL                    = time.Minute
	availabilitySetNodesCacheTTL      = 15 * time.Minute
	nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
)

// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
// The map is required because vmss nodeName is not equal to its vmName.
type nodeNameToScaleSetMapping map[string]string

func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
	return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
}

func extractVmssVMName(name string) (string, string, error) {
	ret := strings.Split(name, vmssNameSeparator)
	if len(ret) != 2 {
		glog.Errorf("Failed to extract vmssVMName %q", name)
		return "", "", ErrorNotVmssInstance
	}

	return ret[0], ret[1], nil
}

func (ss *scaleSet) newVmssCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		ctx, cancel := getContextWithCancel()
		defer cancel()
		result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssCacheTTL, getter)
}

func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		scaleSetNames, err := ss.listScaleSets()
		if err != nil {
			return nil, err
		}

		localCache := make(nodeNameToScaleSetMapping)
		for _, ssName := range scaleSetNames {
			vms, err := ss.listScaleSetVMs(ssName)
			if err != nil {
				return nil, err
			}

			for _, vm := range vms {
				if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
					glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
					continue
				}

				computerName := strings.ToLower(*vm.OsProfile.ComputerName)
				localCache[computerName] = ssName
			}
		}

		return localCache, nil
	}

	return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
}

func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
		if err != nil {
			return nil, err
		}

		localCache := sets.NewString()
		for _, vm := range vmList {
			localCache.Insert(*vm.Name)
		}

		return localCache, nil
	}

	return newTimedcache(availabilitySetNodesCacheTTL, getter)
}

func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// vmssVM name's format is 'scaleSetName_instanceID'
		ssName, instanceID, err := extractVmssVMName(key)
		if err != nil {
			return nil, err
		}

		// Not found, the VM doesn't belong to any known scale sets.
		if ssName == "" {
			return nil, nil
		}

		ctx, cancel := getContextWithCancel()
		defer cancel()
		result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssVMCacheTTL, getter)
}

func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
	getScaleSetName := func(nodeName string) (string, error) {
		nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
		if err != nil {
			return "", err
		}

		realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
		if ssName, ok := realMapping[nodeName]; ok {
			return ssName, nil
		}

		return "", nil
	}

	ssName, err := getScaleSetName(nodeName)
	if err != nil {
		return "", err
	}

	if ssName != "" {
		return ssName, nil
	}

	// ssName is still not found, it is likely that new Nodes are created.
	// Force refresh the cache and try again.
	ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
	return getScaleSetName(nodeName)
}

func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
	cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
	if err != nil {
		return false, err
	}

	availabilitySetNodes := cached.(sets.String)
	return availabilitySetNodes.Has(nodeName), nil
}
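The caches above key scale-set VMs by the composite name scaleSetName_instanceID (see makeVmssVMName and extractVmssVMName). A small stand-alone illustration of that round trip, using hypothetical values and only the standard library:

// Stand-alone illustration of the "scaleSetName_instanceID" key convention
// used by the vmss caches above. The values are hypothetical.
package main

import (
	"fmt"
	"strings"
)

const vmssNameSeparator = "_"

func main() {
	scaleSetName, instanceID := "k8s-agentpool-vmss", "7"

	// Compose the cache key the same way makeVmssVMName does.
	key := scaleSetName + vmssNameSeparator + instanceID
	fmt.Println("cache key:", key) // k8s-agentpool-vmss_7

	// Split it back apart the same way extractVmssVMName does.
	parts := strings.Split(key, vmssNameSeparator)
	if len(parts) != 2 {
		fmt.Println("not a vmss instance name")
		return
	}
	fmt.Printf("scale set=%q instanceID=%q\n", parts[0], parts[1])
}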
61  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go  (generated, vendored, new file)
@ -0,0 +1,61 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestExtractVmssVMName(t *testing.T) {
	cases := []struct {
		description        string
		vmName             string
		expectError        bool
		expectedScaleSet   string
		expectedInstanceID string
	}{
		{
			description: "wrong vmss VM name should report error",
			vmName:      "vm1234",
			expectError: true,
		},
		{
			description: "wrong VM name separator should report error",
			vmName:      "vm-1234",
			expectError: true,
		},
		{
			description:        "correct vmss VM name should return correct scaleSet and instanceID",
			vmName:             "vm_1234",
			expectedScaleSet:   "vm",
			expectedInstanceID: "1234",
		},
	}

	for _, c := range cases {
		ssName, instanceID, err := extractVmssVMName(c.vmName)
		if c.expectError {
			assert.Error(t, err, c.description)
			continue
		}

		assert.Equal(t, c.expectedScaleSet, ssName, c.description)
		assert.Equal(t, c.expectedInstanceID, instanceID, c.description)
	}
}
156  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go  (generated, vendored, new file)
@ -0,0 +1,156 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"testing"

	computepreview "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/stretchr/testify/assert"
)

func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
	cloud := getTestCloud()
	setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
	ss, err := newScaleSet(cloud)
	if err != nil {
		return nil, err
	}

	return ss.(*scaleSet), nil
}

func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
	virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
	scaleSets := make(map[string]map[string]computepreview.VirtualMachineScaleSet)
	scaleSets["rg"] = map[string]computepreview.VirtualMachineScaleSet{
		scaleSetName: {
			Name: &scaleSetName,
		},
	}
	virtualMachineScaleSetsClient.setFakeStore(scaleSets)

	virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
	ssVMs := make(map[string]map[string]computepreview.VirtualMachineScaleSetVM)
	ssVMs["rg"] = make(map[string]computepreview.VirtualMachineScaleSetVM)
	for i := range vmList {
		ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
		nodeName := vmList[i]
		instanceID := fmt.Sprintf("%d", i)
		vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
		networkInterfaces := []computepreview.NetworkInterfaceReference{
			{
				ID: &nodeName,
			},
		}
		ssVMs["rg"][vmName] = computepreview.VirtualMachineScaleSetVM{
			VirtualMachineScaleSetVMProperties: &computepreview.VirtualMachineScaleSetVMProperties{
				OsProfile: &computepreview.OSProfile{
					ComputerName: &nodeName,
				},
				NetworkProfile: &computepreview.NetworkProfile{
					NetworkInterfaces: &networkInterfaces,
				},
			},
			ID:         &ID,
			InstanceID: &instanceID,
			Name:       &vmName,
			Location:   &ss.Location,
		}
	}
	virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)

	ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
	ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
}

func TestGetScaleSetVMInstanceID(t *testing.T) {
	tests := []struct {
		msg                string
		machineName        string
		expectError        bool
		expectedInstanceID string
	}{{
		msg:         "invalid vmss instance name",
		machineName: "vmvm",
		expectError: true,
	},
		{
			msg:                "valid vmss instance name",
			machineName:        "vm00000Z",
			expectError:        false,
			expectedInstanceID: "35",
		},
	}

	for i, test := range tests {
		instanceID, err := getScaleSetVMInstanceID(test.machineName)
		if test.expectError {
			assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		} else {
			assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		}
	}
}

func TestGetInstanceIDByNodeName(t *testing.T) {
	testCases := []struct {
		description string
		scaleSet    string
		vmList      []string
		nodeName    string
		expected    string
		expectError bool
	}{
		{
			description: "scaleSet should get instance by node name",
			scaleSet:    "ss",
			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
			nodeName:    "vmssee6c2000001",
			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/1",
		},
		{
			description: "scaleSet should get instance by node name with upper cases hostname",
			scaleSet:    "ss",
			vmList:      []string{"VMSSEE6C2000000", "VMSSEE6C2000001"},
			nodeName:    "vmssee6c2000000",
			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
		},
		{
			description: "scaleSet should not get instance for non-exist nodes",
			scaleSet:    "ss",
			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
			nodeName:    "agente6c2000005",
			expectError: true,
		},
	}

	for _, test := range testCases {
		ss, err := newTestScaleSet(test.scaleSet, test.vmList)
		assert.NoError(t, err, test.description)

		real, err := ss.GetInstanceIDByNodeName(test.nodeName)
		if test.expectError {
			assert.Error(t, err, test.description)
			continue
		}

		assert.NoError(t, err, test.description)
		assert.Equal(t, test.expected, real, test.description)
	}
}
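Both tests above follow the table-driven style: a slice of cases carrying a description, the inputs, and the expected outputs, iterated with testify assertions keyed by the description. A minimal stand-alone sketch of the same pattern, with a hypothetical splitHostPort helper as the function under test:

// Minimal sketch of the table-driven test pattern used above.
// splitHostPort is a hypothetical function under test, not from this package.
package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func splitHostPort(s string) (string, string, bool) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return "", "", false
	}
	return parts[0], parts[1], true
}

func TestSplitHostPort(t *testing.T) {
	cases := []struct {
		description string
		input       string
		expectOK    bool
		expectHost  string
		expectPort  string
	}{
		{description: "missing separator should fail", input: "localhost", expectOK: false},
		{description: "host and port should be split", input: "localhost:8080", expectOK: true, expectHost: "localhost", expectPort: "8080"},
	}

	for _, c := range cases {
		host, port, ok := splitHostPort(c.input)
		if !c.expectOK {
			// Failure cases only check the ok flag, mirroring the expectError pattern above.
			assert.False(t, ok, c.description)
			continue
		}
		assert.Equal(t, c.expectHost, host, c.description)
		assert.Equal(t, c.expectPort, port, c.description)
	}
}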
257  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go  (generated, vendored)
@ -17,19 +17,23 @@ limitations under the License.
|
||||
package azure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
|
||||
ErrorNotVmssInstance = errors.New("not a vmss instance")
|
||||
vmCacheTTL = time.Minute
|
||||
lbCacheTTL = 2 * time.Minute
|
||||
nsgCacheTTL = 2 * time.Minute
|
||||
rtCacheTTL = 2 * time.Minute
|
||||
)
|
||||
|
||||
// checkExistsFromError inspects an error and returns a true if err is nil,
|
||||
@ -62,139 +66,44 @@ func ignoreStatusNotFoundFromError(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
|
||||
/// The service side has throttling control that delays responses if there are multiple requests
/// for a certain VM resource within a short period.
|
||||
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) {
|
||||
vmName := string(nodeName)
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName)
|
||||
vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
|
||||
glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName)
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return vm, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return vm, false, nil
|
||||
}
|
||||
|
||||
return vm, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getVmssVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
vmName := string(nodeName)
|
||||
instanceID, err := getVmssInstanceID(vmName)
|
||||
cachedVM, err := az.vmCache.Get(vmName)
|
||||
if err != nil {
|
||||
return vm, false, err
|
||||
return vm, err
|
||||
}
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", vmName)
|
||||
vm, err = az.VirtualMachineScaleSetVMsClient.Get(az.ResourceGroup, az.PrimaryScaleSetName, instanceID)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", vmName)
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return vm, false, realErr
|
||||
if cachedVM == nil {
|
||||
return vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return vm, false, nil
|
||||
}
|
||||
|
||||
return vm, exists, err
|
||||
return *(cachedVM.(*compute.VirtualMachine)), nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("RouteTablesClient.Get(%s): start", az.RouteTableName)
|
||||
routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
|
||||
glog.V(10).Infof("RouteTablesClient.Get(%s): end", az.RouteTableName)
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return routeTable, false, realErr
|
||||
cachedRt, err := az.rtCache.Get(az.RouteTableName)
|
||||
if err != nil {
|
||||
return routeTable, false, err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
if cachedRt == nil {
|
||||
return routeTable, false, nil
|
||||
}
|
||||
|
||||
return routeTable, exists, err
|
||||
return *(cachedRt.(*network.RouteTable)), true, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err error) {
|
||||
func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pip network.PublicIPAddress, exists bool, err error) {
|
||||
resourceGroup := az.ResourceGroup
|
||||
if pipResourceGroup != "" {
|
||||
resourceGroup = pipResourceGroup
|
||||
}
|
||||
|
||||
var realErr error
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("SecurityGroupsClient.Get(%s): start", az.SecurityGroupName)
|
||||
sg, err = az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
|
||||
glog.V(10).Infof("SecurityGroupsClient.Get(%s): end", az.SecurityGroupName)
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return sg, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return sg, false, nil
|
||||
}
|
||||
|
||||
return sg, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
|
||||
var realErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.Get(%s): start", name)
|
||||
lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "")
|
||||
glog.V(10).Infof("LoadBalancerClient.Get(%s): end", name)
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return lb, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return lb, false, nil
|
||||
}
|
||||
|
||||
return lb, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResult, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("LoadBalancerClient.List(%s): start", az.ResourceGroup)
|
||||
lbListResult, err = az.LoadBalancerClient.List(az.ResourceGroup)
|
||||
glog.V(10).Infof("LoadBalancerClient.List(%s): end", az.ResourceGroup)
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return lbListResult, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return lbListResult, false, nil
|
||||
}
|
||||
|
||||
return lbListResult, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("PublicIPAddressesClient.Get(%s): start", name)
|
||||
pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, name, "")
|
||||
glog.V(10).Infof("PublicIPAddressesClient.Get(%s): end", name)
|
||||
|
||||
pip, err = az.PublicIPAddressesClient.Get(resourceGroup, pipName, "")
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return pip, false, realErr
|
||||
@ -217,11 +126,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
|
||||
rg = az.ResourceGroup
|
||||
}
|
||||
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("SubnetsClient.Get(%s): start", subnetName)
|
||||
subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "")
|
||||
glog.V(10).Infof("SubnetsClient.Get(%s): end", subnetName)
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return subnet, false, realErr
|
||||
@ -233,3 +138,107 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
|
||||
|
||||
return subnet, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
|
||||
cachedLB, err := az.lbCache.Get(name)
|
||||
if err != nil {
|
||||
return lb, false, err
|
||||
}
|
||||
|
||||
if cachedLB == nil {
|
||||
return lb, false, nil
|
||||
}
|
||||
|
||||
return *(cachedLB.(*network.LoadBalancer)), true, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
|
||||
securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
|
||||
if err != nil {
|
||||
return nsg, err
|
||||
}
|
||||
|
||||
if securityGroup == nil {
|
||||
return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName)
|
||||
}
|
||||
|
||||
return *(securityGroup.(*network.SecurityGroup)), nil
|
||||
}
|
||||
|
||||
func (az *Cloud) newVMCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
// Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView
|
||||
// request. If we first send an InstanceView request and then a non InstanceView request, the second
|
||||
// request will still hit throttling. This is what happens now for cloud controller manager: In this
|
||||
// case we do get instance view every time to fulfill the azure_zones requirement without hitting
|
||||
// throttling.
|
||||
// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
|
||||
vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, key, compute.InstanceView)
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &vm, nil
|
||||
}
|
||||
|
||||
return newTimedcache(vmCacheTTL, getter)
|
||||
}
|
||||
|
||||
func (az *Cloud) newLBCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
lb, err := az.LoadBalancerClient.Get(az.ResourceGroup, key, "")
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &lb, nil
|
||||
}
|
||||
|
||||
return newTimedcache(lbCacheTTL, getter)
|
||||
}
|
||||
|
||||
func (az *Cloud) newNSGCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
nsg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, key, "")
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &nsg, nil
|
||||
}
|
||||
|
||||
return newTimedcache(nsgCacheTTL, getter)
|
||||
}
|
||||
|
||||
func (az *Cloud) newRouteTableCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
rt, err := az.RouteTablesClient.Get(az.ResourceGroup, key, "")
|
||||
exists, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &rt, nil
|
||||
}
|
||||
|
||||
return newTimedcache(rtCacheTTL, getter)
|
||||
}
|
||||
|
41  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go  (generated, vendored)
@ -17,17 +17,15 @@ limitations under the License.
package azure

import (
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
	"strconv"
	"sync"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
)

const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"
@ -42,12 +40,17 @@ type instanceInfo struct {
}

// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
	return az.getZoneFromURL(instanceInfoURL)
}

// This is injectable for testing.
func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
	faultMutex.Lock()
	defer faultMutex.Unlock()
	if faultDomain == nil {
		var err error
		faultDomain, err = fetchFaultDomain()
		faultDomain, err = fetchFaultDomain(url)
		if err != nil {
			return cloudprovider.Zone{}, err
		}
@ -62,36 +65,24 @@ func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
	nodeName, err := splitProviderID(providerID)
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
	nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	return az.GetZoneByNodeName(nodeName)

	return az.GetZoneByNodeName(ctx, nodeName)
}

// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {

	vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, string(nodeName), compute.InstanceView)

	if err != nil {
		return cloudprovider.Zone{}, err
	}

	failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))

	zone := cloudprovider.Zone{
		FailureDomain: failureDomain,
		Region:        *(vm.Location),
	}
	return zone, nil
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
	return az.vmSet.GetZoneByNodeName(string(nodeName))
}

func fetchFaultDomain() (*string, error) {
	resp, err := http.Get(instanceInfoURL)
func fetchFaultDomain(url string) (*string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
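GetZone above delegates to getZoneFromURL precisely so that the metadata URL can be swapped out in tests. The sketch below shows how such a fault-domain lookup can be exercised against a local httptest server; the JSON shape and field names are simplified assumptions, not the full Azure instance-metadata schema, and this is not the package's own test code.

// Stand-alone sketch showing why fetchFaultDomain takes a URL: the Azure
// instance-metadata endpoint can be replaced by a local test server.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

// instanceInfo is a simplified assumption of the metadata payload.
type instanceInfo struct {
	ID           string `json:"ID"`
	UpdateDomain string `json:"UD"`
	FaultDomain  string `json:"FD"`
}

// fetchFaultDomain fetches the metadata document from url and returns the fault domain.
func fetchFaultDomain(url string) (*string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var info instanceInfo
	if err := json.Unmarshal(body, &info); err != nil {
		return nil, err
	}
	return &info.FaultDomain, nil
}

func main() {
	// A throwaway server standing in for http://169.254.169.254/metadata/v1/InstanceInfo.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"ID":"vm-0","UD":"1","FD":"2"}`)
	}))
	defer server.Close()

	fd, err := fetchFaultDomain(server.URL)
	if err != nil {
		panic(err)
	}
	fmt.Println("fault domain:", *fd) // 2
}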
74  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD  (generated, vendored)
@ -13,11 +13,40 @@ go_library(
|
||||
"cloudstack_instances.go",
|
||||
"cloudstack_loadbalancer.go",
|
||||
"metadata.go",
|
||||
"metadata_other.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"metadata_linux.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"metadata_other.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack",
|
||||
@ -25,21 +54,54 @@ go_library(
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//vendor/github.com/d2g/dhcp4:go_default_library",
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/kardianos/osext:go_default_library",
|
||||
"//vendor/github.com/xanzy/go-cloudstack/cloudstack:go_default_library",
|
||||
"//vendor/gopkg.in/gcfg.v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//vendor/github.com/d2g/dhcp4client:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["cloudstack_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
|
12  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go  (generated, vendored)
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -181,18 +182,13 @@ func (cs *CSCloud) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (cs *CSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID
|
||||
func (cs *CSCloud) HasClusterID() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// GetZone returns the Zone containing the region that the program is running in.
|
||||
func (cs *CSCloud) GetZone() (cloudprovider.Zone, error) {
|
||||
func (cs *CSCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
if cs.zone == "" {
|
||||
@ -220,7 +216,7 @@ func (cs *CSCloud) GetZone() (cloudprovider.Zone, error) {
|
||||
}
|
||||
|
||||
// GetZoneByProviderID returns the Zone, found by using the provider ID.
|
||||
func (cs *CSCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
|
||||
func (cs *CSCloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
|
||||
@ -242,7 +238,7 @@ func (cs *CSCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, e
|
||||
}
|
||||
|
||||
// GetZoneByNodeName returns the Zone, found by using the node name.
|
||||
func (cs *CSCloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
|
||||
func (cs *CSCloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
|
||||
|
21  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go  (generated, vendored)
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
@ -28,7 +29,7 @@ import (
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (cs *CSCloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
func (cs *CSCloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
|
||||
string(name),
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
@ -44,7 +45,7 @@ func (cs *CSCloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
|
||||
}
|
||||
|
||||
// NodeAddressesByProviderID returns the addresses of the specified instance.
|
||||
func (cs *CSCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
|
||||
func (cs *CSCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
|
||||
providerID,
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
@ -80,12 +81,12 @@ func (cs *CSCloud) nodeAddresses(instance *cloudstack.VirtualMachine) ([]v1.Node
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (cs *CSCloud) ExternalID(name types.NodeName) (string, error) {
|
||||
return cs.InstanceID(name)
|
||||
func (cs *CSCloud) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return cs.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (cs *CSCloud) InstanceID(name types.NodeName) (string, error) {
|
||||
func (cs *CSCloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
|
||||
string(name),
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
@ -101,7 +102,7 @@ func (cs *CSCloud) InstanceID(name types.NodeName) (string, error) {
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (cs *CSCloud) InstanceType(name types.NodeName) (string, error) {
|
||||
func (cs *CSCloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
|
||||
string(name),
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
@ -117,7 +118,7 @@ func (cs *CSCloud) InstanceType(name types.NodeName) (string, error) {
|
||||
}
|
||||
|
||||
// InstanceTypeByProviderID returns the type of the specified instance.
|
||||
func (cs *CSCloud) InstanceTypeByProviderID(providerID string) (string, error) {
|
||||
func (cs *CSCloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
|
||||
providerID,
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
@ -133,17 +134,17 @@ func (cs *CSCloud) InstanceTypeByProviderID(providerID string) (string, error) {
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances is currently not implemented.
|
||||
func (cs *CSCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
func (cs *CSCloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return errors.New("AddSSHKeyToAllInstances not implemented")
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
func (cs *CSCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
func (cs *CSCloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns if the instance still exists.
|
||||
func (cs *CSCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
func (cs *CSCloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
_, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
|
||||
providerID,
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
@ -40,7 +41,7 @@ type loadBalancer struct {
|
||||
}
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is.
|
||||
func (cs *CSCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
|
||||
func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
|
||||
glog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
@ -63,7 +64,7 @@ func (cs *CSCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer creates a new load balancer, or updates the existing one. Returns the status of the balancer.
|
||||
func (cs *CSCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) {
|
||||
func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) {
|
||||
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes)
|
||||
|
||||
if len(service.Spec.Ports) == 0 {
|
||||
@ -165,7 +166,7 @@ func (cs *CSCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, n
|
||||
}
|
||||
|
||||
// UpdateLoadBalancer updates hosts under the specified load balancer.
|
||||
func (cs *CSCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
@ -211,7 +212,7 @@ func (cs *CSCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, n
|
||||
|
||||
// EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, returning
|
||||
// nil if the load balancer specified either didn't exist or was successfully deleted.
|
||||
func (cs *CSCloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
|
||||
func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
|
||||
glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
|
3  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_test.go  (generated, vendored)
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package cloudstack

import (
	"context"
	"os"
	"strconv"
	"strings"
@ -107,7 +108,7 @@ func TestLoadBalancer(t *testing.T) {
		t.Fatalf("LoadBalancer() returned false")
	}

	_, exists, err := lb.GetLoadBalancer(testClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "noexist"}})
	_, exists, err := lb.GetLoadBalancer(context.TODO(), testClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "noexist"}})
	if err != nil {
		t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %s", err)
	}
|
||||
|
29  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go  (generated, vendored)
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@ -46,7 +47,7 @@ const (
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (m *metadata) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
func (m *metadata) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
externalIP, err := m.get(metadataTypeExternalIP)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get external IP: %v", err)
|
||||
@ -64,17 +65,17 @@ func (m *metadata) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
|
||||
}
|
||||
|
||||
// NodeAddressesByProviderID returns the addresses of the specified instance.
|
||||
func (m *metadata) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
|
||||
func (m *metadata) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
return nil, errors.New("NodeAddressesByProviderID not implemented")
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (m *metadata) ExternalID(name types.NodeName) (string, error) {
|
||||
return m.InstanceID(name)
|
||||
func (m *metadata) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
return m.InstanceID(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (m *metadata) InstanceID(name types.NodeName) (string, error) {
|
||||
func (m *metadata) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
instanceID, err := m.get(metadataTypeInstanceID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get instance ID: %v", err)
|
||||
@ -89,7 +90,7 @@ func (m *metadata) InstanceID(name types.NodeName) (string, error) {
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (m *metadata) InstanceType(name types.NodeName) (string, error) {
|
||||
func (m *metadata) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
|
||||
instanceType, err := m.get(metadataTypeInstanceType)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get instance type: %v", err)
|
||||
@ -99,27 +100,27 @@ func (m *metadata) InstanceType(name types.NodeName) (string, error) {
|
||||
}
|
||||
|
||||
// InstanceTypeByProviderID returns the type of the specified instance.
|
||||
func (m *metadata) InstanceTypeByProviderID(providerID string) (string, error) {
|
||||
func (m *metadata) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
|
||||
return "", errors.New("InstanceTypeByProviderID not implemented")
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances is currently not implemented.
|
||||
func (m *metadata) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
func (m *metadata) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return errors.New("AddSSHKeyToAllInstances not implemented")
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
func (m *metadata) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
func (m *metadata) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns if the instance still exists.
|
||||
func (m *metadata) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
func (m *metadata) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, errors.New("InstanceExistsByProviderID not implemented")
|
||||
}
|
||||
|
||||
// GetZone returns the Zone containing the region that the program is running in.
|
||||
func (m *metadata) GetZone() (cloudprovider.Zone, error) {
|
||||
func (m *metadata) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
if m.zone == "" {
|
||||
@ -139,12 +140,12 @@ func (m *metadata) GetZone() (cloudprovider.Zone, error) {
|
||||
}
|
||||
|
||||
// GetZoneByProviderID returns the Zone, found by using the provider ID.
|
||||
func (m *metadata) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
|
||||
func (m *metadata) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
|
||||
return cloudprovider.Zone{}, errors.New("GetZoneByProviderID not implemented")
|
||||
}
|
||||
|
||||
// GetZoneByNodeName returns the Zone, found by using the node name.
|
||||
func (m *metadata) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
|
||||
func (m *metadata) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
|
||||
return cloudprovider.Zone{}, errors.New("GetZoneByNodeName not implemented")
|
||||
}
|
||||
|
||||
@ -195,7 +196,7 @@ func findDHCPServer() (string, error) {
|
||||
|
||||
offerPacket, err := client.GetOffer(&discoverPacket)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error recieving DHCP offer package: %v", err)
|
||||
return "", fmt.Errorf("error receiving DHCP offer package: %v", err)
|
||||
}
|
||||
|
||||
offerPacketOptions := offerPacket.ParseOptions()
|
||||
|
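The hunks above follow one mechanical pattern: every cloud-provider method gains a leading context.Context parameter, and call sites that do not yet thread a request context through pass context.TODO(), as the cloudstack test hunk earlier does. A minimal, self-contained sketch of that calling convention (the interface and type names here are illustrative, not part of the vendored code):

package main

import (
	"context"
	"fmt"
)

// nodeAddresser mirrors the post-change method shape: ctx comes first.
type nodeAddresser interface {
	NodeAddresses(ctx context.Context, name string) ([]string, error)
}

type fakeMetadata struct{}

func (fakeMetadata) NodeAddresses(ctx context.Context, name string) ([]string, error) {
	// A real implementation would honor ctx cancellation while querying metadata.
	return []string{"10.0.0.1"}, nil
}

func main() {
	var m nodeAddresser = fakeMetadata{}
	// Callers without a real request context use context.TODO() during the migration.
	addrs, err := m.NodeAddresses(context.TODO(), "node-1")
	fmt.Println(addrs, err)
}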
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/fake.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package fake

import (
	"context"
	"fmt"
	"net"
	"regexp"
@ -90,11 +91,11 @@ func (f *FakeCloud) ClearCalls() {
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (f *FakeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}

func (f *FakeCloud) ListClusters() ([]string, error) {
func (f *FakeCloud) ListClusters(ctx context.Context) ([]string, error) {
	return f.ClusterList, f.Err
}

func (f *FakeCloud) Master(name string) (string, error) {
func (f *FakeCloud) Master(ctx context.Context, name string) (string, error) {
	return f.MasterName, f.Err
}

@ -110,11 +111,6 @@ func (f *FakeCloud) ProviderName() string {
	return f.Provider
}

// ScrubDNS filters DNS settings for pods.
func (f *FakeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}

// HasClusterID returns true if the cluster has a clusterID
func (f *FakeCloud) HasClusterID() bool {
	return true
@ -142,7 +138,7 @@ func (f *FakeCloud) Routes() (cloudprovider.Routes, bool) {
}

// GetLoadBalancer is a stub implementation of LoadBalancer.GetLoadBalancer.
func (f *FakeCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
func (f *FakeCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
	status := &v1.LoadBalancerStatus{}
	status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}}

@ -151,7 +147,7 @@ func (f *FakeCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v

// EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer.
// It adds an entry "create" into the internal method call record.
func (f *FakeCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
func (f *FakeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	f.addCall("create")
	if f.Balancers == nil {
		f.Balancers = make(map[string]FakeBalancer)
@ -160,7 +156,7 @@ func (f *FakeCloud) EnsureLoadBalancer(clusterName string, service *v1.Service,
	name := cloudprovider.GetLoadBalancerName(service)
	spec := service.Spec

	zone, err := f.GetZone()
	zone, err := f.GetZone(context.TODO())
	if err != nil {
		return nil, err
	}
@ -176,7 +172,7 @@ func (f *FakeCloud) EnsureLoadBalancer(clusterName string, service *v1.Service,

// UpdateLoadBalancer is a test-spy implementation of LoadBalancer.UpdateLoadBalancer.
// It adds an entry "update" into the internal method call record.
func (f *FakeCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
func (f *FakeCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
	f.addCall("update")
	f.UpdateCalls = append(f.UpdateCalls, FakeUpdateBalancerCall{service, nodes})
	return f.Err
@ -184,30 +180,30 @@ func (f *FakeCloud) UpdateLoadBalancer(clusterName string, service *v1.Service,

// EnsureLoadBalancerDeleted is a test-spy implementation of LoadBalancer.EnsureLoadBalancerDeleted.
// It adds an entry "delete" into the internal method call record.
func (f *FakeCloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
func (f *FakeCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
	f.addCall("delete")
	return f.Err
}

func (f *FakeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
func (f *FakeCloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}

// Implementation of Instances.CurrentNodeName
func (f *FakeCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
func (f *FakeCloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}

// NodeAddresses is a test-spy implementation of Instances.NodeAddresses.
// It adds an entry "node-addresses" into the internal method call record.
func (f *FakeCloud) NodeAddresses(instance types.NodeName) ([]v1.NodeAddress, error) {
func (f *FakeCloud) NodeAddresses(ctx context.Context, instance types.NodeName) ([]v1.NodeAddress, error) {
	f.addCall("node-addresses")
	return f.Addresses, f.Err
}

// NodeAddressesByProviderID is a test-spy implementation of Instances.NodeAddressesByProviderID.
// It adds an entry "node-addresses-by-provider-id" into the internal method call record.
func (f *FakeCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
func (f *FakeCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	f.addCall("node-addresses-by-provider-id")
	return f.Addresses, f.Err
}
@ -215,32 +211,32 @@ func (f *FakeCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddre
// ExternalID is a test-spy implementation of Instances.ExternalID.
// It adds an entry "external-id" into the internal method call record.
// It returns an external id to the mapped instance name, if not found, it will return "ext-{instance}"
func (f *FakeCloud) ExternalID(nodeName types.NodeName) (string, error) {
func (f *FakeCloud) ExternalID(ctx context.Context, nodeName types.NodeName) (string, error) {
	f.addCall("external-id")
	return f.ExtID[nodeName], f.Err
}

// InstanceID returns the cloud provider ID of the node with the specified Name.
func (f *FakeCloud) InstanceID(nodeName types.NodeName) (string, error) {
func (f *FakeCloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
	f.addCall("instance-id")
	return f.ExtID[nodeName], nil
}

// InstanceType returns the type of the specified instance.
func (f *FakeCloud) InstanceType(instance types.NodeName) (string, error) {
func (f *FakeCloud) InstanceType(ctx context.Context, instance types.NodeName) (string, error) {
	f.addCall("instance-type")
	return f.InstanceTypes[instance], nil
}

// InstanceTypeByProviderID returns the type of the specified instance.
func (f *FakeCloud) InstanceTypeByProviderID(providerID string) (string, error) {
func (f *FakeCloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	f.addCall("instance-type-by-provider-id")
	return f.InstanceTypes[types.NodeName(providerID)], nil
}

// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (f *FakeCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
func (f *FakeCloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	f.addCall("instance-exists-by-provider-id")
	return f.ExistsByProviderID, f.ErrByProviderID
}
@ -258,7 +254,7 @@ func (f *FakeCloud) List(filter string) ([]types.NodeName, error) {
	return result, f.Err
}

func (f *FakeCloud) GetZone() (cloudprovider.Zone, error) {
func (f *FakeCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
	f.addCall("get-zone")
	return f.Zone, f.Err
}
@ -266,7 +262,7 @@ func (f *FakeCloud) GetZone() (cloudprovider.Zone, error) {
// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (f *FakeCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
func (f *FakeCloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
	f.addCall("get-zone-by-provider-id")
	return f.Zone, f.Err
}
@ -274,12 +270,12 @@ func (f *FakeCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone,
// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (f *FakeCloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
func (f *FakeCloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
	f.addCall("get-zone-by-node-name")
	return f.Zone, f.Err
}

func (f *FakeCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
func (f *FakeCloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.addCall("list-routes")
@ -293,7 +289,7 @@ func (f *FakeCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, erro
	return routes, f.Err
}

func (f *FakeCloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
func (f *FakeCloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.addCall("create-route")
@ -310,7 +306,7 @@ func (f *FakeCloud) CreateRoute(clusterName string, nameHint string, route *clou
	return nil
}

func (f *FakeCloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
func (f *FakeCloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.addCall("delete-route")
@ -323,7 +319,7 @@ func (f *FakeCloud) DeleteRoute(clusterName string, route *cloudprovider.Route)
	return nil
}

func (c *FakeCloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error) {
func (c *FakeCloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
	if val, ok := c.VolumeLabelMap[pv.Name]; ok {
		return val, nil
	}
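The FakeCloud hunks above all follow the same test-spy pattern: each method appends its name to an internal call record before returning canned values, so controller tests can assert exactly which cloud operations were exercised. A small, self-contained illustration of that pattern under assumed names (this is not the vendored FakeCloud type itself):

package main

import (
	"context"
	"fmt"
)

type fakeCloud struct {
	calls []string
	err   error
}

// addCall records the operation name, mirroring the spy style used above.
func (f *fakeCloud) addCall(name string) { f.calls = append(f.calls, name) }

func (f *fakeCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName, service string) error {
	f.addCall("delete")
	return f.err
}

func main() {
	f := &fakeCloud{}
	_ = f.EnsureLoadBalancerDeleted(context.TODO(), "kubernetes", "default/my-svc")
	fmt.Println(f.calls) // [delete]
}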
20
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
@ -23,7 +23,6 @@ go_library(
        "gce_disks.go",
        "gce_firewall.go",
        "gce_forwardingrule.go",
        "gce_forwardingrule_fakes.go",
        "gce_healthchecks.go",
        "gce_instancegroup.go",
        "gce_instances.go",
@ -37,17 +36,23 @@ go_library(
        "gce_routes.go",
        "gce_targetpool.go",
        "gce_targetproxy.go",
        "gce_tpu.go",
        "gce_urlmap.go",
        "gce_util.go",
        "gce_zones.go",
        "metrics.go",
        "support.go",
        "token_source.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce",
    deps = [
        "//pkg/api/v1/service:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/util/net/sets:go_default_library",
@ -65,6 +70,7 @@ go_library(
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/google.golang.org/api/container/v1:go_default_library",
        "//vendor/google.golang.org/api/googleapi:go_default_library",
        "//vendor/google.golang.org/api/tpu/v1alpha1:go_default_library",
        "//vendor/gopkg.in/gcfg.v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@ -76,6 +82,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
@ -98,10 +105,11 @@ go_test(
        "gce_util_test.go",
        "metrics_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce",
    library = ":go_default_library",
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/github.com/stretchr/testify/require:go_default_library",
@ -112,6 +120,7 @@ go_test(
        "//vendor/google.golang.org/api/googleapi:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ],
)
@ -125,6 +134,9 @@ filegroup(

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers/gce/cloud:all-srcs",
    ],
    tags = ["automanaged"],
)
64
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD
generated
vendored
Normal file
@ -0,0 +1,64 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "constants.go",
        "doc.go",
        "gce_projects.go",
        "gen.go",
        "op.go",
        "project.go",
        "ratelimit.go",
        "service.go",
        "utils.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
        "//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/google.golang.org/api/googleapi:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "gen_test.go",
        "mock_test.go",
        "utils_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library",
        "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
        "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
        "//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers/gce/cloud/filter:all-srcs",
        "//pkg/cloudprovider/providers/gce/cloud/gen:all-srcs",
        "//pkg/cloudprovider/providers/gce/cloud/meta:all-srcs",
        "//pkg/cloudprovider/providers/gce/cloud/mock:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
55
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/constants.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloud

import (
	"strings"
)

// NetworkTier represents the Network Service Tier used by a resource
type NetworkTier string

// LbScheme represents the possible types of load balancers
type LbScheme string

const (
	NetworkTierStandard NetworkTier = "Standard"
	NetworkTierPremium  NetworkTier = "Premium"
	NetworkTierDefault  NetworkTier = NetworkTierPremium

	SchemeExternal LbScheme = "EXTERNAL"
	SchemeInternal LbScheme = "INTERNAL"
)

// ToGCEValue converts NetworkTier to a string that we can populate the
// NetworkTier field of GCE objects, including ForwardingRules and Addresses.
func (n NetworkTier) ToGCEValue() string {
	return strings.ToUpper(string(n))
}

// NetworkTierGCEValueToType converts the value of the NetworkTier field of a
// GCE object to the NetworkTier type.
func NetworkTierGCEValueToType(s string) NetworkTier {
	switch s {
	case NetworkTierStandard.ToGCEValue():
		return NetworkTierStandard
	case NetworkTierPremium.ToGCEValue():
		return NetworkTierPremium
	default:
		return NetworkTier(s)
	}
}
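A short usage sketch for the NetworkTier helpers added above, assuming the package is imported from within the Kubernetes tree at the importpath declared in the BUILD file: ToGCEValue upper-cases the typed tier for GCE API objects, and NetworkTierGCEValueToType maps an API value back to the typed constant, passing unknown values through unchanged.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

func main() {
	v := cloud.NetworkTierPremium.ToGCEValue() // "PREMIUM"
	fmt.Println(v)
	fmt.Println(cloud.NetworkTierGCEValueToType(v) == cloud.NetworkTierPremium) // true
	// Unknown values round-trip as-is rather than failing.
	fmt.Println(cloud.NetworkTierGCEValueToType("SELECT"))
}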
112
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/doc.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cloud implements a more golang friendly interface to the GCE compute
// API. The code in this package is generated automatically via the generator
// implemented in "gen/main.go". The code generator creates the basic CRUD
// actions for the given resource: "Insert", "Get", "List" and "Delete".
// Additional methods by customizing the ServiceInfo object (see below).
// Generated code includes a full mock of the GCE compute API.
//
// Usage
//
// The root of the GCE compute API is the interface "Cloud". Code written using
// Cloud can be used against the actual implementation "GCE" or "MockGCE".
//
//   func foo(cloud Cloud) {
//     igs, err := cloud.InstanceGroups().List(ctx, "us-central1-b", filter.None)
//     ...
//   }
//   // Run foo against the actual cloud.
//   foo(NewGCE(&Service{...}))
//   // Run foo with a mock.
//   foo(NewMockGCE())
//
// Rate limiting and routing
//
// The generated code allows for custom policies for operation rate limiting
// and GCE project routing. See RateLimiter and ProjectRouter for more details.
//
// Mocks
//
// Mocks are automatically generated for each type implementing basic logic for
// resource manipulation. This eliminates the boilerplate required to mock GCE
// functionality. Each method will also have a corresponding "xxxHook"
// function generated in the mock structure where unit test code can hook the
// execution of the method.
//
// Mocks for different versions of the same service will share the same set of
// objects, i.e. an alpha object will be visible with beta and GA methods.
// Note that translation is done with JSON serialization between the API versions.
//
// Changing service code generation
//
// The list of services to generate is contained in "meta/meta.go". To add a
// service, add an entry to the list "meta.AllServices". An example entry:
//
//   &ServiceInfo{
//     Object:      "InstanceGroup", // Name of the object type.
//     Service:     "InstanceGroups", // Name of the service.
//     Resource:    "instanceGroups", // Lowercase resource name (as appears in the URL).
//     version:     meta.VersionAlpha, // API version (one entry per version is needed).
//     keyType:     Zonal, // What kind of resource this is.
//     serviceType: reflect.TypeOf(&alpha.InstanceGroupsService{}), // Associated golang type.
//     additionalMethods: []string{ // Additional methods to generate code for.
//       "SetNamedPorts",
//     },
//     options: <options> // Or'd ("|") together.
//   }
//
// Read-only objects
//
// Services such as Regions and Zones do not allow for mutations. Specify
// "ReadOnly" in ServiceInfo.options to omit the mutation methods.
//
// Adding custom methods
//
// Some methods that may not be properly handled by the generated code. To enable
// addition of custom code to the generated mocks, set the "CustomOps" option
// in "meta.ServiceInfo" entry. This will make the generated service interface
// embed a "<ServiceName>Ops" interface. This interface MUST be written by hand
// and contain the custom method logic. Corresponding methods must be added to
// the corresponding Mockxxx and GCExxx struct types.
//
//   // In "meta/meta.go":
//   &ServiceInfo{
//     Object: "InstanceGroup",
//     ...
//     options: CustomOps,
//   }
//
//   // In the generated code "gen.go":
//   type InstanceGroups interface {
//     InstanceGroupsOps // Added by CustomOps option.
//     ...
//   }
//
//   // In hand written file:
//   type InstanceGroupsOps interface {
//     MyMethod()
//   }
//
//   func (mock *MockInstanceGroups) MyMethod() {
//     // Custom mock implementation.
//   }
//
//   func (gce *GCEInstanceGroups) MyMethod() {
//     // Custom implementation.
//   }
package cloud
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD
generated
vendored
Normal file
@ -0,0 +1,29 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["filter.go"],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter",
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/golang/glog:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = ["filter_test.go"],
    embed = [":go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
303
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package filter encapsulates the filter argument to compute API calls.
//
//   // List all global addresses (no filter).
//   c.GlobalAddresses().List(ctx, filter.None)
//
//   // List global addresses filtering for name matching "abc.*".
//   c.GlobalAddresses().List(ctx, filter.Regexp("name", "abc.*"))
//
//   // List on multiple conditions.
//   f := filter.Regexp("name", "homer.*").AndNotRegexp("name", "homers")
//   c.GlobalAddresses().List(ctx, f)
package filter

import (
	"errors"
	"fmt"
	"reflect"
	"regexp"
	"strings"

	"github.com/golang/glog"
)

var (
	// None indicates that the List result set should not be filter (i.e.
	// return all values).
	None *F
)

// Regexp returns a filter for fieldName matches regexp v.
func Regexp(fieldName, v string) *F {
	return (&F{}).AndRegexp(fieldName, v)
}

// NotRegexp returns a filter for fieldName not matches regexp v.
func NotRegexp(fieldName, v string) *F {
	return (&F{}).AndNotRegexp(fieldName, v)
}

// EqualInt returns a filter for fieldName ~ v.
func EqualInt(fieldName string, v int) *F {
	return (&F{}).AndEqualInt(fieldName, v)
}

// NotEqualInt returns a filter for fieldName != v.
func NotEqualInt(fieldName string, v int) *F {
	return (&F{}).AndNotEqualInt(fieldName, v)
}

// EqualBool returns a filter for fieldName == v.
func EqualBool(fieldName string, v bool) *F {
	return (&F{}).AndEqualBool(fieldName, v)
}

// NotEqualBool returns a filter for fieldName != v.
func NotEqualBool(fieldName string, v bool) *F {
	return (&F{}).AndNotEqualBool(fieldName, v)
}

// F is a filter to be used with List() operations.
//
// From the compute API description:
//
// Sets a filter {expression} for filtering listed resources. Your {expression}
// must be in the format: field_name comparison_string literal_string.
//
// The field_name is the name of the field you want to compare. Only atomic field
// types are supported (string, number, boolean). The comparison_string must be
// either eq (equals) or ne (not equals). The literal_string is the string value
// to filter to. The literal value must be valid for the type of field you are
// filtering by (string, number, boolean). For string fields, the literal value is
// interpreted as a regular expression using RE2 syntax. The literal value must
// match the entire field.
//
// For example, to filter for instances that do not have a name of
// example-instance, you would use name ne example-instance.
//
// You can filter on nested fields. For example, you could filter on instances
// that have set the scheduling.automaticRestart field to true. Use filtering on
// nested fields to take advantage of labels to organize and search for results
// based on label values.
//
// To filter on multiple expressions, provide each separate expression within
// parentheses. For example, (scheduling.automaticRestart eq true)
// (zone eq us-central1-f). Multiple expressions are treated as AND expressions,
// meaning that resources must match all expressions to pass the filters.
type F struct {
	predicates []filterPredicate
}

// And joins two filters together.
func (fl *F) And(rest *F) *F {
	fl.predicates = append(fl.predicates, rest.predicates...)
	return fl
}

// AndRegexp adds a field match string predicate.
func (fl *F) AndRegexp(fieldName, v string) *F {
	fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, s: &v})
	return fl
}

// AndNotRegexp adds a field not match string predicate.
func (fl *F) AndNotRegexp(fieldName, v string) *F {
	fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, s: &v})
	return fl
}

// AndEqualInt adds a field == int predicate.
func (fl *F) AndEqualInt(fieldName string, v int) *F {
	fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, i: &v})
	return fl
}

// AndNotEqualInt adds a field != int predicate.
func (fl *F) AndNotEqualInt(fieldName string, v int) *F {
	fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, i: &v})
	return fl
}

// AndEqualBool adds a field == bool predicate.
func (fl *F) AndEqualBool(fieldName string, v bool) *F {
	fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, b: &v})
	return fl
}

// AndNotEqualBool adds a field != bool predicate.
func (fl *F) AndNotEqualBool(fieldName string, v bool) *F {
	fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, b: &v})
	return fl
}

func (fl *F) String() string {
	if len(fl.predicates) == 1 {
		return fl.predicates[0].String()
	}

	var pl []string
	for _, p := range fl.predicates {
		pl = append(pl, "("+p.String()+")")
	}
	return strings.Join(pl, " ")
}

// Match returns true if the F as specifies matches the given object. This
// is used by the Mock implementations to perform filtering and SHOULD NOT be
// used in production code as it is not well-tested to be equivalent to the
// actual compute API.
func (fl *F) Match(obj interface{}) bool {
	if fl == nil {
		return true
	}
	for _, p := range fl.predicates {
		if !p.match(obj) {
			return false
		}
	}
	return true
}

type filterOp int

const (
	equals    filterOp = iota
	notEquals filterOp = iota
)

// filterPredicate is an individual predicate for a fieldName and value.
type filterPredicate struct {
	fieldName string

	op filterOp
	s  *string
	i  *int
	b  *bool
}

func (fp *filterPredicate) String() string {
	var op string
	switch fp.op {
	case equals:
		op = "eq"
	case notEquals:
		op = "ne"
	default:
		op = "invalidOp"
	}

	var value string
	switch {
	case fp.s != nil:
		// There does not seem to be any sort of escaping as specified in the
		// document. This means it's possible to create malformed expressions.
		value = *fp.s
	case fp.i != nil:
		value = fmt.Sprintf("%d", *fp.i)
	case fp.b != nil:
		value = fmt.Sprintf("%t", *fp.b)
	default:
		value = "invalidValue"
	}

	return fmt.Sprintf("%s %s %s", fp.fieldName, op, value)
}

func (fp *filterPredicate) match(o interface{}) bool {
	v, err := extractValue(fp.fieldName, o)
	glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err)
	if err != nil {
		return false
	}

	var match bool
	switch x := v.(type) {
	case string:
		if fp.s == nil {
			return false
		}
		re, err := regexp.Compile(*fp.s)
		if err != nil {
			glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err)
			return false
		}
		match = re.Match([]byte(x))
	case int:
		if fp.i == nil {
			return false
		}
		match = x == *fp.i
	case bool:
		if fp.b == nil {
			return false
		}
		match = x == *fp.b
	}

	switch fp.op {
	case equals:
		return match
	case notEquals:
		return !match
	}

	return false
}

// snakeToCamelCase converts from "names_like_this" to "NamesLikeThis" to
// interoperate between proto and Golang naming conventions.
func snakeToCamelCase(s string) string {
	parts := strings.Split(s, "_")
	var ret string
	for _, x := range parts {
		ret += strings.Title(x)
	}
	return ret
}

// extractValue returns the value of the field named by path in object o if it exists.
func extractValue(path string, o interface{}) (interface{}, error) {
	parts := strings.Split(path, ".")
	for _, f := range parts {
		v := reflect.ValueOf(o)
		// Dereference Ptr to handle *struct.
		if v.Kind() == reflect.Ptr {
			if v.IsNil() {
				return nil, errors.New("field is nil")
			}
			v = v.Elem()
		}
		if v.Kind() != reflect.Struct {
			return nil, fmt.Errorf("cannot get field from non-struct (%T)", o)
		}
		v = v.FieldByName(snakeToCamelCase(f))
		if !v.IsValid() {
			return nil, fmt.Errorf("cannot get field %q as it is not a valid field in %T", f, o)
		}
		if !v.CanInterface() {
			return nil, fmt.Errorf("cannot get field %q in obj of type %T", f, o)
		}
		o = v.Interface()
	}
	switch o.(type) {
	case string, int, bool:
		return o, nil
	}
	return nil, fmt.Errorf("unhandled object of type %T", o)
}
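A brief sketch of how the filter package above composes: predicates chain with the And* methods, String() renders the compute API filter expression, and Match() is the reflection-based evaluator intended only for the mocks. The struct and field values below are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
)

type instance struct {
	Name string
	Zone string
}

func main() {
	f := filter.Regexp("name", "frontend-.*").AndNotRegexp("zone", "us-central1-f")
	fmt.Println(f) // (name eq frontend-.*) (zone ne us-central1-f)

	// Match resolves snake_case field paths via reflection; mocks only.
	fmt.Println(f.Match(&instance{Name: "frontend-1", Zone: "us-east1-b"})) // true
	fmt.Println(f.Match(&instance{Name: "backend-1", Zone: "us-east1-b"}))  // false
}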
176
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter_test.go
generated
vendored
Normal file
@ -0,0 +1,176 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filter

import (
	"reflect"
	"testing"
)

func TestFilterToString(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		f    *F
		want string
	}{
		{Regexp("field1", "abc"), `field1 eq abc`},
		{NotRegexp("field1", "abc"), `field1 ne abc`},
		{EqualInt("field1", 13), "field1 eq 13"},
		{NotEqualInt("field1", 13), "field1 ne 13"},
		{EqualBool("field1", true), "field1 eq true"},
		{NotEqualBool("field1", true), "field1 ne true"},
		{Regexp("field1", "abc").AndRegexp("field2", "def"), `(field1 eq abc) (field2 eq def)`},
		{Regexp("field1", "abc").AndNotEqualInt("field2", 17), `(field1 eq abc) (field2 ne 17)`},
		{Regexp("field1", "abc").And(EqualInt("field2", 17)), `(field1 eq abc) (field2 eq 17)`},
	} {
		if tc.f.String() != tc.want {
			t.Errorf("filter %#v String() = %q, want %q", tc.f, tc.f.String(), tc.want)
		}
	}
}

func TestFilterMatch(t *testing.T) {
	t.Parallel()

	type inner struct {
		X string
	}
	type S struct {
		S           string
		I           int
		B           bool
		Unhandled   struct{}
		NestedField *inner
	}

	for _, tc := range []struct {
		f    *F
		o    interface{}
		want bool
	}{
		{f: None, o: &S{}, want: true},
		{f: Regexp("s", "abc"), o: &S{}},
		{f: EqualInt("i", 10), o: &S{}},
		{f: EqualBool("b", true), o: &S{}},
		{f: NotRegexp("s", "abc"), o: &S{}, want: true},
		{f: NotEqualInt("i", 10), o: &S{}, want: true},
		{f: NotEqualBool("b", true), o: &S{}, want: true},
		{f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{}},
		{f: Regexp("s", "abc"), o: &S{S: "abc"}, want: true},
		{f: Regexp("s", "a.*"), o: &S{S: "abc"}, want: true},
		{f: Regexp("s", "a((("), o: &S{S: "abc"}},
		{f: NotRegexp("s", "abc"), o: &S{S: "abc"}},
		{f: EqualInt("i", 10), o: &S{I: 11}},
		{f: EqualInt("i", 10), o: &S{I: 10}, want: true},
		{f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{S: "abc"}},
		{f: Regexp("s", "abcd").AndEqualBool("b", true), o: &S{S: "abc"}},
		{f: Regexp("s", "abc").AndEqualBool("b", true), o: &S{S: "abc", B: true}, want: true},
		{f: Regexp("s", "abc").And(EqualBool("b", true)), o: &S{S: "abc", B: true}, want: true},
		{f: Regexp("unhandled", "xyz"), o: &S{}},
		{f: Regexp("nested_field.x", "xyz"), o: &S{}},
		{f: Regexp("nested_field.x", "xyz"), o: &S{NestedField: &inner{"xyz"}}, want: true},
		{f: NotRegexp("nested_field.x", "xyz"), o: &S{NestedField: &inner{"xyz"}}},
		{f: Regexp("nested_field.y", "xyz"), o: &S{NestedField: &inner{"xyz"}}},
		{f: Regexp("nested_field", "xyz"), o: &S{NestedField: &inner{"xyz"}}},
	} {
		got := tc.f.Match(tc.o)
		if got != tc.want {
			t.Errorf("%v: Match(%+v) = %v, want %v", tc.f, tc.o, got, tc.want)
		}
	}
}

func TestFilterSnakeToCamelCase(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		s    string
		want string
	}{
		{"", ""},
		{"abc", "Abc"},
		{"_foo", "Foo"},
		{"a_b_c", "ABC"},
		{"a_BC_def", "ABCDef"},
		{"a_Bc_def", "ABcDef"},
	} {
		got := snakeToCamelCase(tc.s)
		if got != tc.want {
			t.Errorf("snakeToCamelCase(%q) = %q, want %q", tc.s, got, tc.want)
		}
	}
}

func TestFilterExtractValue(t *testing.T) {
	t.Parallel()

	type nest2 struct {
		Y string
	}
	type nest struct {
		X     string
		Nest2 nest2
	}
	st := &struct {
		S       string
		I       int
		F       bool
		Nest    nest
		NestPtr *nest

		Unhandled float64
	}{
		"abc",
		13,
		true,
		nest{"xyz", nest2{"zzz"}},
		&nest{"yyy", nest2{}},
		0.0,
	}

	for _, tc := range []struct {
		path    string
		o       interface{}
		want    interface{}
		wantErr bool
	}{
		{path: "s", o: st, want: "abc"},
		{path: "i", o: st, want: 13},
		{path: "f", o: st, want: true},
		{path: "nest.x", o: st, want: "xyz"},
		{path: "nest_ptr.x", o: st, want: "yyy"},
		// Error cases.
		{path: "", o: st, wantErr: true},
		{path: "no_such_field", o: st, wantErr: true},
		{path: "s.invalid_type", o: st, wantErr: true},
		{path: "unhandled", o: st, wantErr: true},
		{path: "nest.x", o: &struct{ Nest *nest }{}, wantErr: true},
	} {
		o, err := extractValue(tc.path, tc.o)
		gotErr := err != nil
		if gotErr != tc.wantErr {
			t.Errorf("extractValue(%v, %+v) = %v, %v; gotErr = %v, tc.wantErr = %v", tc.path, tc.o, o, err, gotErr, tc.wantErr)
		}
		if err != nil {
			continue
		}
		if !reflect.DeepEqual(o, tc.want) {
			t.Errorf("extractValue(%v, %+v) = %v, nil; want %v, nil", tc.path, tc.o, o, tc.want)
		}
	}
}
99
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gce_projects.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloud

import (
	"context"
	"fmt"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

// ProjectsOps is the manually implemented methods for the Projects service.
type ProjectsOps interface {
	Get(ctx context.Context, projectID string) (*compute.Project, error)
	SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error
}

// MockProjectOpsState is stored in the mock.X field.
type MockProjectOpsState struct {
	metadata map[string]*compute.Metadata
}

// Get a project by projectID.
func (m *MockProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) {
	m.Lock.Lock()
	defer m.Lock.Unlock()

	if p, ok := m.Objects[*meta.GlobalKey(projectID)]; ok {
		return p.ToGA(), nil
	}
	return nil, &googleapi.Error{
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockProjects %v not found", projectID),
	}
}

// Get a project by projectID.
func (g *GCEProjects) Get(ctx context.Context, projectID string) (*compute.Project, error) {
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "Get",
		Version:   meta.Version("ga"),
		Service:   "Projects",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
	call := g.s.GA.Projects.Get(projectID)
	call.Context(ctx)
	return call.Do()
}

// SetCommonInstanceMetadata for a given project.
func (m *MockProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, meta *compute.Metadata) error {
	if m.X == nil {
		m.X = &MockProjectOpsState{metadata: map[string]*compute.Metadata{}}
	}
	state := m.X.(*MockProjectOpsState)
	state.metadata[projectID] = meta
	return nil
}

// SetCommonInstanceMetadata for a given project.
func (g *GCEProjects) SetCommonInstanceMetadata(ctx context.Context, projectID string, m *compute.Metadata) error {
	rk := &RateLimitKey{
		ProjectID: projectID,
		Operation: "SetCommonInstanceMetadata",
		Version:   meta.Version("ga"),
		Service:   "Projects",
	}
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return err
	}
	call := g.s.GA.Projects.SetCommonInstanceMetadata(projectID, m)
	call.Context(ctx)

	op, err := call.Do()
	if err != nil {
		return err
	}
	return g.s.WaitForCompletion(ctx, op)
}
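The point of the hand-written ProjectsOps interface above is that callers depend only on the interface, so the same code runs against GCEProjects or MockProjects. A hedged sketch of that usage, with the helper name and project ID invented for illustration and the concrete construction of a ProjectsOps value left out:

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

// describeProject depends only on the ProjectsOps interface, not a concrete type.
func describeProject(ctx context.Context, ops cloud.ProjectsOps, projectID string) (*compute.Project, error) {
	return ops.Get(ctx, projectID)
}

func main() {
	// A real or mock implementation would be obtained from the GCE or MockGCE cloud.
	var ops cloud.ProjectsOps
	if ops == nil {
		fmt.Println("no ProjectsOps wired up in this sketch")
		return
	}
	p, err := describeProject(context.Background(), ops, "my-project")
	fmt.Println(p, err)
}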
13154
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen/BUILD
generated
vendored
Normal file
@ -0,0 +1,29 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen",
    visibility = ["//visibility:private"],
    deps = ["//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library"],
)

go_binary(
    name = "gen",
    embed = [":go_default_library"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
1277
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen/main.go
generated
vendored
Normal file
File diff suppressed because it is too large
1809
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "key.go",
        "meta.go",
        "method.go",
        "service.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
        "//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["key_test.go"],
    embed = [":go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package meta contains the meta description of the GCE cloud types to
// generate code for.
package meta
108
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/key.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package meta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Key for a GCP resource.
|
||||
type Key struct {
|
||||
Name string
|
||||
Zone string
|
||||
Region string
|
||||
}
|
||||
|
||||
// KeyType is the type of the key.
|
||||
type KeyType string
|
||||
|
||||
const (
|
||||
// Zonal key type.
|
||||
Zonal = "zonal"
|
||||
// Regional key type.
|
||||
Regional = "regional"
|
||||
// Global key type.
|
||||
Global = "global"
|
||||
)
|
||||
|
||||
var (
|
||||
// locationRegexp is the format of regions/zone names in GCE.
|
||||
locationRegexp = regexp.MustCompile("^[a-z](?:[-a-z0-9]+)?$")
|
||||
)
|
||||
|
||||
// ZonalKey returns the key for a zonal resource.
|
||||
func ZonalKey(name, zone string) *Key {
|
||||
return &Key{name, zone, ""}
|
||||
}
|
||||
|
||||
// RegionalKey returns the key for a regional resource.
|
||||
func RegionalKey(name, region string) *Key {
|
||||
return &Key{name, "", region}
|
||||
}
|
||||
|
||||
// GlobalKey returns the key for a global resource.
|
||||
func GlobalKey(name string) *Key {
|
||||
return &Key{name, "", ""}
|
||||
}
|
||||
|
||||
// Type returns the type of the key.
|
||||
func (k *Key) Type() KeyType {
|
||||
switch {
|
||||
case k.Zone != "":
|
||||
return Zonal
|
||||
case k.Region != "":
|
||||
return Regional
|
||||
default:
|
||||
return Global
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a string representation of the key.
|
||||
func (k Key) String() string {
|
||||
switch k.Type() {
|
||||
case Zonal:
|
||||
return fmt.Sprintf("Key{%q, zone: %q}", k.Name, k.Zone)
|
||||
case Regional:
|
||||
return fmt.Sprintf("Key{%q, region: %q}", k.Name, k.Region)
|
||||
default:
|
||||
return fmt.Sprintf("Key{%q}", k.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Valid is true if the key is valid.
|
||||
func (k *Key) Valid() bool {
|
||||
if k.Zone != "" && k.Region != "" {
|
||||
return false
|
||||
}
|
||||
switch {
|
||||
case k.Region != "":
|
||||
return locationRegexp.Match([]byte(k.Region))
|
||||
case k.Zone != "":
|
||||
return locationRegexp.Match([]byte(k.Zone))
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// KeysToMap creates a map[Key]bool from a list of keys.
|
||||
func KeysToMap(keys ...Key) map[Key]bool {
|
||||
ret := map[Key]bool{}
|
||||
for _, k := range keys {
|
||||
ret[k] = true
|
||||
}
|
||||
return ret
|
||||
}
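
To make the key helpers above concrete, here is a minimal usage sketch; it is illustrative only (not part of the vendored file) and assumes it lives in package meta.

// exampleKeys is an illustrative sketch of constructing and inspecting keys.
func exampleKeys() {
	zk := ZonalKey("my-disk", "us-central1-b")  // zk.Type() == Zonal
	rk := RegionalKey("my-addr", "us-central1") // rk.Type() == Regional
	gk := GlobalKey("my-url-map")               // gk.Type() == Global

	for _, k := range []*Key{zk, rk, gk} {
		if !k.Valid() {
			panic("unexpected invalid key: " + k.String())
		}
	}
}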
|
76
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go
generated
vendored
Normal file
76
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/key_test.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package meta
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestKeyType(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
key *Key
|
||||
want KeyType
|
||||
}{
|
||||
{GlobalKey("abc"), Global},
|
||||
{ZonalKey("abc", "us-central1-b"), Zonal},
|
||||
{RegionalKey("abc", "us-central1"), Regional},
|
||||
} {
|
||||
if tc.key.Type() != tc.want {
|
||||
t.Errorf("key.Type() == %v, want %v", tc.key.Type(), tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyString(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, k := range []*Key{
|
||||
GlobalKey("abc"),
|
||||
RegionalKey("abc", "us-central1"),
|
||||
ZonalKey("abc", "us-central1-b"),
|
||||
} {
|
||||
if k.String() == "" {
|
||||
t.Errorf(`k.String() = "", want non-empty`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyValid(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
region := "us-central1"
|
||||
zone := "us-central1-b"
|
||||
|
||||
for _, tc := range []struct {
|
||||
key *Key
|
||||
want bool
|
||||
}{
|
||||
{GlobalKey("abc"), true},
|
||||
{RegionalKey("abc", region), true},
|
||||
{ZonalKey("abc", zone), true},
|
||||
{RegionalKey("abc", "/invalid/"), false},
|
||||
{ZonalKey("abc", "/invalid/"), false},
|
||||
{&Key{"abc", zone, region}, false},
|
||||
} {
|
||||
got := tc.key.Valid()
|
||||
if got != tc.want {
|
||||
t.Errorf("key %+v; key.Valid() = %v, want %v", tc.key, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
386
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go
generated
vendored
Normal file
386
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go
generated
vendored
Normal file
@ -0,0 +1,386 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package meta
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// Version of the API (ga, alpha, beta).
|
||||
type Version string
|
||||
|
||||
const (
|
||||
// NoGet prevents the Get() method from being generated.
|
||||
NoGet = 1 << iota
|
||||
// NoList prevents the List() method from being generated.
|
||||
NoList = 1 << iota
|
||||
// NoDelete prevents the Delete() method from being generated.
|
||||
NoDelete = 1 << iota
|
||||
// NoInsert prevents the Insert() method from being generated.
|
||||
NoInsert = 1 << iota
|
||||
// CustomOps specifies that an empty interface xxxOps will be generated to
|
||||
// enable custom method calls to be attached to the generated service
|
||||
// interface.
|
||||
CustomOps = 1 << iota
|
||||
// AggregatedList will generate an AggregatedList() method.
|
||||
AggregatedList = 1 << iota
|
||||
|
||||
// ReadOnly specifies that the given resource is read-only and should not
|
||||
// have insert() or delete() methods generated for the wrapper.
|
||||
ReadOnly = NoDelete | NoInsert
|
||||
|
||||
// VersionGA is the API version in compute.v1.
|
||||
VersionGA Version = "ga"
|
||||
// VersionAlpha is the API version in compute.v0.alpha.
|
||||
VersionAlpha Version = "alpha"
|
||||
// VersionBeta is the API version in compute.v0.beta.
|
||||
VersionBeta Version = "beta"
|
||||
)
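
Since the flags above are plain bit masks, here is a hedged sketch of how a generator can combine and test them; the helper below is illustrative and not part of this file.

// optionsAllow is an illustrative helper: it reports whether a method guarded
// by the given flag should still be generated for a set of options.
func optionsAllow(options, flag int) bool {
	return options&flag == 0
}

// For example, a ReadOnly service (ReadOnly = NoDelete | NoInsert) still
// allows Get and List but suppresses Insert and Delete:
//   optionsAllow(ReadOnly, NoGet)    == true
//   optionsAllow(ReadOnly, NoInsert) == false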
|
||||
|
||||
// AllVersions is a list of all versions of the GCE API.
|
||||
var AllVersions = []Version{
|
||||
VersionGA,
|
||||
VersionAlpha,
|
||||
VersionBeta,
|
||||
}
|
||||
|
||||
// AllServices is a list of all the services to generate code for. Keep
|
||||
// this list in lexicographical order by object type.
|
||||
var AllServices = []*ServiceInfo{
|
||||
{
|
||||
Object: "Address",
|
||||
Service: "Addresses",
|
||||
Resource: "addresses",
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&ga.AddressesService{}),
|
||||
},
|
||||
{
|
||||
Object: "Address",
|
||||
Service: "Addresses",
|
||||
Resource: "addresses",
|
||||
version: VersionAlpha,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&alpha.AddressesService{}),
|
||||
},
|
||||
{
|
||||
Object: "Address",
|
||||
Service: "Addresses",
|
||||
Resource: "addresses",
|
||||
version: VersionBeta,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&beta.AddressesService{}),
|
||||
},
|
||||
{
|
||||
Object: "Address",
|
||||
Service: "GlobalAddresses",
|
||||
Resource: "addresses",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.GlobalAddressesService{}),
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
Service: "BackendServices",
|
||||
Resource: "backendServices",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.BackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"GetHealth",
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
Service: "BackendServices",
|
||||
Resource: "backendServices",
|
||||
version: VersionAlpha,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&alpha.BackendServicesService{}),
|
||||
additionalMethods: []string{"Update"},
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
Service: "RegionBackendServices",
|
||||
Resource: "backendServices",
|
||||
version: VersionGA,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&ga.RegionBackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"GetHealth",
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "BackendService",
|
||||
Service: "RegionBackendServices",
|
||||
Resource: "backendServices",
|
||||
version: VersionAlpha,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&alpha.RegionBackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"GetHealth",
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Disk",
|
||||
Service: "Disks",
|
||||
Resource: "disks",
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&ga.DisksService{}),
|
||||
},
|
||||
{
|
||||
Object: "Disk",
|
||||
Service: "Disks",
|
||||
Resource: "disks",
|
||||
version: VersionAlpha,
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&alpha.DisksService{}),
|
||||
},
|
||||
{
|
||||
Object: "Disk",
|
||||
Service: "RegionDisks",
|
||||
Resource: "disks",
|
||||
version: VersionAlpha,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&alpha.DisksService{}),
|
||||
},
|
||||
{
|
||||
Object: "Firewall",
|
||||
Service: "Firewalls",
|
||||
Resource: "firewalls",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.FirewallsService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "ForwardingRule",
|
||||
Service: "ForwardingRules",
|
||||
Resource: "forwardingRules",
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&ga.ForwardingRulesService{}),
|
||||
},
|
||||
{
|
||||
Object: "ForwardingRule",
|
||||
Service: "ForwardingRules",
|
||||
Resource: "forwardingRules",
|
||||
version: VersionAlpha,
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&alpha.ForwardingRulesService{}),
|
||||
},
|
||||
{
|
||||
Object: "ForwardingRule",
|
||||
Service: "GlobalForwardingRules",
|
||||
Resource: "forwardingRules",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.GlobalForwardingRulesService{}),
|
||||
additionalMethods: []string{
|
||||
"SetTarget",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "HealthCheck",
|
||||
Service: "HealthChecks",
|
||||
Resource: "healthChecks",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.HealthChecksService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "HealthCheck",
|
||||
Service: "HealthChecks",
|
||||
Resource: "healthChecks",
|
||||
version: VersionAlpha,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&alpha.HealthChecksService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "HttpHealthCheck",
|
||||
Service: "HttpHealthChecks",
|
||||
Resource: "httpHealthChecks",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.HttpHealthChecksService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "HttpsHealthCheck",
|
||||
Service: "HttpsHealthChecks",
|
||||
Resource: "httpsHealthChecks",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.HttpsHealthChecksService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "InstanceGroup",
|
||||
Service: "InstanceGroups",
|
||||
Resource: "instanceGroups",
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&ga.InstanceGroupsService{}),
|
||||
additionalMethods: []string{
|
||||
"AddInstances",
|
||||
"ListInstances",
|
||||
"RemoveInstances",
|
||||
"SetNamedPorts",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Instance",
|
||||
Service: "Instances",
|
||||
Resource: "instances",
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&ga.InstancesService{}),
|
||||
additionalMethods: []string{
|
||||
"AttachDisk",
|
||||
"DetachDisk",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Instance",
|
||||
Service: "Instances",
|
||||
Resource: "instances",
|
||||
version: VersionBeta,
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&beta.InstancesService{}),
|
||||
additionalMethods: []string{
|
||||
"AttachDisk",
|
||||
"DetachDisk",
|
||||
"UpdateNetworkInterface",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Instance",
|
||||
Service: "Instances",
|
||||
Resource: "instances",
|
||||
version: VersionAlpha,
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&alpha.InstancesService{}),
|
||||
additionalMethods: []string{
|
||||
"AttachDisk",
|
||||
"DetachDisk",
|
||||
"UpdateNetworkInterface",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "NetworkEndpointGroup",
|
||||
Service: "NetworkEndpointGroups",
|
||||
Resource: "networkEndpointGroups",
|
||||
version: VersionAlpha,
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&alpha.NetworkEndpointGroupsService{}),
|
||||
additionalMethods: []string{
|
||||
"AttachNetworkEndpoints",
|
||||
"DetachNetworkEndpoints",
|
||||
"ListNetworkEndpoints",
|
||||
},
|
||||
options: AggregatedList,
|
||||
},
|
||||
{
|
||||
Object: "Project",
|
||||
Service: "Projects",
|
||||
Resource: "projects",
|
||||
keyType: Global,
|
||||
// Generate only the stub with no methods.
|
||||
options: NoGet | NoList | NoInsert | NoDelete | CustomOps,
|
||||
serviceType: reflect.TypeOf(&ga.ProjectsService{}),
|
||||
},
|
||||
{
|
||||
Object: "Region",
|
||||
Service: "Regions",
|
||||
Resource: "regions",
|
||||
keyType: Global,
|
||||
options: ReadOnly,
|
||||
serviceType: reflect.TypeOf(&ga.RegionsService{}),
|
||||
},
|
||||
{
|
||||
Object: "Route",
|
||||
Service: "Routes",
|
||||
Resource: "routes",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.RoutesService{}),
|
||||
},
|
||||
{
|
||||
Object: "SslCertificate",
|
||||
Service: "SslCertificates",
|
||||
Resource: "sslCertificates",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.SslCertificatesService{}),
|
||||
},
|
||||
{
|
||||
Object: "TargetHttpProxy",
|
||||
Service: "TargetHttpProxies",
|
||||
Resource: "targetHttpProxies",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.TargetHttpProxiesService{}),
|
||||
additionalMethods: []string{
|
||||
"SetUrlMap",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "TargetHttpsProxy",
|
||||
Service: "TargetHttpsProxies",
|
||||
Resource: "targetHttpsProxies",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.TargetHttpsProxiesService{}),
|
||||
additionalMethods: []string{
|
||||
"SetSslCertificates",
|
||||
"SetUrlMap",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "TargetPool",
|
||||
Service: "TargetPools",
|
||||
Resource: "targetPools",
|
||||
keyType: Regional,
|
||||
serviceType: reflect.TypeOf(&ga.TargetPoolsService{}),
|
||||
additionalMethods: []string{
|
||||
"AddInstance",
|
||||
"RemoveInstance",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "UrlMap",
|
||||
Service: "UrlMaps",
|
||||
Resource: "urlMaps",
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&ga.UrlMapsService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "Zone",
|
||||
Service: "Zones",
|
||||
Resource: "zones",
|
||||
keyType: Global,
|
||||
options: ReadOnly,
|
||||
serviceType: reflect.TypeOf(&ga.ZonesService{}),
|
||||
},
|
||||
}
|
337
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/method.go
generated
vendored
Normal file
337
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/method.go
generated
vendored
Normal file
@ -0,0 +1,337 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package meta
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func newArg(t reflect.Type) *arg {
|
||||
ret := &arg{}
|
||||
|
||||
// Dereference the pointer types to get at the underlying concrete type.
|
||||
Loop:
|
||||
for {
|
||||
switch t.Kind() {
|
||||
case reflect.Ptr:
|
||||
ret.numPtr++
|
||||
t = t.Elem()
|
||||
default:
|
||||
ret.pkg = t.PkgPath()
|
||||
ret.typeName += t.Name()
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type arg struct {
|
||||
pkg, typeName string
|
||||
numPtr int
|
||||
}
|
||||
|
||||
func (a *arg) normalizedPkg() string {
|
||||
if a.pkg == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Strip the repo.../vendor/ prefix from the package path if present.
|
||||
parts := strings.Split(a.pkg, "/")
|
||||
// Remove vendor prefix.
|
||||
for i := 0; i < len(parts); i++ {
|
||||
if parts[i] == "vendor" {
|
||||
parts = parts[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
switch strings.Join(parts, "/") {
|
||||
case "google.golang.org/api/compute/v1":
|
||||
return "ga."
|
||||
case "google.golang.org/api/compute/v0.alpha":
|
||||
return "alpha."
|
||||
case "google.golang.org/api/compute/v0.beta":
|
||||
return "beta."
|
||||
default:
|
||||
panic(fmt.Errorf("unhandled package %q", a.pkg))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *arg) String() string {
|
||||
var ret string
|
||||
for i := 0; i < a.numPtr; i++ {
|
||||
ret += "*"
|
||||
}
|
||||
ret += a.normalizedPkg()
|
||||
ret += a.typeName
|
||||
return ret
|
||||
}
|
||||
|
||||
// newMethod returns a newly initialized method.
|
||||
func newMethod(s *ServiceInfo, m reflect.Method) *Method {
|
||||
ret := &Method{
|
||||
ServiceInfo: s,
|
||||
m: m,
|
||||
kind: MethodOperation,
|
||||
ReturnType: "",
|
||||
}
|
||||
ret.init()
|
||||
return ret
|
||||
}
|
||||
|
||||
// MethodKind is the type of method that we are generating code for.
|
||||
type MethodKind int
|
||||
|
||||
const (
|
||||
// MethodOperation is a long running method that returns an operation.
|
||||
MethodOperation MethodKind = iota
|
||||
// MethodGet is a method that immediately returns some data.
|
||||
MethodGet MethodKind = iota
|
||||
// MethodPaged is a method that returns a paged set of data.
|
||||
MethodPaged MethodKind = iota
|
||||
)
|
||||
|
||||
// Method is used to generate the calling code for non-standard methods.
|
||||
type Method struct {
|
||||
*ServiceInfo
|
||||
m reflect.Method
|
||||
|
||||
kind MethodKind
|
||||
// ReturnType is the return type for the method.
|
||||
ReturnType string
|
||||
// ItemType is the type of the individual elements returned from a
|
||||
// Pages() call. This is only applicable for MethodPaged kind.
|
||||
ItemType string
|
||||
}
|
||||
|
||||
// IsOperation is true if the method is an Operation.
|
||||
func (m *Method) IsOperation() bool {
|
||||
return m.kind == MethodOperation
|
||||
}
|
||||
|
||||
// IsPaged is true if the method is paged.
|
||||
func (m *Method) IsPaged() bool {
|
||||
return m.kind == MethodPaged
|
||||
}
|
||||
|
||||
// IsGet is true if the method is a simple get.
|
||||
func (m *Method) IsGet() bool {
|
||||
return m.kind == MethodGet
|
||||
}
|
||||
|
||||
// argsSkip is the number of arguments to skip when generating the
|
||||
// synthesized method.
|
||||
func (m *Method) argsSkip() int {
|
||||
switch m.keyType {
|
||||
case Zonal:
|
||||
return 4
|
||||
case Regional:
|
||||
return 4
|
||||
case Global:
|
||||
return 3
|
||||
}
|
||||
panic(fmt.Errorf("invalid KeyType %v", m.keyType))
|
||||
}
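
For orientation, a hedged illustration of what those skip counts cover; the signature below is only an example of the general shape of a zonal compute method.

// Illustrative only: for a zonal method shaped like
//
//	func (s *InstancesService) AttachDisk(project, zone, instance string, disk *ga.AttachedDisk) *InstancesAttachDiskCall
//
// the reflected inputs are (receiver, project, zone, instance, disk). The
// first four are filled in by the generated wrapper from the project ID and
// the *meta.Key, so argsSkip() == 4 and only disk is surfaced to callers as arg0.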
|
||||
|
||||
// args returns a list of arguments to the method, skipping the first skip
|
||||
// elements. If nameArgs is true, then the arguments will include a generated
|
||||
// parameter name (arg<N>). prefix will be added to the parameters.
|
||||
func (m *Method) args(skip int, nameArgs bool, prefix []string) []string {
|
||||
var args []*arg
|
||||
fType := m.m.Func.Type()
|
||||
for i := 0; i < fType.NumIn(); i++ {
|
||||
t := fType.In(i)
|
||||
args = append(args, newArg(t))
|
||||
}
|
||||
|
||||
var a []string
|
||||
for i := skip; i < fType.NumIn(); i++ {
|
||||
if nameArgs {
|
||||
a = append(a, fmt.Sprintf("arg%d %s", i-skip, args[i]))
|
||||
} else {
|
||||
a = append(a, args[i].String())
|
||||
}
|
||||
}
|
||||
return append(prefix, a...)
|
||||
}
|
||||
|
||||
// init the method. This performs some rudimentary static checking as well as
|
||||
// determines the kind of method by looking at the shape (method signature) of
|
||||
// the object.
|
||||
func (m *Method) init() {
|
||||
fType := m.m.Func.Type()
|
||||
if fType.NumIn() < m.argsSkip() {
|
||||
err := fmt.Errorf("method %q.%q, arity = %d which is less than required (< %d)",
|
||||
m.Service, m.Name(), fType.NumIn(), m.argsSkip())
|
||||
panic(err)
|
||||
}
|
||||
// Skipped args should all be string (they will be projectID, zone, region etc).
|
||||
for i := 1; i < m.argsSkip(); i++ {
|
||||
if fType.In(i).Kind() != reflect.String {
|
||||
panic(fmt.Errorf("method %q.%q: skipped args can only be strings", m.Service, m.Name()))
|
||||
}
|
||||
}
|
||||
// The method must return a single value of type *xxxCall.
|
||||
if fType.NumOut() != 1 || fType.Out(0).Kind() != reflect.Ptr || !strings.HasSuffix(fType.Out(0).Elem().Name(), "Call") {
|
||||
panic(fmt.Errorf("method %q.%q: generator only supports methods returning an *xxxCall object",
|
||||
m.Service, m.Name()))
|
||||
}
|
||||
returnType := fType.Out(0)
|
||||
returnTypeName := fType.Out(0).Elem().Name()
|
||||
// xxxCall must have a Do() method.
|
||||
doMethod, ok := returnType.MethodByName("Do")
|
||||
if !ok {
|
||||
panic(fmt.Errorf("method %q.%q: return type %q does not have a Do() method",
|
||||
m.Service, m.Name(), returnTypeName))
|
||||
}
|
||||
_, hasPages := returnType.MethodByName("Pages")
|
||||
// Do() method must return (*T, error).
|
||||
switch doMethod.Func.Type().NumOut() {
|
||||
case 2:
|
||||
out0 := doMethod.Func.Type().Out(0)
|
||||
if out0.Kind() != reflect.Ptr {
|
||||
panic(fmt.Errorf("method %q.%q: return type %q of Do() = S, _; S must be pointer type (%v)",
|
||||
m.Service, m.Name(), returnTypeName, out0))
|
||||
}
|
||||
m.ReturnType = out0.Elem().Name()
|
||||
switch {
|
||||
case out0.Elem().Name() == "Operation":
|
||||
m.kind = MethodOperation
|
||||
case hasPages:
|
||||
m.kind = MethodPaged
|
||||
// Pages() returns a xxxList that has the actual list
|
||||
// of objects in the xxxList.Items field.
|
||||
listType := out0.Elem()
|
||||
itemsField, ok := listType.FieldByName("Items")
|
||||
if !ok {
|
||||
panic(fmt.Errorf("method %q.%q: paged return type %q does not have a .Items field", m.Service, m.Name(), listType.Name()))
|
||||
}
|
||||
// itemsField will be a []*ItemType. Dereference to
|
||||
// extract the ItemType.
|
||||
itemsType := itemsField.Type
|
||||
if itemsType.Kind() != reflect.Slice && itemsType.Elem().Kind() != reflect.Ptr {
|
||||
panic(fmt.Errorf("method %q.%q: paged return type %q.Items is not an array of pointers", m.Service, m.Name(), listType.Name()))
|
||||
}
|
||||
m.ItemType = itemsType.Elem().Elem().Name()
|
||||
default:
|
||||
m.kind = MethodGet
|
||||
}
|
||||
// The second return value of Do() must be "error".
|
||||
if doMethod.Func.Type().Out(1).Name() != "error" {
|
||||
panic(fmt.Errorf("method %q.%q: return type %q of Do() = S, T; T must be 'error'",
|
||||
m.Service, m.Name(), returnTypeName))
|
||||
}
|
||||
break
|
||||
default:
|
||||
panic(fmt.Errorf("method %q.%q: %q Do() return type is not handled by the generator",
|
||||
m.Service, m.Name(), returnTypeName))
|
||||
}
|
||||
}
|
||||
|
||||
// Name is the name of the method.
|
||||
func (m *Method) Name() string {
|
||||
return m.m.Name
|
||||
}
|
||||
|
||||
// CallArgs is a list of comma separated "argN" used for calling the method.
|
||||
// For example, if the method has two additional arguments, this will return
|
||||
// "arg0, arg1".
|
||||
func (m *Method) CallArgs() string {
|
||||
var args []string
|
||||
for i := m.argsSkip(); i < m.m.Func.Type().NumIn(); i++ {
|
||||
args = append(args, fmt.Sprintf("arg%d", i-m.argsSkip()))
|
||||
}
|
||||
if len(args) == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf(", %s", strings.Join(args, ", "))
|
||||
}
|
||||
|
||||
// MockHookName is the name of the hook function in the mock.
|
||||
func (m *Method) MockHookName() string {
|
||||
return m.m.Name + "Hook"
|
||||
}
|
||||
|
||||
// MockHook is the definition of the hook function.
|
||||
func (m *Method) MockHook() string {
|
||||
args := m.args(m.argsSkip(), false, []string{
|
||||
"context.Context",
|
||||
"*meta.Key",
|
||||
})
|
||||
if m.kind == MethodPaged {
|
||||
args = append(args, "*filter.F")
|
||||
}
|
||||
|
||||
args = append(args, fmt.Sprintf("*%s", m.MockWrapType()))
|
||||
|
||||
switch m.kind {
|
||||
case MethodOperation:
|
||||
return fmt.Sprintf("%v func(%v) error", m.MockHookName(), strings.Join(args, ", "))
|
||||
case MethodGet:
|
||||
return fmt.Sprintf("%v func(%v) (*%v.%v, error)", m.MockHookName(), strings.Join(args, ", "), m.Version(), m.ReturnType)
|
||||
case MethodPaged:
|
||||
return fmt.Sprintf("%v func(%v) ([]*%v.%v, error)", m.MockHookName(), strings.Join(args, ", "), m.Version(), m.ItemType)
|
||||
default:
|
||||
panic(fmt.Errorf("invalid method kind: %v", m.kind))
|
||||
}
|
||||
}
|
||||
|
||||
// FcnArgs is the function signature for the definition of the method.
|
||||
func (m *Method) FcnArgs() string {
|
||||
args := m.args(m.argsSkip(), true, []string{
|
||||
"ctx context.Context",
|
||||
"key *meta.Key",
|
||||
})
|
||||
if m.kind == MethodPaged {
|
||||
args = append(args, "fl *filter.F")
|
||||
}
|
||||
|
||||
switch m.kind {
|
||||
case MethodOperation:
|
||||
return fmt.Sprintf("%v(%v) error", m.m.Name, strings.Join(args, ", "))
|
||||
case MethodGet:
|
||||
return fmt.Sprintf("%v(%v) (*%v.%v, error)", m.m.Name, strings.Join(args, ", "), m.Version(), m.ReturnType)
|
||||
case MethodPaged:
|
||||
return fmt.Sprintf("%v(%v) ([]*%v.%v, error)", m.m.Name, strings.Join(args, ", "), m.Version(), m.ItemType)
|
||||
default:
|
||||
panic(fmt.Errorf("invalid method kind: %v", m.kind))
|
||||
}
|
||||
}
|
||||
|
||||
// InterfaceFunc is the function declaration of the method in the interface.
|
||||
func (m *Method) InterfaceFunc() string {
|
||||
args := []string{
|
||||
"context.Context",
|
||||
"*meta.Key",
|
||||
}
|
||||
args = m.args(m.argsSkip(), false, args)
|
||||
if m.kind == MethodPaged {
|
||||
args = append(args, "*filter.F")
|
||||
}
|
||||
|
||||
switch m.kind {
|
||||
case MethodOperation:
|
||||
return fmt.Sprintf("%v(%v) error", m.m.Name, strings.Join(args, ", "))
|
||||
case MethodGet:
|
||||
return fmt.Sprintf("%v(%v) (*%v.%v, error)", m.m.Name, strings.Join(args, ", "), m.Version(), m.ReturnType)
|
||||
case MethodPaged:
|
||||
return fmt.Sprintf("%v(%v) ([]*%v.%v, error)", m.m.Name, strings.Join(args, ", "), m.Version(), m.ItemType)
|
||||
default:
|
||||
panic(fmt.Errorf("invalid method kind: %v", m.kind))
|
||||
}
|
||||
}
|
277
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/service.go
generated
vendored
Normal file
277
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/service.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package meta
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// ServiceInfo defines the entry for a Service that code will be generated for.
|
||||
type ServiceInfo struct {
|
||||
// Object is the Go name of the object type that the service deals
|
||||
// with. Example: "ForwardingRule".
|
||||
Object string
|
||||
// Service is the Go name of the service struct i.e. where the methods
|
||||
// are defined. Examples: "GlobalForwardingRules".
|
||||
Service string
|
||||
// Resource is the plural noun of the resource in the compute API URL (e.g.
|
||||
// "forwardingRules").
|
||||
Resource string
|
||||
// version if unspecified will be assumed to be VersionGA.
|
||||
version Version
|
||||
keyType KeyType
|
||||
serviceType reflect.Type
|
||||
|
||||
additionalMethods []string
|
||||
options int
|
||||
aggregatedListField string
|
||||
}
|
||||
|
||||
// Version returns the version of the Service, defaulting to GA if the version
|
||||
// is empty.
|
||||
func (i *ServiceInfo) Version() Version {
|
||||
if i.version == "" {
|
||||
return VersionGA
|
||||
}
|
||||
return i.version
|
||||
}
|
||||
|
||||
// VersionTitle returns the capitalized golang CamelCase name for the version.
|
||||
func (i *ServiceInfo) VersionTitle() string {
|
||||
switch i.Version() {
|
||||
case VersionGA:
|
||||
return "GA"
|
||||
case VersionAlpha:
|
||||
return "Alpha"
|
||||
case VersionBeta:
|
||||
return "Beta"
|
||||
}
|
||||
panic(fmt.Errorf("invalid version %q", i.Version()))
|
||||
}
|
||||
|
||||
// WrapType is the name of the wrapper service type.
|
||||
func (i *ServiceInfo) WrapType() string {
|
||||
switch i.Version() {
|
||||
case VersionGA:
|
||||
return i.Service
|
||||
case VersionAlpha:
|
||||
return "Alpha" + i.Service
|
||||
case VersionBeta:
|
||||
return "Beta" + i.Service
|
||||
}
|
||||
return "Invalid"
|
||||
}
|
||||
|
||||
// WrapTypeOps is the name of the additional operations type.
|
||||
func (i *ServiceInfo) WrapTypeOps() string {
|
||||
return i.WrapType() + "Ops"
|
||||
}
|
||||
|
||||
// FQObjectType is fully qualified name of the object (e.g. compute.Instance).
|
||||
func (i *ServiceInfo) FQObjectType() string {
|
||||
return fmt.Sprintf("%v.%v", i.Version(), i.Object)
|
||||
}
|
||||
|
||||
// ObjectListType is the compute List type for the object (contains Items field).
|
||||
func (i *ServiceInfo) ObjectListType() string {
|
||||
return fmt.Sprintf("%v.%vList", i.Version(), i.Object)
|
||||
}
|
||||
|
||||
// ObjectAggregatedListType is the compute AggregatedList type for the object (contains Items field).
|
||||
func (i *ServiceInfo) ObjectAggregatedListType() string {
|
||||
return fmt.Sprintf("%v.%vAggregatedList", i.Version(), i.Object)
|
||||
}
|
||||
|
||||
// MockWrapType is the name of the concrete mock for this type.
|
||||
func (i *ServiceInfo) MockWrapType() string {
|
||||
return "Mock" + i.WrapType()
|
||||
}
|
||||
|
||||
// MockField is the name of the field in the mock struct.
|
||||
func (i *ServiceInfo) MockField() string {
|
||||
return "Mock" + i.WrapType()
|
||||
}
|
||||
|
||||
// GCEWrapType is the name of the GCE wrapper type.
|
||||
func (i *ServiceInfo) GCEWrapType() string {
|
||||
return "GCE" + i.WrapType()
|
||||
}
|
||||
|
||||
// Field is the name of the GCE struct.
|
||||
func (i *ServiceInfo) Field() string {
|
||||
return "gce" + i.WrapType()
|
||||
}
|
||||
|
||||
// Methods returns a list of additional methods to generate code for.
|
||||
func (i *ServiceInfo) Methods() []*Method {
|
||||
methods := map[string]bool{}
|
||||
for _, m := range i.additionalMethods {
|
||||
methods[m] = true
|
||||
}
|
||||
|
||||
var ret []*Method
|
||||
for j := 0; j < i.serviceType.NumMethod(); j++ {
|
||||
m := i.serviceType.Method(j)
|
||||
if _, ok := methods[m.Name]; !ok {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, newMethod(i, m))
|
||||
methods[m.Name] = false
|
||||
}
|
||||
|
||||
for k, b := range methods {
|
||||
if b {
|
||||
panic(fmt.Errorf("method %q was not found in service %q", k, i.Service))
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// KeyIsGlobal is true if the key is global.
|
||||
func (i *ServiceInfo) KeyIsGlobal() bool {
|
||||
return i.keyType == Global
|
||||
}
|
||||
|
||||
// KeyIsRegional is true if the key is regional.
|
||||
func (i *ServiceInfo) KeyIsRegional() bool {
|
||||
return i.keyType == Regional
|
||||
}
|
||||
|
||||
// KeyIsZonal is true if the key is zonal.
|
||||
func (i *ServiceInfo) KeyIsZonal() bool {
|
||||
return i.keyType == Zonal
|
||||
}
|
||||
|
||||
// MakeKey returns the call used to create the appropriate key type.
|
||||
func (i *ServiceInfo) MakeKey(name, location string) string {
|
||||
switch i.keyType {
|
||||
case Global:
|
||||
return fmt.Sprintf("GlobalKey(%q)", name)
|
||||
case Regional:
|
||||
return fmt.Sprintf("RegionalKey(%q, %q)", name, location)
|
||||
case Zonal:
|
||||
return fmt.Sprintf("ZonalKey(%q, %q)", name, location)
|
||||
}
|
||||
return "Invalid"
|
||||
}
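
A quick illustration of MakeKey's output; the resource names are hypothetical and the struct literals are only a sketch inside package meta.

// exampleMakeKey is illustrative only.
func exampleMakeKey() {
	zonal := &ServiceInfo{keyType: Zonal}
	global := &ServiceInfo{keyType: Global}
	_ = zonal.MakeKey("disk-1", "us-central1-b") // `ZonalKey("disk-1", "us-central1-b")`
	_ = global.MakeKey("rule-1", "")             // `GlobalKey("rule-1")`
}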
|
||||
|
||||
// GenerateGet is true if the method is to be generated.
|
||||
func (i *ServiceInfo) GenerateGet() bool {
|
||||
return i.options&NoGet == 0
|
||||
}
|
||||
|
||||
// GenerateList is true if the method is to be generated.
|
||||
func (i *ServiceInfo) GenerateList() bool {
|
||||
return i.options&NoList == 0
|
||||
}
|
||||
|
||||
// GenerateDelete is true if the method is to be generated.
|
||||
func (i *ServiceInfo) GenerateDelete() bool {
|
||||
return i.options&NoDelete == 0
|
||||
}
|
||||
|
||||
// GenerateInsert is true if the method is to be generated.
|
||||
func (i *ServiceInfo) GenerateInsert() bool {
|
||||
return i.options&NoInsert == 0
|
||||
}
|
||||
|
||||
// GenerateCustomOps is true if we should generate an xxxOps interface for
|
||||
// adding additional methods to the generated interface.
|
||||
func (i *ServiceInfo) GenerateCustomOps() bool {
|
||||
return i.options&CustomOps != 0
|
||||
}
|
||||
|
||||
// AggregatedList is true if the method is to be generated.
|
||||
func (i *ServiceInfo) AggregatedList() bool {
|
||||
return i.options&AggregatedList != 0
|
||||
}
|
||||
|
||||
// AggregatedListField is the name of the field used for the aggregated list
|
||||
// call. This is typically the same as the name of the service, but can be
|
||||
// customized by setting the aggregatedListField field.
|
||||
func (i *ServiceInfo) AggregatedListField() string {
|
||||
if i.aggregatedListField == "" {
|
||||
return i.Service
|
||||
}
|
||||
return i.aggregatedListField
|
||||
}
|
||||
|
||||
// ServiceGroup is a grouping of the same service but at different API versions.
|
||||
type ServiceGroup struct {
|
||||
Alpha *ServiceInfo
|
||||
Beta *ServiceInfo
|
||||
GA *ServiceInfo
|
||||
}
|
||||
|
||||
// Service returns the name of the service, taken from any ServiceInfo belonging to the ServiceGroup.
|
||||
func (sg *ServiceGroup) Service() string {
|
||||
switch {
|
||||
case sg.GA != nil:
|
||||
return sg.GA.Service
|
||||
case sg.Alpha != nil:
|
||||
return sg.Alpha.Service
|
||||
case sg.Beta != nil:
|
||||
return sg.Beta.Service
|
||||
default:
|
||||
panic(errors.New("service group is empty"))
|
||||
}
|
||||
}
|
||||
|
||||
// HasGA returns true if this object has a GA representation.
|
||||
func (sg *ServiceGroup) HasGA() bool {
|
||||
return sg.GA != nil
|
||||
}
|
||||
|
||||
// HasAlpha returns true if this object has an Alpha representation.
|
||||
func (sg *ServiceGroup) HasAlpha() bool {
|
||||
return sg.Alpha != nil
|
||||
}
|
||||
|
||||
// HasBeta returns true if this object has a Beta representation.
|
||||
func (sg *ServiceGroup) HasBeta() bool {
|
||||
return sg.Beta != nil
|
||||
}
|
||||
|
||||
// groupServices groups the given services by version.
|
||||
func groupServices(services []*ServiceInfo) map[string]*ServiceGroup {
|
||||
ret := map[string]*ServiceGroup{}
|
||||
for _, si := range services {
|
||||
if _, ok := ret[si.Service]; !ok {
|
||||
ret[si.Service] = &ServiceGroup{}
|
||||
}
|
||||
group := ret[si.Service]
|
||||
switch si.Version() {
|
||||
case VersionAlpha:
|
||||
group.Alpha = si
|
||||
case VersionBeta:
|
||||
group.Beta = si
|
||||
case VersionGA:
|
||||
group.GA = si
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// AllServicesByGroup is a map of service name to ServiceGroup.
|
||||
var AllServicesByGroup map[string]*ServiceGroup
|
||||
|
||||
func init() {
|
||||
AllServicesByGroup = groupServices(AllServices)
|
||||
}
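
A hedged lookup sketch for the grouping above; it is illustrative and not part of the file. "Addresses" is one of the services registered in meta.go.

// exampleLookupGroup is illustrative only.
func exampleLookupGroup() {
	if sg, ok := AllServicesByGroup["Addresses"]; ok {
		_ = sg.HasGA()    // true: a GA Addresses entry is registered
		_ = sg.HasAlpha() // true: an alpha entry is registered as well
	}
}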
|
30
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD
generated
vendored
Normal file
30
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["mock.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider/providers/gce/cloud:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
228
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go
generated
vendored
Normal file
228
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go
generated
vendored
Normal file
@ -0,0 +1,228 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package mock encapsulates mocks for testing GCE provider functionality.
|
||||
// These methods are used to override the mock objects' methods in order to
|
||||
// intercept the standard processing and to add custom logic for test purposes.
|
||||
//
|
||||
// Example usage:
|
||||
// cloud := cloud.NewMockGCE()
|
||||
// cloud.MockTargetPools.AddInstanceHook = mock.AddInstanceHook
|
||||
package mock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
// gceObject is an abstraction of all GCE API objects in the go client
|
||||
type gceObject interface {
|
||||
MarshalJSON() ([]byte, error)
|
||||
}
|
||||
|
||||
// AddInstanceHook mocks adding an Instance to MockTargetPools
|
||||
func AddInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsAddInstanceRequest, m *cloud.MockTargetPools) error {
|
||||
pool, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in TargetPools", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
for _, instance := range req.Instances {
|
||||
pool.Instances = append(pool.Instances, instance.Instance)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveInstanceHook mocks removing an Instance from MockTargetPools
|
||||
func RemoveInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsRemoveInstanceRequest, m *cloud.MockTargetPools) error {
|
||||
pool, err := m.Get(ctx, key)
|
||||
if err != nil {
|
||||
return &googleapi.Error{
|
||||
Code: http.StatusNotFound,
|
||||
Message: fmt.Sprintf("Key: %s was not found in TargetPools", key.String()),
|
||||
}
|
||||
}
|
||||
|
||||
for _, instanceToRemove := range req.Instances {
|
||||
for i, instance := range pool.Instances {
|
||||
if instanceToRemove.Instance == instance {
|
||||
// Delete instance from pool.Instances without preserving order
|
||||
pool.Instances[i] = pool.Instances[len(pool.Instances)-1]
|
||||
pool.Instances = pool.Instances[:len(pool.Instances)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
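
Building on the package example above, a minimal wiring sketch; it is illustrative only, and the constructor and field shapes follow the usage shown in the package comment and in mock_test.go later in this change.

// exampleWireTargetPoolHooks is illustrative only.
func exampleWireTargetPoolHooks() {
	pr := &cloud.SingleProjectRouter{"test-project"} // positional form as used in mock_test.go
	mockCloud := cloud.NewMockGCE(pr)
	mockCloud.MockTargetPools.AddInstanceHook = AddInstanceHook
	mockCloud.MockTargetPools.RemoveInstanceHook = RemoveInstanceHook
	_ = mockCloud
}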
|
||||
|
||||
func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules map[meta.Key]*cloud.MockForwardingRulesObj, version meta.Version, projectID string) (bool, error) {
|
||||
if !key.Valid() {
|
||||
return false, fmt.Errorf("invalid GCE key (%+v)", key)
|
||||
}
|
||||
|
||||
if _, ok := mRules[*key]; ok {
|
||||
err := &googleapi.Error{
|
||||
Code: http.StatusConflict,
|
||||
Message: fmt.Sprintf("MockForwardingRule %v exists", key),
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
enc, err := obj.MarshalJSON()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var fwdRule alpha.ForwardingRule
|
||||
if err := json.Unmarshal(enc, &fwdRule); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Set the default values for the Alpha fields.
|
||||
if fwdRule.NetworkTier == "" {
|
||||
fwdRule.NetworkTier = cloud.NetworkTierDefault.ToGCEValue()
|
||||
}
|
||||
|
||||
fwdRule.Name = key.Name
|
||||
if fwdRule.SelfLink == "" {
|
||||
fwdRule.SelfLink = cloud.SelfLink(version, projectID, "forwardingRules", key)
|
||||
}
|
||||
|
||||
mRules[*key] = &cloud.MockForwardingRulesObj{Obj: fwdRule}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InsertFwdRuleHook mocks inserting a ForwardingRule. ForwardingRules are
|
||||
// expected to default to Premium tier if no NetworkTier is specified.
|
||||
func InsertFwdRuleHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "forwardingRules")
|
||||
return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionGA, projectID)
|
||||
}
|
||||
|
||||
// InsertBetaFwdRuleHook mocks inserting a BetaForwardingRule.
|
||||
func InsertBetaFwdRuleHook(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "forwardingRules")
|
||||
return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionBeta, projectID)
|
||||
}
|
||||
|
||||
// InsertAlphaFwdRuleHook mocks inserting an AlphaForwardingRule.
|
||||
func InsertAlphaFwdRuleHook(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionAlpha, "forwardingRules")
|
||||
return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionAlpha, projectID)
|
||||
}
|
||||
|
||||
// ipCounter is used to assign a unique IP address to Addresses created with no IP
|
||||
var ipCounter = 1
|
||||
|
||||
func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.Key]*cloud.MockAddressesObj, version meta.Version, projectID string) (bool, error) {
|
||||
if !key.Valid() {
|
||||
return false, fmt.Errorf("invalid GCE key (%+v)", key)
|
||||
}
|
||||
|
||||
if _, ok := mAddrs[*key]; ok {
|
||||
err := &googleapi.Error{
|
||||
Code: http.StatusConflict,
|
||||
Message: fmt.Sprintf("MockAddresses %v exists", key),
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
enc, err := obj.MarshalJSON()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var addr alpha.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Set default address type if not present.
|
||||
if addr.AddressType == "" {
|
||||
addr.AddressType = string(cloud.SchemeExternal)
|
||||
}
|
||||
|
||||
var existingAddresses []*ga.Address
|
||||
for _, obj := range mAddrs {
|
||||
existingAddresses = append(existingAddresses, obj.ToGA())
|
||||
}
|
||||
|
||||
for _, existingAddr := range existingAddresses {
|
||||
if addr.Address == existingAddr.Address {
|
||||
msg := fmt.Sprintf("MockAddresses IP %v in use", addr.Address)
|
||||
|
||||
// When the IP is already in use, this call returns a StatusBadRequest
|
||||
// if the address is an external address, and StatusConflict if an
|
||||
// internal address. This is to be consistent with actual GCE API.
|
||||
errorCode := http.StatusConflict
|
||||
if addr.AddressType == string(cloud.SchemeExternal) {
|
||||
errorCode = http.StatusBadRequest
|
||||
}
|
||||
|
||||
return false, &googleapi.Error{Code: errorCode, Message: msg}
|
||||
}
|
||||
}
|
||||
|
||||
// Set default values used in tests
|
||||
addr.Name = key.Name
|
||||
if addr.SelfLink == "" {
|
||||
addr.SelfLink = cloud.SelfLink(version, projectID, "addresses", key)
|
||||
}
|
||||
|
||||
if addr.Address == "" {
|
||||
addr.Address = fmt.Sprintf("1.2.3.%d", ipCounter)
|
||||
ipCounter++
|
||||
}
|
||||
|
||||
// Set the default values for the Alpha fields.
|
||||
if addr.NetworkTier == "" {
|
||||
addr.NetworkTier = cloud.NetworkTierDefault.ToGCEValue()
|
||||
}
|
||||
|
||||
mAddrs[*key] = &cloud.MockAddressesObj{Obj: addr}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InsertAddressHook mocks inserting an Address.
|
||||
func InsertAddressHook(ctx context.Context, key *meta.Key, obj *ga.Address, m *cloud.MockAddresses) (bool, error) {
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "addresses")
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionGA, projectID)
|
||||
}
|
||||
|
||||
// InsertBetaAddressHook mocks inserting a BetaAddress.
|
||||
func InsertBetaAddressHook(ctx context.Context, key *meta.Key, obj *beta.Address, m *cloud.MockAddresses) (bool, error) {
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses")
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionBeta, projectID)
|
||||
}
|
||||
|
||||
// InsertAlphaAddressHook mocks inserting an Address. Addresses are expected to
|
||||
// default to Premium tier if no NetworkTier is specified.
|
||||
func InsertAlphaAddressHook(ctx context.Context, key *meta.Key, obj *alpha.Address, m *cloud.MockAlphaAddresses) (bool, error) {
|
||||
projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionAlpha, "addresses")
|
||||
return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionAlpha, projectID)
|
||||
}
|
151
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock_test.go
generated
vendored
Normal file
151
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock_test.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
func TestMocks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// This test uses Addresses, but the logic that is generated is the same for
|
||||
// other basic objects.
|
||||
const region = "us-central1"
|
||||
|
||||
ctx := context.Background()
|
||||
pr := &SingleProjectRouter{"mock-project"}
|
||||
mock := NewMockGCE(pr)
|
||||
|
||||
keyAlpha := meta.RegionalKey("key-alpha", region)
|
||||
keyBeta := meta.RegionalKey("key-beta", region)
|
||||
keyGA := meta.RegionalKey("key-ga", region)
|
||||
key := keyAlpha
|
||||
|
||||
// Get not found.
|
||||
if _, err := mock.AlphaAddresses().Get(ctx, key); err == nil {
|
||||
t.Errorf("AlphaAddresses().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.BetaAddresses().Get(ctx, key); err == nil {
|
||||
t.Errorf("BetaAddresses().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.Addresses().Get(ctx, key); err == nil {
|
||||
t.Errorf("Addresses().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
// Insert.
|
||||
{
|
||||
obj := &alpha.Address{}
|
||||
if err := mock.AlphaAddresses().Insert(ctx, keyAlpha, obj); err != nil {
|
||||
t.Errorf("AlphaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &beta.Address{}
|
||||
if err := mock.BetaAddresses().Insert(ctx, keyBeta, obj); err != nil {
|
||||
t.Errorf("BetaAddresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &ga.Address{}
|
||||
if err := mock.Addresses().Insert(ctx, keyGA, &ga.Address{Name: "ga"}); err != nil {
|
||||
t.Errorf("Addresses().Insert(%v, %v, %v) = %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
}
|
||||
// Get across versions.
|
||||
if obj, err := mock.AlphaAddresses().Get(ctx, key); err != nil {
|
||||
t.Errorf("AlphaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.BetaAddresses().Get(ctx, key); err != nil {
|
||||
t.Errorf("BetaAddresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.Addresses().Get(ctx, key); err != nil {
|
||||
t.Errorf("Addresses().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
// List across versions.
|
||||
want := map[string]bool{"key-alpha": true, "key-beta": true, "key-ga": true}
|
||||
{
|
||||
objs, err := mock.AlphaAddresses().List(ctx, region, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("AlphaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.BetaAddresses().List(ctx, region, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("BetaAddresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.Addresses().List(ctx, region, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("Addresses().List(%v, %v, %v) = %v, %v; want _, nil", ctx, region, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Delete across versions.
|
||||
if err := mock.AlphaAddresses().Delete(ctx, keyAlpha); err != nil {
|
||||
t.Errorf("AlphaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err)
|
||||
}
|
||||
if err := mock.BetaAddresses().Delete(ctx, keyBeta); err != nil {
|
||||
t.Errorf("BetaAddresses().Delete(%v, %v) = %v; want nil", ctx, key, err)
|
||||
}
|
||||
if err := mock.Addresses().Delete(ctx, keyGA); err != nil {
|
||||
t.Errorf("Addresses().Delete(%v, %v) = %v; want nil", ctx, key, err)
|
||||
}
|
||||
// Delete not found.
|
||||
if err := mock.AlphaAddresses().Delete(ctx, keyAlpha); err == nil {
|
||||
t.Errorf("AlphaAddresses().Delete(%v, %v) = nil; want error", ctx, key)
|
||||
}
|
||||
if err := mock.BetaAddresses().Delete(ctx, keyBeta); err == nil {
|
||||
t.Errorf("BetaAddresses().Delete(%v, %v) = nil; want error", ctx, key)
|
||||
}
|
||||
if err := mock.Addresses().Delete(ctx, keyGA); err == nil {
|
||||
t.Errorf("Addresses().Delete(%v, %v) = nil; want error", ctx, key)
|
||||
}
|
||||
}
|
172
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go
generated
vendored
Normal file
172
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
// operation is a GCE operation that can be waited on.
|
||||
type operation interface {
|
||||
// isDone queries GCE for the done status. This call can block.
|
||||
isDone(ctx context.Context) (bool, error)
|
||||
// rateLimitKey returns the rate limit key to use for the given operation.
|
||||
// This rate limit will govern how fast the server will be polled for
|
||||
// operation completion status.
|
||||
rateLimitKey() *RateLimitKey
|
||||
}
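
To show how this interface is meant to be consumed, a hedged in-package sketch of a polling loop; the helper name and interval are illustrative, and the real wait logic elsewhere in this package is also expected to consult rateLimitKey() to pace its polling.

// pollOperation is an illustrative helper, not part of the vendored file; it
// assumes "time" is imported. It polls op.isDone until the operation is done,
// an error occurs, or the context is cancelled.
func pollOperation(ctx context.Context, op operation) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		done, err := op.isDone(ctx)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}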
|
||||
|
||||
type gaOperation struct {
|
||||
s *Service
|
||||
projectID string
|
||||
key *meta.Key
|
||||
}
|
||||
|
||||
func (o *gaOperation) String() string {
|
||||
return fmt.Sprintf("gaOperation{%q, %v}", o.projectID, o.key)
|
||||
}
|
||||
|
||||
func (o *gaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
var (
|
||||
op *ga.Operation
|
||||
err error
|
||||
)
|
||||
|
||||
switch o.key.Type() {
|
||||
case meta.Regional:
|
||||
op, err = o.s.GA.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
|
||||
case meta.Zonal:
|
||||
op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
|
||||
case meta.Global:
|
||||
op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
|
||||
default:
|
||||
return false, fmt.Errorf("invalid key type: %#v", o.key)
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return op != nil && op.Status == "DONE", nil
|
||||
}
|
||||
|
||||
func (o *gaOperation) rateLimitKey() *RateLimitKey {
|
||||
return &RateLimitKey{
|
||||
ProjectID: o.projectID,
|
||||
Operation: "Get",
|
||||
Service: "Operations",
|
||||
Version: meta.VersionGA,
|
||||
}
|
||||
}
|
||||
|
||||
type alphaOperation struct {
|
||||
s *Service
|
||||
projectID string
|
||||
key *meta.Key
|
||||
}
|
||||
|
||||
func (o *alphaOperation) String() string {
|
||||
return fmt.Sprintf("alphaOperation{%q, %v}", o.projectID, o.key)
|
||||
}
|
||||
|
||||
func (o *alphaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
var (
|
||||
op *alpha.Operation
|
||||
err error
|
||||
)
|
||||
|
||||
switch o.key.Type() {
|
||||
case meta.Regional:
|
||||
op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
|
||||
case meta.Zonal:
|
||||
op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
|
||||
case meta.Global:
|
||||
op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
|
||||
default:
|
||||
return false, fmt.Errorf("invalid key type: %#v", o.key)
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return op != nil && op.Status == "DONE", nil
|
||||
}
|
||||
|
||||
func (o *alphaOperation) rateLimitKey() *RateLimitKey {
|
||||
return &RateLimitKey{
|
||||
ProjectID: o.projectID,
|
||||
Operation: "Get",
|
||||
Service: "Operations",
|
||||
Version: meta.VersionAlpha,
|
||||
}
|
||||
}
|
||||
|
||||
type betaOperation struct {
|
||||
s *Service
|
||||
projectID string
|
||||
key *meta.Key
|
||||
}
|
||||
|
||||
func (o *betaOperation) String() string {
|
||||
return fmt.Sprintf("betaOperation{%q, %v}", o.projectID, o.key)
|
||||
}
|
||||
|
||||
func (o *betaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
var (
|
||||
op *beta.Operation
|
||||
err error
|
||||
)
|
||||
|
||||
switch o.key.Type() {
|
||||
case meta.Regional:
|
||||
op, err = o.s.Beta.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx)
|
||||
case meta.Zonal:
|
||||
op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx)
|
||||
case meta.Global:
|
||||
op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do()
|
||||
glog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx)
|
||||
default:
|
||||
return false, fmt.Errorf("invalid key type: %#v", o.key)
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return op != nil && op.Status == "DONE", nil
|
||||
}
|
||||
|
||||
func (o *betaOperation) rateLimitKey() *RateLimitKey {
|
||||
return &RateLimitKey{
|
||||
ProjectID: o.projectID,
|
||||
Operation: "Get",
|
||||
Service: "Operations",
|
||||
Version: meta.VersionBeta,
|
||||
}
|
||||
}
|
45
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/project.go
generated
vendored
Normal file
45
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/project.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
// ProjectRouter routes service calls to the appropriate GCE project.
|
||||
type ProjectRouter interface {
|
||||
// ProjectID returns the project ID (non-numeric) to be used for a call
|
||||
// to an API (version,service). Example tuples: ("ga", "ForwardingRules"),
|
||||
// ("alpha", "GlobalAddresses").
|
||||
//
|
||||
// This allows for plumbing different service calls to the appropriate
|
||||
// project, for instance, networking services to a separate project
|
||||
// than instance management.
|
||||
ProjectID(ctx context.Context, version meta.Version, service string) string
|
||||
}
|
||||
|
||||
// SingleProjectRouter routes all service calls to the same project ID.
|
||||
type SingleProjectRouter struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
// ProjectID returns the project ID to be used for a call to the API.
|
||||
func (r *SingleProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string {
|
||||
return r.ID
|
||||
}
|
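For illustration only (not part of the vendored diff), a minimal sketch of a custom ProjectRouter that sends networking calls to a separate project, as the interface comment above describes. The project names and service strings are hypothetical.

```go
package main

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

// splitProjectRouter routes networking services to a dedicated project and
// everything else to a default project (both names are made up here).
type splitProjectRouter struct {
	defaultProject string
	networkProject string
}

// ProjectID implements cloud.ProjectRouter.
func (r *splitProjectRouter) ProjectID(ctx context.Context, version meta.Version, service string) string {
	switch service {
	case "Networks", "Subnetworks", "Firewalls": // illustrative service names
		return r.networkProject
	default:
		return r.defaultProject
	}
}

// Compile-time check that the sketch satisfies the interface.
var _ cloud.ProjectRouter = &splitProjectRouter{}
```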
68
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
generated
vendored
Normal file
68
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/ratelimit.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
// RateLimitKey is a key identifying the operation to be rate limited. The rate limit
|
||||
// queue will be determined based on the contents of RateLimitKey.
|
||||
type RateLimitKey struct {
|
||||
// ProjectID is the non-numeric ID of the project.
|
||||
ProjectID string
|
||||
// Operation is the specific method being invoked (e.g. "Get", "List").
|
||||
Operation string
|
||||
// Version is the API version of the call.
|
||||
Version meta.Version
|
||||
// Service is the service being invoked (e.g. "Firewalls", "BackendServices")
|
||||
Service string
|
||||
}
|
||||
|
||||
// RateLimiter is the interface for a rate limiting policy.
|
||||
type RateLimiter interface {
|
||||
// Accept uses the RateLimitKey to derive a sleep time for the calling
|
||||
// goroutine. This call will block until the operation is ready for
|
||||
// execution.
|
||||
//
|
||||
// Accept returns an error if the given context ctx was canceled
|
||||
// while waiting for acceptance into the queue.
|
||||
Accept(ctx context.Context, key *RateLimitKey) error
|
||||
}
|
||||
|
||||
// NopRateLimiter is a rate limiter that performs no rate limiting.
|
||||
type NopRateLimiter struct {
|
||||
}
|
||||
|
||||
// Accept the operation to be rate limited.
|
||||
func (*NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
|
||||
// Rate limit polling of the Operation status to avoid hammering GCE
|
||||
// for the status of an operation.
|
||||
const pollTime = time.Duration(1) * time.Second
|
||||
if key.Operation == "Get" && key.Service == "Operations" {
|
||||
select {
|
||||
case <-time.NewTimer(pollTime).C:
|
||||
break
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
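As a hedged sketch of the RateLimiter contract above (again, not part of the diff), the limiter below enforces a fixed minimum spacing between accepted calls and honors context cancellation, the two behaviors the interface comment asks for.

```go
package main

import (
	"context"
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

// minIntervalRateLimiter is a hypothetical RateLimiter that spaces out all
// accepted calls by a fixed interval, regardless of the key.
type minIntervalRateLimiter struct {
	mu       sync.Mutex
	next     time.Time
	interval time.Duration
}

// Accept blocks until the caller's reserved slot arrives or ctx is canceled.
func (rl *minIntervalRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) error {
	rl.mu.Lock()
	now := time.Now()
	if rl.next.Before(now) {
		rl.next = now
	}
	delay := rl.next.Sub(now)
	rl.next = rl.next.Add(rl.interval) // reserve the next slot
	rl.mu.Unlock()

	if delay <= 0 {
		return nil
	}
	select {
	case <-time.After(delay):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Compile-time check that the sketch satisfies the interface.
var _ cloud.RateLimiter = &minIntervalRateLimiter{}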
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go
generated
vendored
Normal file
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// Service is the top-level adapter for all of the different compute API
|
||||
// versions.
|
||||
type Service struct {
|
||||
GA *ga.Service
|
||||
Alpha *alpha.Service
|
||||
Beta *beta.Service
|
||||
ProjectRouter ProjectRouter
|
||||
RateLimiter RateLimiter
|
||||
}
|
||||
|
||||
// wrapOperation wraps a GCE anyOp in a version-generic operation type.
|
||||
func (s *Service) wrapOperation(anyOp interface{}) (operation, error) {
|
||||
switch o := anyOp.(type) {
|
||||
case *ga.Operation:
|
||||
r, err := ParseResourceURL(o.SelfLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &gaOperation{s, r.ProjectID, r.Key}, nil
|
||||
case *alpha.Operation:
|
||||
r, err := ParseResourceURL(o.SelfLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &alphaOperation{s, r.ProjectID, r.Key}, nil
|
||||
case *beta.Operation:
|
||||
r, err := ParseResourceURL(o.SelfLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &betaOperation{s, r.ProjectID, r.Key}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid type %T", anyOp)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForCompletion of a long running operation. This will poll the state of
|
||||
// GCE for the completion status of the given operation. genericOp can be one
|
||||
// of alpha, beta, ga Operation types.
|
||||
func (s *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error {
|
||||
op, err := s.wrapOperation(genericOp)
|
||||
if err != nil {
|
||||
glog.Errorf("wrapOperation(%+v) error: %v", genericOp, err)
|
||||
return err
|
||||
}
|
||||
for done, err := op.isDone(ctx); !done; done, err = op.isDone(ctx) {
|
||||
if err != nil {
|
||||
glog.V(4).Infof("op.isDone(%v) error; op = %v, err = %v", ctx, op, err)
|
||||
return err
|
||||
}
|
||||
glog.V(5).Infof("op.isDone(%v) waiting; op = %v", ctx, op)
|
||||
s.RateLimiter.Accept(ctx, op.rateLimitKey())
|
||||
}
|
||||
glog.V(5).Infof("op.isDone(%v) complete; op = %v", ctx, op)
|
||||
return nil
|
||||
}
|
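A short sketch of how the Service adapter above would be wired up and used to block on a returned operation. The per-version compute clients are assumed to be constructed elsewhere; only the field names and WaitForCompletion signature come from the file above.

```go
package main

import (
	"context"

	alpha "google.golang.org/api/compute/v0.alpha"
	beta "google.golang.org/api/compute/v0.beta"
	ga "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

// newService wires per-version compute clients into the generic adapter,
// routing everything to a single project and applying no rate limiting.
func newService(gaSvc *ga.Service, alphaSvc *alpha.Service, betaSvc *beta.Service, project string) *cloud.Service {
	return &cloud.Service{
		GA:            gaSvc,
		Alpha:         alphaSvc,
		Beta:          betaSvc,
		ProjectRouter: &cloud.SingleProjectRouter{ID: project},
		RateLimiter:   &cloud.NopRateLimiter{},
	}
}

// waitForInsert blocks until the given GA operation finishes; op would be the
// *ga.Operation returned by a mutating call such as Addresses.Insert(...).Do().
func waitForInsert(ctx context.Context, s *cloud.Service, op *ga.Operation) error {
	return s.WaitForCompletion(ctx, op)
}
```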
158
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils.go
generated
vendored
Normal file
158
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
const (
|
||||
gaPrefix = "https://www.googleapis.com/compute/v1/"
|
||||
alphaPrefix = "https://www.googleapis.com/compute/alpha/"
|
||||
betaPrefix = "https://www.googleapis.com/compute/beta/"
|
||||
)
|
||||
|
||||
// ResourceID identifies a GCE resource as parsed from compute resource URL.
|
||||
type ResourceID struct {
|
||||
ProjectID string
|
||||
Resource string
|
||||
Key *meta.Key
|
||||
}
|
||||
|
||||
// Equal returns true if two resource IDs are equal.
|
||||
func (r *ResourceID) Equal(other *ResourceID) bool {
|
||||
if r.ProjectID != other.ProjectID || r.Resource != other.Resource {
|
||||
return false
|
||||
}
|
||||
if r.Key != nil && other.Key != nil {
|
||||
return *r.Key == *other.Key
|
||||
}
|
||||
if r.Key == nil && other.Key == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParseResourceURL parses resource URLs of the following formats:
|
||||
//
|
||||
// projects/<proj>/global/<res>/<name>
|
||||
// projects/<proj>/regions/<region>/<res>/<name>
|
||||
// projects/<proj>/zones/<zone>/<res>/<name>
|
||||
// [https://www.googleapis.com/compute/<ver>]/projects/<proj>/global/<res>/<name>
|
||||
// [https://www.googleapis.com/compute/<ver>]/projects/<proj>/regions/<region>/<res>/<name>
|
||||
// [https://www.googleapis.com/compute/<ver>]/projects/<proj>/zones/<zone>/<res>/<name>
|
||||
func ParseResourceURL(url string) (*ResourceID, error) {
|
||||
errNotValid := fmt.Errorf("%q is not a valid resource URL", url)
|
||||
|
||||
// Remove the prefix up to ...projects/
|
||||
projectsIndex := strings.Index(url, "/projects/")
|
||||
if projectsIndex >= 0 {
|
||||
url = url[projectsIndex+1:]
|
||||
}
|
||||
|
||||
parts := strings.Split(url, "/")
|
||||
if len(parts) < 2 || parts[0] != "projects" {
|
||||
return nil, errNotValid
|
||||
}
|
||||
|
||||
ret := &ResourceID{ProjectID: parts[1]}
|
||||
if len(parts) == 2 {
|
||||
ret.Resource = "projects"
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
if len(parts) < 4 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
|
||||
if len(parts) == 4 {
|
||||
switch parts[2] {
|
||||
case "regions":
|
||||
ret.Resource = "regions"
|
||||
ret.Key = meta.GlobalKey(parts[3])
|
||||
return ret, nil
|
||||
case "zones":
|
||||
ret.Resource = "zones"
|
||||
ret.Key = meta.GlobalKey(parts[3])
|
||||
return ret, nil
|
||||
default:
|
||||
return nil, errNotValid
|
||||
}
|
||||
}
|
||||
|
||||
switch parts[2] {
|
||||
case "global":
|
||||
if len(parts) != 5 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = parts[3]
|
||||
ret.Key = meta.GlobalKey(parts[4])
|
||||
return ret, nil
|
||||
case "regions":
|
||||
if len(parts) != 6 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = parts[4]
|
||||
ret.Key = meta.RegionalKey(parts[5], parts[3])
|
||||
return ret, nil
|
||||
case "zones":
|
||||
if len(parts) != 6 {
|
||||
return nil, errNotValid
|
||||
}
|
||||
ret.Resource = parts[4]
|
||||
ret.Key = meta.ZonalKey(parts[5], parts[3])
|
||||
return ret, nil
|
||||
}
|
||||
return nil, errNotValid
|
||||
}
|
||||
|
||||
func copyViaJSON(dest, src interface{}) error {
|
||||
bytes, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(bytes, dest)
|
||||
}
|
||||
|
||||
// SelfLink returns the self link URL for the given object.
|
||||
func SelfLink(ver meta.Version, project, resource string, key *meta.Key) string {
|
||||
var prefix string
|
||||
switch ver {
|
||||
case meta.VersionAlpha:
|
||||
prefix = alphaPrefix
|
||||
case meta.VersionBeta:
|
||||
prefix = betaPrefix
|
||||
case meta.VersionGA:
|
||||
prefix = gaPrefix
|
||||
default:
|
||||
prefix = "invalid-prefix"
|
||||
}
|
||||
|
||||
switch key.Type() {
|
||||
case meta.Zonal:
|
||||
return fmt.Sprintf("%sprojects/%s/zones/%s/%s/%s", prefix, project, key.Zone, resource, key.Name)
|
||||
case meta.Regional:
|
||||
return fmt.Sprintf("%sprojects/%s/regions/%s/%s/%s", prefix, project, key.Region, resource, key.Name)
|
||||
case meta.Global:
|
||||
return fmt.Sprintf("%sprojects/%s/%s/%s", prefix, project, resource, key.Name)
|
||||
}
|
||||
return "invalid-self-link"
|
||||
}
|
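The two helpers above are inverses for well-formed resource URLs. A minimal usage sketch (the project, zone, and instance names are invented):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	// Parse a full compute URL into project, resource type, and key.
	id, err := cloud.ParseResourceURL(
		"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b/instances/vm-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.ProjectID, id.Resource) // my-project instances

	// Rebuild the self link for the same resource at the GA version.
	fmt.Println(cloud.SelfLink(meta.VersionGA, id.ProjectID, id.Resource, id.Key))
}
```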
208
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils_test.go
generated
vendored
Normal file
208
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/utils_test.go
generated
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
func TestParseResourceURL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
in string
|
||||
r *ResourceID
|
||||
}{
|
||||
{
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project",
|
||||
&ResourceID{"some-gce-project", "projects", nil},
|
||||
},
|
||||
{
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project/regions/us-central1",
|
||||
&ResourceID{"some-gce-project", "regions", meta.GlobalKey("us-central1")},
|
||||
},
|
||||
{
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project/zones/us-central1-b",
|
||||
&ResourceID{"some-gce-project", "zones", meta.GlobalKey("us-central1-b")},
|
||||
},
|
||||
{
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf",
|
||||
&ResourceID{"some-gce-project", "operations", meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")},
|
||||
},
|
||||
{
|
||||
"https://www.googleapis.com/compute/alpha/projects/some-gce-project/regions/us-central1/addresses/my-address",
|
||||
&ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "us-central1")},
|
||||
},
|
||||
{
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project/zones/us-central1-c/instances/instance-1",
|
||||
&ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")},
|
||||
},
|
||||
{
|
||||
"http://localhost:3990/compute/beta/projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf",
|
||||
&ResourceID{"some-gce-project", "operations", meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")},
|
||||
},
|
||||
{
|
||||
"http://localhost:3990/compute/alpha/projects/some-gce-project/regions/dev-central1/addresses/my-address",
|
||||
&ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "dev-central1")},
|
||||
},
|
||||
{
|
||||
"http://localhost:3990/compute/v1/projects/some-gce-project/zones/dev-central1-std/instances/instance-1",
|
||||
&ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "dev-central1-std")},
|
||||
},
|
||||
{
|
||||
"projects/some-gce-project",
|
||||
&ResourceID{"some-gce-project", "projects", nil},
|
||||
},
|
||||
{
|
||||
"projects/some-gce-project/regions/us-central1",
|
||||
&ResourceID{"some-gce-project", "regions", meta.GlobalKey("us-central1")},
|
||||
},
|
||||
{
|
||||
"projects/some-gce-project/zones/us-central1-b",
|
||||
&ResourceID{"some-gce-project", "zones", meta.GlobalKey("us-central1-b")},
|
||||
},
|
||||
{
|
||||
"projects/some-gce-project/global/operations/operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf",
|
||||
&ResourceID{"some-gce-project", "operations", meta.GlobalKey("operation-1513289952196-56054460af5a0-b1dae0c3-9bbf9dbf")},
|
||||
},
|
||||
{
|
||||
"projects/some-gce-project/regions/us-central1/addresses/my-address",
|
||||
&ResourceID{"some-gce-project", "addresses", meta.RegionalKey("my-address", "us-central1")},
|
||||
},
|
||||
{
|
||||
"projects/some-gce-project/zones/us-central1-c/instances/instance-1",
|
||||
&ResourceID{"some-gce-project", "instances", meta.ZonalKey("instance-1", "us-central1-c")},
|
||||
},
|
||||
} {
|
||||
r, err := ParseResourceURL(tc.in)
|
||||
if err != nil {
|
||||
t.Errorf("ParseResourceURL(%q) = %+v, %v; want _, nil", tc.in, r, err)
|
||||
continue
|
||||
}
|
||||
if !r.Equal(tc.r) {
|
||||
t.Errorf("ParseResourceURL(%q) = %+v, nil; want %+v, nil", tc.in, r, tc.r)
|
||||
}
|
||||
}
|
||||
// Malformed URLs.
|
||||
for _, tc := range []string{
|
||||
"",
|
||||
"/",
|
||||
"/a",
|
||||
"/a/b",
|
||||
"/a/b/c",
|
||||
"/a/b/c/d",
|
||||
"/a/b/c/d/e",
|
||||
"/a/b/c/d/e/f",
|
||||
"https://www.googleapis.com/compute/v1/projects/some-gce-project/global",
|
||||
"projects/some-gce-project/global",
|
||||
"projects/some-gce-project/global/foo/bar/baz",
|
||||
"projects/some-gce-project/zones/us-central1-c/res",
|
||||
"projects/some-gce-project/zones/us-central1-c/res/name/extra",
|
||||
} {
|
||||
r, err := ParseResourceURL(tc)
|
||||
if err == nil {
|
||||
t.Errorf("ParseResourceURL(%q) = %+v, %v, want _, error", tc, r, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type A struct {
|
||||
A, B, C string
|
||||
}
|
||||
|
||||
type B struct {
|
||||
A, B, D string
|
||||
}
|
||||
|
||||
type E struct{}
|
||||
|
||||
func (*E) MarshalJSON() ([]byte, error) {
|
||||
return nil, errors.New("injected error")
|
||||
}
|
||||
|
||||
func TestCopyVisJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var b B
|
||||
srcA := &A{"aa", "bb", "cc"}
|
||||
err := copyViaJSON(&b, srcA)
|
||||
if err != nil {
|
||||
t.Errorf(`copyViaJSON(&b, %+v) = %v, want nil`, srcA, err)
|
||||
} else {
|
||||
expectedB := B{"aa", "bb", ""}
|
||||
if b != expectedB {
|
||||
t.Errorf("b == %+v, want %+v", b, expectedB)
|
||||
}
|
||||
}
|
||||
|
||||
var a A
|
||||
srcB := &B{"aaa", "bbb", "ccc"}
|
||||
err = copyViaJSON(&a, srcB)
|
||||
if err != nil {
|
||||
t.Errorf(`copyViaJSON(&a, %+v) = %v, want nil`, srcB, err)
|
||||
} else {
|
||||
expectedA := A{"aaa", "bbb", ""}
|
||||
if a != expectedA {
|
||||
t.Errorf("a == %+v, want %+v", a, expectedA)
|
||||
}
|
||||
}
|
||||
|
||||
if err := copyViaJSON(&a, &E{}); err == nil {
|
||||
t.Errorf("copyViaJSON(&a, &E{}) = nil, want error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelfLink(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
ver meta.Version
|
||||
project string
|
||||
resource string
|
||||
key *meta.Key
|
||||
want string
|
||||
}{
|
||||
{
|
||||
meta.VersionAlpha,
|
||||
"proj1",
|
||||
"addresses",
|
||||
meta.RegionalKey("key1", "us-central1"),
|
||||
"https://www.googleapis.com/compute/alpha/projects/proj1/regions/us-central1/addresses/key1",
|
||||
},
|
||||
{
|
||||
meta.VersionBeta,
|
||||
"proj3",
|
||||
"disks",
|
||||
meta.ZonalKey("key2", "us-central1-b"),
|
||||
"https://www.googleapis.com/compute/beta/projects/proj3/zones/us-central1-b/disks/key2",
|
||||
},
|
||||
{
|
||||
meta.VersionGA,
|
||||
"proj4",
|
||||
"urlMaps",
|
||||
meta.GlobalKey("key3"),
|
||||
"https://www.googleapis.com/compute/v1/projects/proj4/urlMaps/key3",
|
||||
},
|
||||
} {
|
||||
if link := SelfLink(tc.ver, tc.project, tc.resource, tc.key); link != tc.want {
|
||||
t.Errorf("SelfLink(%v, %q, %q, %v) = %v, want %q", tc.ver, tc.project, tc.resource, tc.key, link, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
98
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
98
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
@ -21,7 +21,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -31,6 +30,13 @@ import (
|
||||
gcfg "gopkg.in/gcfg.v1"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
container "google.golang.org/api/container/v1"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -42,18 +48,12 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
container "google.golang.org/api/container/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -107,6 +107,7 @@ type GCECloud struct {
|
||||
serviceBeta *computebeta.Service
|
||||
serviceAlpha *computealpha.Service
|
||||
containerService *container.Service
|
||||
tpuService *tpuService
|
||||
client clientset.Interface
|
||||
clientBuilder controller.ControllerClientBuilder
|
||||
eventBroadcaster record.EventBroadcaster
|
||||
@ -148,6 +149,9 @@ type GCECloud struct {
|
||||
// the corresponding api is enabled.
|
||||
// If not enabled, it should return error.
|
||||
AlphaFeatureGate *AlphaFeatureGate
|
||||
|
||||
// New code generated interface to the GCE compute library.
|
||||
c cloud.Cloud
|
||||
}
|
||||
|
||||
// TODO: replace gcfg with json
|
||||
@ -213,9 +217,22 @@ func init() {
|
||||
})
|
||||
}
|
||||
|
||||
// Raw access to the underlying GCE service, probably should only be used for e2e tests
|
||||
func (g *GCECloud) GetComputeService() *compute.Service {
|
||||
return g.service
|
||||
// Services is the set of all versions of the compute service.
|
||||
type Services struct {
|
||||
// GA, Alpha, Beta versions of the compute API.
|
||||
GA *compute.Service
|
||||
Alpha *computealpha.Service
|
||||
Beta *computebeta.Service
|
||||
}
|
||||
|
||||
// ComputeServices returns access to the internal compute services.
|
||||
func (g *GCECloud) ComputeServices() *Services {
|
||||
return &Services{g.service, g.serviceAlpha, g.serviceBeta}
|
||||
}
|
||||
|
||||
// Compute returns the generated stubs for the compute API.
|
||||
func (g *GCECloud) Compute() cloud.Cloud {
|
||||
return g.c
|
||||
}
|
||||
|
||||
// newGCECloud creates a new instance of GCECloud.
|
||||
@ -236,7 +253,6 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) {
|
||||
return nil, err
|
||||
}
|
||||
return CreateGCECloud(cloudConfig)
|
||||
|
||||
}
|
||||
|
||||
func readConfig(reader io.Reader) (*ConfigFile, error) {
|
||||
@ -356,11 +372,12 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
|
||||
// If no tokenSource is specified, uses oauth2.DefaultTokenSource.
|
||||
// If managedZones is nil / empty all zones in the region will be managed.
|
||||
func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
// Remove any pre-release version and build metadata from the semver, leaving only the MAJOR.MINOR.PATCH portion.
|
||||
// See http://semver.org/.
|
||||
// Remove any pre-release version and build metadata from the semver,
|
||||
// leaving only the MAJOR.MINOR.PATCH portion. See http://semver.org/.
|
||||
version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v")
|
||||
|
||||
// Create a user-agent header append string to supply to the Google API clients, to identify Kubernetes as the origin of the GCP API calls.
|
||||
// Create a user-agent header append string to supply to the Google API
|
||||
// clients, to identify Kubernetes as the origin of the GCP API calls.
|
||||
userAgent := fmt.Sprintf("Kubernetes/%s (%s %s)", version, runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
// Use ProjectID for NetworkProjectID, if it wasn't explicitly set.
|
||||
@ -414,6 +431,11 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
}
|
||||
containerService.UserAgent = userAgent
|
||||
|
||||
tpuService, err := newTPUService(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ProjectID and NetworkProjectID may be project number or name.
|
||||
projID, netProjID := tryConvertToProjectNames(config.ProjectID, config.NetworkProjectID, service)
|
||||
onXPN := projID != netProjID
|
||||
@ -480,6 +502,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
serviceAlpha: serviceAlpha,
|
||||
serviceBeta: serviceBeta,
|
||||
containerService: containerService,
|
||||
tpuService: tpuService,
|
||||
projectID: projID,
|
||||
networkProjectID: netProjID,
|
||||
onXPN: onXPN,
|
||||
@ -499,6 +522,13 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
}
|
||||
|
||||
gce.manager = &gceServiceManager{gce}
|
||||
gce.c = cloud.NewGCE(&cloud.Service{
|
||||
GA: service,
|
||||
Alpha: serviceAlpha,
|
||||
Beta: serviceBeta,
|
||||
ProjectRouter: &gceProjectRouter{gce},
|
||||
RateLimiter: &gceRateLimiter{gce},
|
||||
})
|
||||
|
||||
return gce, nil
|
||||
}
|
||||
@ -694,20 +724,6 @@ func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) {
|
||||
}
|
||||
}
|
||||
|
||||
// Known-useless DNS search path.
|
||||
var uselessDNSSearchRE = regexp.MustCompile(`^[0-9]+.google.internal.$`)
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (gce *GCECloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
// GCE has too many search paths by default. Filter the ones we know are useless.
|
||||
for _, s := range searches {
|
||||
if !uselessDNSSearchRE.MatchString(s) {
|
||||
srchOut = append(srchOut, s)
|
||||
}
|
||||
}
|
||||
return nameservers, srchOut
|
||||
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID
|
||||
func (gce *GCECloud) HasClusterID() bool {
|
||||
return true
|
||||
@ -737,20 +753,6 @@ func gceSubnetworkURL(apiEndpoint, project, region, subnetwork string) string {
|
||||
return apiEndpoint + strings.Join([]string{"projects", project, "regions", region, "subnetworks", subnetwork}, "/")
|
||||
}
|
||||
|
||||
// getProjectIDInURL parses full resource URLS and shorter URLS
|
||||
// https://www.googleapis.com/compute/v1/projects/myproject/global/networks/mycustom
|
||||
// projects/myproject/global/networks/mycustom
|
||||
// All return "myproject"
|
||||
func getProjectIDInURL(urlStr string) (string, error) {
|
||||
fields := strings.Split(urlStr, "/")
|
||||
for i, v := range fields {
|
||||
if v == "projects" && i < len(fields)-1 {
|
||||
return fields[i+1], nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("could not find project field in url: %v", urlStr)
|
||||
}
|
||||
|
||||
// getRegionInURL parses full resource URLS and shorter URLS
|
||||
// https://www.googleapis.com/compute/v1/projects/myproject/regions/us-central1/subnetworks/a
|
||||
// projects/myproject/regions/us-central1/subnetworks/a
|
||||
@ -848,7 +850,13 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
|
||||
glog.Infof("Using existing Token Source %#v", tokenSource)
|
||||
}
|
||||
|
||||
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
backoff := wait.Backoff{
|
||||
// These values will add up to about a minute. See #56293 for background.
|
||||
Duration: time.Second,
|
||||
Factor: 1.4,
|
||||
Steps: 10,
|
||||
}
|
||||
if err := wait.ExponentialBackoff(backoff, func() (bool, error) {
|
||||
if _, err := tokenSource.Token(); err != nil {
|
||||
glog.Errorf("error fetching initial token: %v", err)
|
||||
return false, nil
|
||||
|
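The newOauthClient hunk above swaps a fixed-interval poll for an exponential backoff when warming up the token source. A self-contained sketch of that pattern, using the same backoff shape (roughly a minute total across ten steps); the function name is hypothetical.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"

	"k8s.io/apimachinery/pkg/util/wait"
)

// warmUpToken retries the initial token fetch with exponential backoff,
// mirroring the change in the diff above.
func warmUpToken(ts oauth2.TokenSource) error {
	backoff := wait.Backoff{
		Duration: time.Second,
		Factor:   1.4,
		Steps:    10,
	}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		if _, err := ts.Token(); err != nil {
			fmt.Printf("error fetching initial token: %v\n", err)
			return false, nil // keep retrying
		}
		return true, nil
	})
}
```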
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go
generated
vendored
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go
generated
vendored
@ -20,9 +20,10 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
)
|
||||
|
||||
type addressManager struct {
|
||||
@ -31,13 +32,13 @@ type addressManager struct {
|
||||
name string
|
||||
serviceName string
|
||||
targetIP string
|
||||
addressType lbScheme
|
||||
addressType cloud.LbScheme
|
||||
region string
|
||||
subnetURL string
|
||||
tryRelease bool
|
||||
}
|
||||
|
||||
func newAddressManager(svc CloudAddressService, serviceName, region, subnetURL, name, targetIP string, addressType lbScheme) *addressManager {
|
||||
func newAddressManager(svc CloudAddressService, serviceName, region, subnetURL, name, targetIP string, addressType cloud.LbScheme) *addressManager {
|
||||
return &addressManager{
|
||||
svc: svc,
|
||||
logPrefix: fmt.Sprintf("AddressManager(%q)", name),
|
||||
@ -63,7 +64,7 @@ func (am *addressManager) HoldAddress() (string, error) {
|
||||
// calls since it indicates whether a Delete is necessary before Reserve.
|
||||
glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType)
|
||||
// Get the address in case it was orphaned earlier
|
||||
addr, err := am.svc.GetBetaRegionAddress(am.name, am.region)
|
||||
addr, err := am.svc.GetRegionAddress(am.name, am.region)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return "", err
|
||||
}
|
||||
@ -118,7 +119,7 @@ func (am *addressManager) ReleaseAddress() error {
|
||||
func (am *addressManager) ensureAddressReservation() (string, error) {
|
||||
// Try reserving the IP with controller-owned address name
|
||||
// If am.targetIP is an empty string, a new IP will be created.
|
||||
newAddr := &computebeta.Address{
|
||||
newAddr := &compute.Address{
|
||||
Name: am.name,
|
||||
Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, am.serviceName),
|
||||
Address: am.targetIP,
|
||||
@ -126,7 +127,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
|
||||
Subnetwork: am.subnetURL,
|
||||
}
|
||||
|
||||
reserveErr := am.svc.ReserveBetaRegionAddress(newAddr, am.region)
|
||||
reserveErr := am.svc.ReserveRegionAddress(newAddr, am.region)
|
||||
if reserveErr == nil {
|
||||
if newAddr.Address != "" {
|
||||
glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name)
|
||||
@ -155,7 +156,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
|
||||
|
||||
// Reserving the address failed due to a conflict or bad request. The address manager just checked that no address
|
||||
// exists with the name, so it may belong to the user.
|
||||
addr, err := am.svc.GetBetaRegionAddressByIP(am.region, am.targetIP)
|
||||
addr, err := am.svc.GetRegionAddressByIP(am.region, am.targetIP)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get address by IP %q after reservation attempt, err: %q, reservation err: %q", am.targetIP, err, reserveErr)
|
||||
}
|
||||
@ -178,7 +179,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
|
||||
return addr.Address, nil
|
||||
}
|
||||
|
||||
func (am *addressManager) validateAddress(addr *computebeta.Address) error {
|
||||
func (am *addressManager) validateAddress(addr *compute.Address) error {
|
||||
if am.targetIP != "" && am.targetIP != addr.Address {
|
||||
return fmt.Errorf("address %q does not have the expected IP %q, actual: %q", addr.Name, am.targetIP, addr.Address)
|
||||
}
|
||||
@ -189,7 +190,7 @@ func (am *addressManager) validateAddress(addr *computebeta.Address) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *addressManager) isManagedAddress(addr *computebeta.Address) bool {
|
||||
func (am *addressManager) isManagedAddress(addr *compute.Address) bool {
|
||||
return addr.Name == am.name
|
||||
}
|
||||
|
||||
|
43
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager_test.go
generated
vendored
43
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager_test.go
generated
vendored
@ -21,7 +21,8 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
)
|
||||
|
||||
const testSvcName = "my-service"
|
||||
@ -34,8 +35,8 @@ func TestAddressManagerNoRequestedIP(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
targetIP := ""
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
}
|
||||
|
||||
@ -44,8 +45,8 @@ func TestAddressManagerBasic(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
}
|
||||
|
||||
@ -55,12 +56,12 @@ func TestAddressManagerOrphaned(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &computebeta.Address{Name: testLBName, Address: targetIP, AddressType: string(schemeInternal)}
|
||||
err := svc.ReserveBetaRegionAddress(addr, testRegion)
|
||||
addr := &compute.Address{Name: testLBName, Address: targetIP, AddressType: string(cloud.SchemeInternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
}
|
||||
|
||||
@ -71,12 +72,12 @@ func TestAddressManagerOutdatedOrphan(t *testing.T) {
|
||||
previousAddress := "1.1.0.0"
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &computebeta.Address{Name: testLBName, Address: previousAddress, AddressType: string(schemeExternal)}
|
||||
err := svc.ReserveBetaRegionAddress(addr, testRegion)
|
||||
addr := &compute.Address{Name: testLBName, Address: previousAddress, AddressType: string(cloud.SchemeExternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal))
|
||||
testReleaseAddress(t, mgr, svc, testLBName, testRegion)
|
||||
}
|
||||
|
||||
@ -86,11 +87,11 @@ func TestAddressManagerExternallyOwned(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &computebeta.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeInternal)}
|
||||
err := svc.ReserveBetaRegionAddress(addr, testRegion)
|
||||
addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(cloud.SchemeInternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
ipToUse, err := mgr.HoldAddress()
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, ipToUse)
|
||||
@ -107,11 +108,11 @@ func TestAddressManagerBadExternallyOwned(t *testing.T) {
|
||||
svc := NewFakeCloudAddressService()
|
||||
targetIP := "1.1.1.1"
|
||||
|
||||
addr := &computebeta.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeExternal)}
|
||||
err := svc.ReserveBetaRegionAddress(addr, testRegion)
|
||||
addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(cloud.SchemeExternal)}
|
||||
err := svc.ReserveRegionAddress(addr, testRegion)
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
|
||||
mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal)
|
||||
_, err = mgr.HoldAddress()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
@ -121,7 +122,7 @@ func testHoldAddress(t *testing.T, mgr *addressManager, svc CloudAddressService,
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, ipToUse)
|
||||
|
||||
addr, err := svc.GetBetaRegionAddress(name, region)
|
||||
addr, err := svc.GetRegionAddress(name, region)
|
||||
require.NoError(t, err)
|
||||
if targetIP != "" {
|
||||
assert.EqualValues(t, targetIP, addr.Address)
|
||||
@ -132,6 +133,6 @@ func testHoldAddress(t *testing.T, mgr *addressManager, svc CloudAddressService,
|
||||
func testReleaseAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, name, region string) {
|
||||
err := mgr.ReleaseAddress()
|
||||
require.NoError(t, err)
|
||||
_, err = svc.GetBetaRegionAddress(name, region)
|
||||
_, err = svc.GetRegionAddress(name, region)
|
||||
assert.True(t, isNotFound(err))
|
||||
}
|
||||
|
104
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go
generated
vendored
104
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
@ -24,6 +25,10 @@ import (
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
|
||||
func newAddressMetricContext(request, region string) *metricContext {
|
||||
@ -40,110 +45,81 @@ func newAddressMetricContextWithVersion(request, region, version string) *metric
|
||||
// ephemeral IP associated with a global forwarding rule.
|
||||
func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error {
|
||||
mc := newAddressMetricContext("reserve", "")
|
||||
op, err := gce.service.GlobalAddresses.Insert(gce.projectID, addr).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
return mc.Observe(gce.c.GlobalAddresses().Insert(context.Background(), meta.GlobalKey(addr.Name), addr))
|
||||
}
|
||||
|
||||
// DeleteGlobalAddress deletes a global address by name.
|
||||
func (gce *GCECloud) DeleteGlobalAddress(name string) error {
|
||||
mc := newAddressMetricContext("delete", "")
|
||||
op, err := gce.service.GlobalAddresses.Delete(gce.projectID, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
return mc.Observe(gce.c.GlobalAddresses().Delete(context.Background(), meta.GlobalKey(name)))
|
||||
}
|
||||
|
||||
// GetGlobalAddress returns the global address by name.
|
||||
func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) {
|
||||
mc := newAddressMetricContext("get", "")
|
||||
v, err := gce.service.GlobalAddresses.Get(gce.projectID, name).Do()
|
||||
v, err := gce.c.GlobalAddresses().Get(context.Background(), meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ReserveRegionAddress creates a region address
|
||||
func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error {
|
||||
mc := newAddressMetricContext("reserve", region)
|
||||
op, err := gce.service.Addresses.Insert(gce.projectID, region, addr).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
return mc.Observe(gce.c.Addresses().Insert(context.Background(), meta.RegionalKey(addr.Name, region), addr))
|
||||
}
|
||||
|
||||
// ReserveAlphaRegionAddress creates an Alpha, regional address.
|
||||
func (gce *GCECloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
|
||||
mc := newAddressMetricContextWithVersion("reserve", region, computeAlphaVersion)
|
||||
op, err := gce.serviceAlpha.Addresses.Insert(gce.projectID, region, addr).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
mc := newAddressMetricContext("reserve", region)
|
||||
return mc.Observe(gce.c.AlphaAddresses().Insert(context.Background(), meta.RegionalKey(addr.Name, region), addr))
|
||||
}
|
||||
|
||||
// ReserveBetaRegionAddress creates a beta region address
|
||||
func (gce *GCECloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
|
||||
mc := newAddressMetricContextWithVersion("reserve", region, computeBetaVersion)
|
||||
op, err := gce.serviceBeta.Addresses.Insert(gce.projectID, region, addr).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
mc := newAddressMetricContext("reserve", region)
|
||||
return mc.Observe(gce.c.BetaAddresses().Insert(context.Background(), meta.RegionalKey(addr.Name, region), addr))
|
||||
}
|
||||
|
||||
// DeleteRegionAddress deletes a region address by name.
|
||||
func (gce *GCECloud) DeleteRegionAddress(name, region string) error {
|
||||
mc := newAddressMetricContext("delete", region)
|
||||
op, err := gce.service.Addresses.Delete(gce.projectID, region, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
return mc.Observe(gce.c.Addresses().Delete(context.Background(), meta.RegionalKey(name, region)))
|
||||
}
|
||||
|
||||
// GetRegionAddress returns the region address by name
|
||||
func (gce *GCECloud) GetRegionAddress(name, region string) (*compute.Address, error) {
|
||||
mc := newAddressMetricContext("get", region)
|
||||
v, err := gce.service.Addresses.Get(gce.projectID, region, name).Do()
|
||||
v, err := gce.c.Addresses().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaRegionAddress returns the Alpha, regional address by name.
|
||||
func (gce *GCECloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
|
||||
mc := newAddressMetricContextWithVersion("get", region, computeAlphaVersion)
|
||||
v, err := gce.serviceAlpha.Addresses.Get(gce.projectID, region, name).Do()
|
||||
mc := newAddressMetricContext("get", region)
|
||||
v, err := gce.c.AlphaAddresses().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetBetaRegionAddress returns the beta region address by name
|
||||
func (gce *GCECloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
|
||||
mc := newAddressMetricContextWithVersion("get", region, computeBetaVersion)
|
||||
v, err := gce.serviceBeta.Addresses.Get(gce.projectID, region, name).Do()
|
||||
mc := newAddressMetricContext("get", region)
|
||||
v, err := gce.c.BetaAddresses().Get(context.Background(), meta.RegionalKey(name, region))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetRegionAddressByIP returns the regional address matching the given IP address.
|
||||
func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) {
|
||||
mc := newAddressMetricContext("list", region)
|
||||
addrs, err := gce.service.Addresses.List(gce.projectID, region).Filter("address eq " + ipAddress).Do()
|
||||
// Record the metrics for the call.
|
||||
addrs, err := gce.c.Addresses().List(context.Background(), region, filter.Regexp("address", ipAddress))
|
||||
|
||||
mc.Observe(err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(addrs.Items) > 1 {
|
||||
// We don't expect more than one match.
|
||||
addrsToPrint := []compute.Address{}
|
||||
for _, addr := range addrs.Items {
|
||||
addrsToPrint = append(addrsToPrint, *addr)
|
||||
}
|
||||
glog.Errorf("More than one addresses matching the IP %q: %+v", ipAddress, addrsToPrint)
|
||||
if len(addrs) > 1 {
|
||||
glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs))
|
||||
}
|
||||
for _, addr := range addrs.Items {
|
||||
for _, addr := range addrs {
|
||||
if addr.Address == ipAddress {
|
||||
return addr, nil
|
||||
}
|
||||
@ -154,22 +130,17 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad
|
||||
// GetBetaRegionAddressByIP returns the beta regional address matching the given IP address.
|
||||
func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) {
|
||||
mc := newAddressMetricContext("list", region)
|
||||
addrs, err := gce.serviceBeta.Addresses.List(gce.projectID, region).Filter("address eq " + ipAddress).Do()
|
||||
// Record the metrics for the call.
|
||||
addrs, err := gce.c.BetaAddresses().List(context.Background(), region, filter.Regexp("address", ipAddress))
|
||||
|
||||
mc.Observe(err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(addrs.Items) > 1 {
|
||||
// We don't expect more than one match.
|
||||
addrsToPrint := []computebeta.Address{}
|
||||
for _, addr := range addrs.Items {
|
||||
addrsToPrint = append(addrsToPrint, *addr)
|
||||
}
|
||||
glog.Errorf("More than one addresses matching the IP %q: %+v", ipAddress, addrsToPrint)
|
||||
if len(addrs) > 1 {
|
||||
glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs))
|
||||
}
|
||||
for _, addr := range addrs.Items {
|
||||
for _, addr := range addrs {
|
||||
if addr.Address == ipAddress {
|
||||
return addr, nil
|
||||
}
|
||||
@ -180,7 +151,7 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput
|
||||
// TODO(#51665): retire this function once Network Tiers becomes Beta in GCP.
|
||||
func (gce *GCECloud) getNetworkTierFromAddress(name, region string) (string, error) {
|
||||
if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
|
||||
return NetworkTierDefault.ToGCEValue(), nil
|
||||
return cloud.NetworkTierDefault.ToGCEValue(), nil
|
||||
}
|
||||
addr, err := gce.GetAlphaRegionAddress(name, region)
|
||||
if err != nil {
|
||||
@ -188,3 +159,18 @@ func (gce *GCECloud) getNetworkTierFromAddress(name, region string) (string, err
|
||||
}
|
||||
return addr.NetworkTier, nil
|
||||
}
|
||||
|
||||
func addrNames(items interface{}) []string {
|
||||
var ret []string
|
||||
switch items := items.(type) {
|
||||
case []compute.Address:
|
||||
for _, a := range items {
|
||||
ret = append(ret, a.Name)
|
||||
}
|
||||
case []computebeta.Address:
|
||||
for _, a := range items {
|
||||
ret = append(ret, a.Name)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
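The rewritten wrappers above all reduce to Insert/Get/Delete calls on the generated cloud stubs keyed by meta.GlobalKey or meta.RegionalKey. A hedged sketch of that call pattern against any cloud.Cloud implementation; the address name is invented, and leaving Address empty lets GCE allocate an IP, as noted in the address manager code earlier.

```go
package main

import (
	"context"

	compute "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

// reserveAndRelease reserves a regional address, reads it back, then deletes it.
func reserveAndRelease(ctx context.Context, c cloud.Cloud, region string) error {
	addr := &compute.Address{Name: "example-address"} // empty Address field: GCE picks an IP
	key := meta.RegionalKey(addr.Name, region)

	if err := c.Addresses().Insert(ctx, key, addr); err != nil {
		return err
	}
	if _, err := c.Addresses().Get(ctx, key); err != nil {
		return err
	}
	return c.Addresses().Delete(ctx, key)
}
```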
12
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go
generated
vendored
@ -25,6 +25,8 @@ import (
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
)
|
||||
|
||||
// test
|
||||
@ -49,7 +51,7 @@ func NewFakeCloudAddressService() *FakeCloudAddressService {
|
||||
}
|
||||
}
|
||||
|
||||
// SetRegionalAddresses sets the addresses of ther region. This is used for
|
||||
// SetRegionalAddresses sets the addresses of the region. This is used for
|
||||
// setting the test environment.
|
||||
func (cas *FakeCloudAddressService) SetRegionalAddresses(region string, addrs []*computealpha.Address) {
|
||||
// Reset addresses in the region.
|
||||
@ -68,7 +70,7 @@ func (cas *FakeCloudAddressService) ReserveAlphaRegionAddress(addr *computealpha
|
||||
}
|
||||
|
||||
if addr.AddressType == "" {
|
||||
addr.AddressType = string(schemeExternal)
|
||||
addr.AddressType = string(cloud.SchemeExternal)
|
||||
}
|
||||
|
||||
if cas.reservedAddrs[addr.Address] {
|
||||
@ -76,8 +78,8 @@ func (cas *FakeCloudAddressService) ReserveAlphaRegionAddress(addr *computealpha
|
||||
// When the IP is already in use, this call returns an error code based
|
||||
// on the type (internal vs external) of the address. This is to be
|
||||
// consistent with actual GCE API.
|
||||
switch lbScheme(addr.AddressType) {
|
||||
case schemeExternal:
|
||||
switch cloud.LbScheme(addr.AddressType) {
|
||||
case cloud.SchemeExternal:
|
||||
return makeGoogleAPIError(http.StatusBadRequest, msg)
|
||||
default:
|
||||
return makeGoogleAPIError(http.StatusConflict, msg)
|
||||
@ -209,7 +211,7 @@ func convertToAlphaAddress(object gceObject) *computealpha.Address {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to alpha address: %v", object, err))
|
||||
}
|
||||
// Set the default values for the Alpha fields.
|
||||
addr.NetworkTier = NetworkTierDefault.ToGCEValue()
|
||||
addr.NetworkTier = cloud.NetworkTierDefault.ToGCEValue()
|
||||
return &addr
|
||||
}
|
||||
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
@ -29,15 +29,12 @@ const (
|
||||
// tier to use. Currently supports "Standard" and "Premium" (default).
|
||||
AlphaFeatureNetworkTiers = "NetworkTiers"
|
||||
|
||||
AlphaFeatureGCEDisk = "DiskAlphaAPI"
|
||||
|
||||
AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
|
||||
)
|
||||
|
||||
// All known alpha features
|
||||
var knownAlphaFeatures = map[string]bool{
|
||||
AlphaFeatureNetworkTiers: true,
|
||||
AlphaFeatureGCEDisk: true,
|
||||
AlphaFeatureNetworkEndpointGroup: true,
|
||||
}
|
||||
|
||||
|
42
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go
generated
vendored
42
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go
generated
vendored
@ -18,15 +18,14 @@ package gce

import (
	"fmt"
	"strings"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

type LoadBalancerType string
type NetworkTier string

const (
	// ServiceAnnotationLoadBalancerType is annotated on a service with type LoadBalancer
@ -49,12 +48,8 @@ const (
	// network tier a GCP LB should use. The valid values are "Standard" and
	// "Premium" (default).
	NetworkTierAnnotationKey = "cloud.google.com/network-tier"
	NetworkTierAnnotationStandard = "Standard"
	NetworkTierAnnotationPremium = "Premium"

	NetworkTierStandard NetworkTier = NetworkTierAnnotationStandard
	NetworkTierPremium NetworkTier = NetworkTierAnnotationPremium
	NetworkTierDefault NetworkTier = NetworkTierPremium
	NetworkTierAnnotationStandard = cloud.NetworkTierStandard
	NetworkTierAnnotationPremium = cloud.NetworkTierPremium
)

// GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled.
@ -97,38 +92,19 @@ func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool {
// GetServiceNetworkTier returns the network tier of GCP load balancer
// which should be assembled, and an error if the specified tier is not
// supported.
func GetServiceNetworkTier(service *v1.Service) (NetworkTier, error) {
func GetServiceNetworkTier(service *v1.Service) (cloud.NetworkTier, error) {
	l, ok := service.Annotations[NetworkTierAnnotationKey]
	if !ok {
		return NetworkTierDefault, nil
		return cloud.NetworkTierDefault, nil
	}

	v := NetworkTier(l)
	v := cloud.NetworkTier(l)
	switch v {
	case NetworkTierStandard:
	case cloud.NetworkTierStandard:
		fallthrough
	case NetworkTierPremium:
	case cloud.NetworkTierPremium:
		return v, nil
	default:
		return NetworkTierDefault, fmt.Errorf("unsupported network tier: %q", v)
	}
}

// ToGCEValue converts NetworkTier to a string that we can populate the
// NetworkTier field of GCE objects.
func (n NetworkTier) ToGCEValue() string {
	return strings.ToUpper(string(n))
}

// NetworkTierGCEValueToType converts the value of the NetworkTier field of a
// GCE object to the NetworkTier type.
func NetworkTierGCEValueToType(s string) NetworkTier {
	switch s {
	case "STANDARD":
		return NetworkTierStandard
	case "PREMIUM":
		return NetworkTierPremium
	default:
		return NetworkTier(s)
		return cloud.NetworkTierDefault, fmt.Errorf("unsupported network tier: %q", v)
	}
}
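The annotation handling above keeps its shape while the NetworkTier type and constants move into the cloud package. A minimal, self-contained sketch of the same lookup-with-default logic, using local stand-ins for cloud.NetworkTier and its constants (values "Standard"/"Premium" with Premium as the default, as in the diff):

package main

import (
	"fmt"
	"strings"
)

// NetworkTier stands in for cloud.NetworkTier.
type NetworkTier string

const (
	NetworkTierStandard NetworkTier = "Standard"
	NetworkTierPremium  NetworkTier = "Premium"
	NetworkTierDefault  NetworkTier = NetworkTierPremium

	networkTierAnnotationKey = "cloud.google.com/network-tier"
)

// tierFromAnnotations follows the shape of GetServiceNetworkTier above:
// missing annotation -> default, known value -> that tier, anything else ->
// default plus an error.
func tierFromAnnotations(annotations map[string]string) (NetworkTier, error) {
	raw, ok := annotations[networkTierAnnotationKey]
	if !ok {
		return NetworkTierDefault, nil
	}
	switch v := NetworkTier(raw); v {
	case NetworkTierStandard, NetworkTierPremium:
		return v, nil
	default:
		return NetworkTierDefault, fmt.Errorf("unsupported network tier: %q", v)
	}
}

// toGCEValue mirrors NetworkTier.ToGCEValue: the GCE API expects the tier
// upper-cased ("PREMIUM", "STANDARD").
func toGCEValue(t NetworkTier) string { return strings.ToUpper(string(t)) }

func main() {
	tier, err := tierFromAnnotations(map[string]string{networkTierAnnotationKey: "Standard"})
	fmt.Println(tier, toGCEValue(tier), err) // Standard STANDARD <nil>
}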
11
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations_test.go
generated
vendored
@ -21,6 +21,7 @@ import (

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"

	"github.com/stretchr/testify/assert"
)
@ -38,24 +39,24 @@ func TestServiceNetworkTierAnnotationKey(t *testing.T) {

	for testName, testCase := range map[string]struct {
		annotations map[string]string
		expectedTier NetworkTier
		expectedTier cloud.NetworkTier
		expectErr bool
	}{
		"Use the default when the annotation does not exist": {
			annotations: nil,
			expectedTier: NetworkTierDefault,
			expectedTier: cloud.NetworkTierDefault,
		},
		"Standard tier": {
			annotations: map[string]string{NetworkTierAnnotationKey: "Standard"},
			expectedTier: NetworkTierStandard,
			expectedTier: cloud.NetworkTierStandard,
		},
		"Premium tier": {
			annotations: map[string]string{NetworkTierAnnotationKey: "Premium"},
			expectedTier: NetworkTierPremium,
			expectedTier: cloud.NetworkTierPremium,
		},
		"Report an error on invalid network tier value": {
			annotations: map[string]string{NetworkTierAnnotationKey: "Unknown-tier"},
			expectedTier: NetworkTierPremium,
			expectedTier: cloud.NetworkTierPremium,
			expectErr: true,
		},
	} {
114
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
@ -17,10 +17,13 @@ limitations under the License.
package gce

import (
	"net/http"
	"context"

	computealpha "google.golang.org/api/compute/v0.alpha"
	compute "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func newBackendServiceMetricContext(request, region string) *metricContext {
@ -34,150 +37,105 @@ func newBackendServiceMetricContextWithVersion(request, region, version string)
// GetGlobalBackendService retrieves a backend by name.
func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendService, error) {
	mc := newBackendServiceMetricContext("get", "")
	v, err := gce.service.BackendServices.Get(gce.projectID, name).Do()
	v, err := gce.c.BackendServices().Get(context.Background(), meta.GlobalKey(name))
	return v, mc.Observe(err)
}

// GetAlphaGlobalBackendService retrieves alpha backend by name.
func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) {
	mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion)
	v, err := gce.serviceAlpha.BackendServices.Get(gce.projectID, name).Do()
	v, err := gce.c.AlphaBackendServices().Get(context.Background(), meta.GlobalKey(name))
	return v, mc.Observe(err)
}

// UpdateGlobalBackendService applies the given BackendService as an update to an existing service.
// UpdateGlobalBackendService applies the given BackendService as an update to
// an existing service.
func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error {
	mc := newBackendServiceMetricContext("update", "")
	op, err := gce.service.BackendServices.Update(gce.projectID, bg.Name, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
	return mc.Observe(gce.c.BackendServices().Update(context.Background(), meta.GlobalKey(bg.Name), bg))
}

// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an update to an existing service.
// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an
// update to an existing service.
func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
	mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion)
	op, err := gce.serviceAlpha.BackendServices.Update(gce.projectID, bg.Name, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
	mc := newBackendServiceMetricContext("update", "")
	return mc.Observe(gce.c.AlphaBackendServices().Update(context.Background(), meta.GlobalKey(bg.Name), bg))
}

// DeleteGlobalBackendService deletes the given BackendService by name.
func (gce *GCECloud) DeleteGlobalBackendService(name string) error {
	mc := newBackendServiceMetricContext("delete", "")
	op, err := gce.service.BackendServices.Delete(gce.projectID, name).Do()
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil
		}
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
	return mc.Observe(gce.c.BackendServices().Delete(context.Background(), meta.GlobalKey(name)))
}

// CreateGlobalBackendService creates the given BackendService.
func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) error {
	mc := newBackendServiceMetricContext("create", "")
	op, err := gce.service.BackendServices.Insert(gce.projectID, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
	return mc.Observe(gce.c.BackendServices().Insert(context.Background(), meta.GlobalKey(bg.Name), bg))
}

// CreateAlphaGlobalBackendService creates the given alpha BackendService.
func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
	mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion)
	op, err := gce.serviceAlpha.BackendServices.Insert(gce.projectID, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
	mc := newBackendServiceMetricContext("create", "")
	return mc.Observe(gce.c.AlphaBackendServices().Insert(context.Background(), meta.GlobalKey(bg.Name), bg))
}

// ListGlobalBackendServices lists all backend services in the project.
func (gce *GCECloud) ListGlobalBackendServices() (*compute.BackendServiceList, error) {
func (gce *GCECloud) ListGlobalBackendServices() ([]*compute.BackendService, error) {
	mc := newBackendServiceMetricContext("list", "")
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.BackendServices.List(gce.projectID).Do()
	v, err := gce.c.BackendServices().List(context.Background(), filter.None)
	return v, mc.Observe(err)
}

// GetGlobalBackendServiceHealth returns the health of the BackendService identified by the given
// name, in the given instanceGroup. The instanceGroupLink is the fully
// qualified self link of an instance group.
// GetGlobalBackendServiceHealth returns the health of the BackendService
// identified by the given name, in the given instanceGroup. The
// instanceGroupLink is the fully qualified self link of an instance group.
func (gce *GCECloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
	mc := newBackendServiceMetricContext("get_health", "")
	groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink}
	v, err := gce.service.BackendServices.GetHealth(gce.projectID, name, groupRef).Do()
	v, err := gce.c.BackendServices().GetHealth(context.Background(), meta.GlobalKey(name), groupRef)
	return v, mc.Observe(err)
}

// GetRegionBackendService retrieves a backend by name.
func (gce *GCECloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) {
	mc := newBackendServiceMetricContext("get", region)
	v, err := gce.service.RegionBackendServices.Get(gce.projectID, region, name).Do()
	v, err := gce.c.RegionBackendServices().Get(context.Background(), meta.RegionalKey(name, region))
	return v, mc.Observe(err)
}

// UpdateRegionBackendService applies the given BackendService as an update to an existing service.
// UpdateRegionBackendService applies the given BackendService as an update to
// an existing service.
func (gce *GCECloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error {
	mc := newBackendServiceMetricContext("update", region)
	op, err := gce.service.RegionBackendServices.Update(gce.projectID, region, bg.Name, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForRegionOp(op, region, mc)
	return mc.Observe(gce.c.RegionBackendServices().Update(context.Background(), meta.RegionalKey(bg.Name, region), bg))
}

// DeleteRegionBackendService deletes the given BackendService by name.
func (gce *GCECloud) DeleteRegionBackendService(name, region string) error {
	mc := newBackendServiceMetricContext("delete", region)
	op, err := gce.service.RegionBackendServices.Delete(gce.projectID, region, name).Do()
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil
		}
		return mc.Observe(err)
	}

	return gce.waitForRegionOp(op, region, mc)
	return mc.Observe(gce.c.RegionBackendServices().Delete(context.Background(), meta.RegionalKey(name, region)))
}

// CreateRegionBackendService creates the given BackendService.
func (gce *GCECloud) CreateRegionBackendService(bg *compute.BackendService, region string) error {
	mc := newBackendServiceMetricContext("create", region)
	op, err := gce.service.RegionBackendServices.Insert(gce.projectID, region, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForRegionOp(op, region, mc)
	return mc.Observe(gce.c.RegionBackendServices().Insert(context.Background(), meta.RegionalKey(bg.Name, region), bg))
}

// ListRegionBackendServices lists all backend services in the project.
func (gce *GCECloud) ListRegionBackendServices(region string) (*compute.BackendServiceList, error) {
func (gce *GCECloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) {
	mc := newBackendServiceMetricContext("list", region)
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.RegionBackendServices.List(gce.projectID, region).Do()
	v, err := gce.c.RegionBackendServices().List(context.Background(), region, filter.None)
	return v, mc.Observe(err)
}

// GetRegionalBackendServiceHealth returns the health of the BackendService identified by the given
// name, in the given instanceGroup. The instanceGroupLink is the fully
// qualified self link of an instance group.
// GetRegionalBackendServiceHealth returns the health of the BackendService
// identified by the given name, in the given instanceGroup. The
// instanceGroupLink is the fully qualified self link of an instance group.
func (gce *GCECloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
	mc := newBackendServiceMetricContext("get_health", region)
	groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink}
	v, err := gce.service.RegionBackendServices.GetHealth(gce.projectID, region, name, groupRef).Do()
	ref := &compute.ResourceGroupReference{Group: instanceGroupLink}
	v, err := gce.c.RegionBackendServices().GetHealth(context.Background(), meta.RegionalKey(name, region), ref)
	return v, mc.Observe(err)
}
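Every method above follows the same rewrite: the hand-rolled gce.service...Do() call plus waitForGlobalOp/waitForRegionOp is collapsed into a single call on the generated gce.c wrapper, addressed by a key from the meta package (meta.GlobalKey for global resources, meta.RegionalKey for regional ones). A minimal sketch of that addressing idea, with a hypothetical key type standing in for the real meta.Key:

package main

import "fmt"

// resourceKey is a hypothetical stand-in for the meta.Key type the new
// wrapper methods take; field names here are assumptions, not the real API.
type resourceKey struct {
	Name   string
	Region string
}

// globalKey and regionalKey mimic the shape of meta.GlobalKey and
// meta.RegionalKey used above: global resources are addressed by name only,
// regional ones by name plus region.
func globalKey(name string) resourceKey           { return resourceKey{Name: name} }
func regionalKey(name, region string) resourceKey { return resourceKey{Name: name, Region: region} }

func (k resourceKey) String() string {
	if k.Region == "" {
		return "global/" + k.Name
	}
	return "regions/" + k.Region + "/" + k.Name
}

func main() {
	// GetGlobalBackendService-style vs GetRegionBackendService-style addressing.
	fmt.Println(globalKey("my-backend"))                  // global/my-backend
	fmt.Println(regionalKey("my-backend", "us-central1")) // regions/us-central1/my-backend
}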
32
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go
generated
vendored
@ -17,9 +17,12 @@ limitations under the License.
package gce

import (
	"net/http"
	"context"

	compute "google.golang.org/api/compute/v1"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func newCertMetricContext(request string) *metricContext {
@ -29,46 +32,29 @@ func newCertMetricContext(request string) *metricContext {
// GetSslCertificate returns the SslCertificate by name.
func (gce *GCECloud) GetSslCertificate(name string) (*compute.SslCertificate, error) {
	mc := newCertMetricContext("get")
	v, err := gce.service.SslCertificates.Get(gce.projectID, name).Do()
	v, err := gce.c.SslCertificates().Get(context.Background(), meta.GlobalKey(name))
	return v, mc.Observe(err)
}

// CreateSslCertificate creates and returns a SslCertificate.
func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) {
	mc := newCertMetricContext("create")
	op, err := gce.service.SslCertificates.Insert(gce.projectID, sslCerts).Do()

	err := gce.c.SslCertificates().Insert(context.Background(), meta.GlobalKey(sslCerts.Name), sslCerts)
	if err != nil {
		return nil, mc.Observe(err)
	}

	if err = gce.waitForGlobalOp(op, mc); err != nil {
		return nil, mc.Observe(err)
	}

	return gce.GetSslCertificate(sslCerts.Name)
}

// DeleteSslCertificate deletes the SslCertificate by name.
func (gce *GCECloud) DeleteSslCertificate(name string) error {
	mc := newCertMetricContext("delete")
	op, err := gce.service.SslCertificates.Delete(gce.projectID, name).Do()

	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil
		}

		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
	return mc.Observe(gce.c.SslCertificates().Delete(context.Background(), meta.GlobalKey(name)))
}

// ListSslCertificates lists all SslCertificates in the project.
func (gce *GCECloud) ListSslCertificates() (*compute.SslCertificateList, error) {
func (gce *GCECloud) ListSslCertificates() ([]*compute.SslCertificate, error) {
	mc := newCertMetricContext("list")
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.SslCertificates.List(gce.projectID).Do()
	v, err := gce.c.SslCertificates().List(context.Background(), filter.None)
	return v, mc.Observe(err)
}
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go
generated
vendored
@ -16,11 +16,13 @@ limitations under the License.

package gce

import "context"

func newClustersMetricContext(request, zone string) *metricContext {
	return newGenericMetricContext("clusters", request, unusedMetricLabel, zone, computeV1Version)
}

func (gce *GCECloud) ListClusters() ([]string, error) {
func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) {
	allClusters := []string{}

	for _, zone := range gce.managedZones {
@ -35,7 +37,7 @@ func (gce *GCECloud) ListClusters() ([]string, error) {
	return allClusters, nil
}

func (gce *GCECloud) Master(clusterName string) (string, error) {
func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, error) {
	return "k8s-" + clusterName + "-master.internal", nil
}
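The clusters code only gains a context.Context parameter on each method; the GCE implementation still ignores it. A small self-contained sketch of the resulting call shape, with a trimmed stand-in interface and a fake implementation (the local names are illustrative, not the vendored types):

package main

import (
	"context"
	"fmt"
)

// clusters is a trimmed stand-in for the cloudprovider.Clusters interface;
// the change above only threads a context through the two methods.
type clusters interface {
	ListClusters(ctx context.Context) ([]string, error)
	Master(ctx context.Context, clusterName string) (string, error)
}

// fakeGCE mimics the behaviour shown above: the master name is derived from
// the cluster name, and the context is accepted but unused.
type fakeGCE struct{ zoneClusters map[string][]string }

func (f fakeGCE) ListClusters(ctx context.Context) ([]string, error) {
	var all []string
	for _, cs := range f.zoneClusters {
		all = append(all, cs...)
	}
	return all, nil
}

func (f fakeGCE) Master(ctx context.Context, clusterName string) (string, error) {
	return "k8s-" + clusterName + "-master.internal", nil
}

func main() {
	var c clusters = fakeGCE{zoneClusters: map[string][]string{"us-central1-b": {"alpha"}}}
	names, _ := c.ListClusters(context.Background())
	master, _ := c.Master(context.Background(), names[0])
	fmt.Println(names, master) // [alpha] k8s-alpha-master.internal
}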
223
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package gce

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
@ -36,6 +37,8 @@ import (
	computealpha "google.golang.org/api/compute/v0.alpha"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

type DiskType string
@ -111,6 +114,8 @@ type gceServiceManager struct {
	gce *GCECloud
}

var _ diskServiceManager = &gceServiceManager{}

func (manager *gceServiceManager) CreateDiskOnCloudProvider(
	name string,
	sizeGb int64,
@ -118,23 +123,11 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider(
	diskType string,
	zone string) (gceObject, error) {
	diskTypeURI, err := manager.getDiskTypeURI(
		manager.gce.region /* diskRegion */, singleZone{zone}, diskType)
		manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useAlphaAPI */)
	if err != nil {
		return nil, err
	}

	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		diskToCreateAlpha := &computealpha.Disk{
			Name: name,
			SizeGb: sizeGb,
			Description: tagsStr,
			Type: diskTypeURI,
		}

		return manager.gce.serviceAlpha.Disks.Insert(
			manager.gce.projectID, zone, diskToCreateAlpha).Do()
	}

	diskToCreateV1 := &compute.Disk{
		Name: name,
		SizeGb: sizeGb,
@ -151,17 +144,19 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
	tagsStr string,
	diskType string,
	replicaZones sets.String) (gceObject, error) {
	diskTypeURI, err := manager.getDiskTypeURI(
		manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType)
	if err != nil {
		return nil, err
	}
	fullyQualifiedReplicaZones := []string{}
	for _, replicaZone := range replicaZones.UnsortedList() {
		fullyQualifiedReplicaZones = append(
			fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone))
	}
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {

	if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		diskTypeURI, err := manager.getDiskTypeURI(
			manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useAlphaAPI */)
		if err != nil {
			return nil, err
		}
		fullyQualifiedReplicaZones := []string{}
		for _, replicaZone := range replicaZones.UnsortedList() {
			fullyQualifiedReplicaZones = append(
				fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true))
		}

		diskToCreateAlpha := &computealpha.Disk{
			Name: name,
			SizeGb: sizeGb,
@ -173,7 +168,7 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider(
			manager.gce.projectID, manager.gce.region, diskToCreateAlpha).Do()
	}

	return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.")
	return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}

func (manager *gceServiceManager) AttachDiskOnCloudProvider(
@ -186,24 +181,12 @@ func (manager *gceServiceManager) AttachDiskOnCloudProvider(
		return nil, err
	}

	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		attachedDiskAlpha := &computealpha.AttachedDisk{
			DeviceName: disk.Name,
			Kind: disk.Kind,
			Mode: readWrite,
			Source: source,
			Type: diskTypePersistent,
		}
		return manager.gce.serviceAlpha.Instances.AttachDisk(
			manager.gce.projectID, instanceZone, instanceName, attachedDiskAlpha).Do()
	}

	attachedDiskV1 := &compute.AttachedDisk{
		DeviceName: disk.Name,
		Kind: disk.Kind,
		Mode: readWrite,
		Source: source,
		Type: disk.Type,
		Type: diskTypePersistent,
	}
	return manager.gce.service.Instances.AttachDisk(
		manager.gce.projectID, instanceZone, instanceName, attachedDiskV1).Do()
@ -213,11 +196,6 @@ func (manager *gceServiceManager) DetachDiskOnCloudProvider(
	instanceZone string,
	instanceName string,
	devicePath string) (gceObject, error) {
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		manager.gce.serviceAlpha.Instances.DetachDisk(
			manager.gce.projectID, instanceZone, instanceName, devicePath).Do()
	}

	return manager.gce.service.Instances.DetachDisk(
		manager.gce.projectID, instanceZone, instanceName, devicePath).Do()
}
@ -233,45 +211,6 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
		return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone)
	}

	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		diskAlpha, err := manager.gce.serviceAlpha.Disks.Get(
			manager.gce.projectID, zone, diskName).Do()
		if err != nil {
			return nil, err
		}

		var zoneInfo zoneType
		if len(diskAlpha.ReplicaZones) > 1 {
			zones := sets.NewString()
			for _, zoneURI := range diskAlpha.ReplicaZones {
				zones.Insert(lastComponent(zoneURI))
			}
			zoneInfo = multiZone{zones}
		} else {
			zoneInfo = singleZone{lastComponent(diskAlpha.Zone)}
			if diskAlpha.Zone == "" {
				zoneInfo = singleZone{lastComponent(zone)}
			}
		}

		region := strings.TrimSpace(lastComponent(diskAlpha.Region))
		if region == "" {
			region, err = manager.getRegionFromZone(zoneInfo)
			if err != nil {
				return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err)
			}
		}

		return &GCEDisk{
			ZoneInfo: zoneInfo,
			Region: region,
			Name: diskAlpha.Name,
			Kind: diskAlpha.Kind,
			Type: diskAlpha.Type,
			SizeGb: diskAlpha.SizeGb,
		}, nil
	}

	diskStable, err := manager.gce.service.Disks.Get(
		manager.gce.projectID, zone, diskName).Do()
	if err != nil {
@ -301,7 +240,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider(
func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
	diskName string) (*GCEDisk, error) {

	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
	if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		diskAlpha, err := manager.gce.serviceAlpha.RegionDisks.Get(
			manager.gce.projectID, manager.gce.region, diskName).Do()
		if err != nil {
@ -323,30 +262,24 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider(
		}, nil
	}

	return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.")
	return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}

func (manager *gceServiceManager) DeleteDiskOnCloudProvider(
	zone string,
	diskName string) (gceObject, error) {

	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		return manager.gce.serviceAlpha.Disks.Delete(
			manager.gce.projectID, zone, diskName).Do()
	}

	return manager.gce.service.Disks.Delete(
		manager.gce.projectID, zone, diskName).Do()
}

func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider(
	diskName string) (gceObject, error) {
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
	if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		return manager.gce.serviceAlpha.RegionDisks.Delete(
			manager.gce.projectID, manager.gce.region, diskName).Do()
	}

	return nil, fmt.Errorf("DeleteRegionalDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.")
	return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}

func (manager *gceServiceManager) WaitForZoneOp(
@ -361,9 +294,6 @@ func (manager *gceServiceManager) WaitForRegionalOp(

func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) {
	getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
	}

	switch zoneInfo := disk.ZoneInfo.(type) {
	case singleZone:
@ -397,10 +327,13 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk, error
}

func (manager *gceServiceManager) getDiskTypeURI(
	diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) {
	getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
	diskRegion string, diskZoneInfo zoneType, diskType string, useAlphaAPI bool) (string, error) {

	var getProjectsAPIEndpoint string
	if useAlphaAPI {
		getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
	} else {
		getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
	}

	switch zoneInfo := diskZoneInfo.(type) {
@ -430,10 +363,12 @@ func (manager *gceServiceManager) getDiskTypeURI(
	}
}

func (manager *gceServiceManager) getReplicaZoneURI(zone string) string {
	getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint()
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
func (manager *gceServiceManager) getReplicaZoneURI(zone string, useAlphaAPI bool) string {
	var getProjectsAPIEndpoint string
	if useAlphaAPI {
		getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha()
	} else {
		getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint()
	}

	return getProjectsAPIEndpoint + fmt.Sprintf(
@ -477,13 +412,6 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string,
}

func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error) {
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
		resizeServiceRequest := &computealpha.DisksResizeRequest{
			SizeGb: sizeGb,
		}
		return manager.gce.serviceAlpha.Disks.Resize(manager.gce.projectID, zone, disk.Name, resizeServiceRequest).Do()

	}
	resizeServiceRequest := &compute.DisksResizeRequest{
		SizeGb: sizeGb,
	}
@ -491,20 +419,20 @@ func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeG
}

func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error) {
	if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
	if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		resizeServiceRequest := &computealpha.RegionDisksResizeRequest{
			SizeGb: sizeGb,
		}
		return manager.gce.serviceAlpha.RegionDisks.Resize(manager.gce.projectID, disk.Region, disk.Name, resizeServiceRequest).Do()
	}
	return nil, fmt.Errorf("RegionalResizeDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.")
	return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk)
}

// Disks is interface for manipulation with GCE PDs.
type Disks interface {
	// AttachDisk attaches given disk to the node with the specified NodeName.
	// Current instance is used when instanceID is empty string.
	AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error
	AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error

	// DetachDisk detaches given disk to the node with the specified NodeName.
	// Current instance is used when nodeName is empty string.
@ -577,7 +505,7 @@ func newDiskMetricContextRegional(request, region string) *metricContext {
	return newGenericMetricContext("disk", request, region, unusedMetricLabel, computeV1Version)
}

func (gce *GCECloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error) {
func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
	// Ignore any volumes that are being provisioned
	if pv.Spec.GCEPersistentDisk.PDName == volume.ProvisionedVolumeName {
		return nil, nil
@ -594,7 +522,7 @@ func (gce *GCECloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]str
	return labels, nil
}

func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error {
	instanceName := mapNodeNameToInstanceName(nodeName)
	instance, err := gce.getInstanceByName(instanceName)
	if err != nil {
@ -604,7 +532,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn
	// Try fetching as regional PD
	var disk *GCEDisk
	var mc *metricContext
	if gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
	if regional && utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		disk, err = gce.getRegionalDiskByName(diskName)
		if err != nil {
			glog.V(5).Infof("Could not find regional PD named %q to Attach. Will look for a zonal PD", diskName)
@ -845,7 +773,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,

	requestBytes := newSize.Value()
	// GCE resizes in chunks of GBs (not GiB)
	requestGB := volume.RoundUpSize(requestBytes, 1000*1000*1000)
	requestGB := volumeutil.RoundUpSize(requestBytes, 1000*1000*1000)
	newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB))

	// If disk is already of size equal or greater than requested size, we simply return
@ -869,17 +797,20 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
		}
		return newSizeQuant, nil
	case multiZone:
		mc = newDiskMetricContextRegional("resize", disk.Region)
		resizeOp, err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
		if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
			mc = newDiskMetricContextRegional("resize", disk.Region)
			resizeOp, err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)

			if err != nil {
				return oldSize, mc.Observe(err)
			if err != nil {
				return oldSize, mc.Observe(err)
			}
			waitErr := gce.manager.WaitForRegionalOp(resizeOp, mc)
			if waitErr != nil {
				return oldSize, waitErr
			}
			return newSizeQuant, nil
		}
		waitErr := gce.manager.WaitForRegionalOp(resizeOp, mc)
		if waitErr != nil {
			return oldSize, waitErr
		}
		return newSizeQuant, nil
		return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
	case nil:
		return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk)
	default:
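The resize path above rounds the requested size up to whole decimal GB (10^9 bytes, not GiB) before talking to GCE. A self-contained sketch of that rounding, reproducing the ceiling-division formula the volume util helper uses (the helper here is local, not an import of the real package):

package main

import "fmt"

const gb = 1000 * 1000 * 1000 // GCE resizes in decimal GB, not GiB

// roundUpSize reproduces the ceiling division of volumeutil.RoundUpSize:
// how many whole allocation units are needed to hold sizeBytes.
func roundUpSize(sizeBytes, allocationUnitBytes int64) int64 {
	return (sizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// A 10GiB request (10 * 2^30 bytes) does not land on a GB boundary,
	// so it is rounded up to 11 GB before the resize call.
	requestBytes := int64(10 * 1024 * 1024 * 1024)
	fmt.Println(roundUpSize(requestBytes, gb)) // 11
}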
@ -912,19 +843,26 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st
	// We could assume the disks exists; we have all the information we need
	// However it is more consistent to ensure the disk exists,
	// and in future we may gather addition information (e.g. disk type, IOPS etc)
	zoneSet, err := volumeutil.LabelZonesToSet(zone)
	if err != nil {
		glog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone)
	}

	if len(zoneSet) > 1 {
		// Regional PD
		disk, err = gce.getRegionalDiskByName(name)
	if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		zoneSet, err := volumeutil.LabelZonesToSet(zone)
		if err != nil {
			return nil, err
			glog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone)
		}

		if len(zoneSet) > 1 {
			// Regional PD
			disk, err = gce.getRegionalDiskByName(name)
			if err != nil {
				return nil, err
			}
		} else {
			// Zonal PD
			disk, err = gce.getDiskByName(name, zone)
			if err != nil {
				return nil, err
			}
		}
	} else {
		// Zonal PD
		disk, err = gce.getDiskByName(name, zone)
		if err != nil {
			return nil, err
@ -1010,7 +948,7 @@ func (gce *GCECloud) getRegionalDiskByName(diskName string) (*GCEDisk, error) {
// Prefer getDiskByName, if the zone can be established
// Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone
func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) {
	if gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) {
	if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
		regionalDisk, err := gce.getRegionalDiskByName(diskName)
		if err == nil {
			return regionalDisk, err
@ -1020,7 +958,7 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error)
	// Note: this is the gotcha right now with GCE PD support:
	// disk names are not unique per-region.
	// (I can create two volumes with name "myvol" in e.g. us-central1-b & us-central1-f)
	// For now, this is simply undefined behvaiour.
	// For now, this is simply undefined behaviour.
	//
	// In future, we will have to require users to qualify their disk
	// "us-central1-a/mydisk". We could do this for them as part of
@ -1094,12 +1032,15 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error {
		}
		return gce.manager.WaitForZoneOp(deleteOp, zoneInfo.zone, mc)
	case multiZone:
		mc = newDiskMetricContextRegional("delete", disk.Region)
		deleteOp, err := gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)
		if err != nil {
			return mc.Observe(err)
		if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
			mc = newDiskMetricContextRegional("delete", disk.Region)
			deleteOp, err := gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)
			if err != nil {
				return mc.Observe(err)
			}
			return gce.manager.WaitForRegionalOp(deleteOp, mc)
		}
		return gce.manager.WaitForRegionalOp(deleteOp, mc)
		return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo)
	case nil:
		return fmt.Errorf("PD has nil ZoneInfo: %v", disk)
	default:
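Across this file the old AlphaFeatureGCEDisk checks are replaced by the GCERegionalPersistentDisk Kubernetes feature gate, and regional-PD code paths error out when it is off. A minimal sketch of that guard pattern, with a hypothetical featureGate type standing in for utilfeature.DefaultFeatureGate and the gate name taken from the diff:

package main

import (
	"errors"
	"fmt"
)

// featureGate is a stand-in for utilfeature.DefaultFeatureGate.
type featureGate map[string]bool

func (g featureGate) Enabled(name string) bool { return g[name] }

const gceRegionalPersistentDisk = "GCERegionalPersistentDisk" // gate name as it appears in the diff

var errRegionalPDDisabled = errors.New("the regional PD feature is only available with the " +
	gceRegionalPersistentDisk + " Kubernetes feature gate enabled")

// deleteRegionalDisk mirrors the guard in DeleteRegionalDiskOnCloudProvider:
// regional operations short-circuit with an error unless the gate is on.
func deleteRegionalDisk(gate featureGate, diskName string) error {
	if !gate.Enabled(gceRegionalPersistentDisk) {
		return errRegionalPDDisabled
	}
	fmt.Printf("would delete regional disk %q via the alpha RegionDisks API\n", diskName)
	return nil
}

func main() {
	fmt.Println(deleteRegionalDisk(featureGate{}, "pd-1"))                                // gate off -> error
	fmt.Println(deleteRegionalDisk(featureGate{gceRegionalPersistentDisk: true}, "pd-1")) // gate on -> nil
}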
Some files were not shown because too many files have changed in this diff.