mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00
vendor files
This commit is contained in:
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD
generated
vendored
Normal file
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cloud.go",
|
||||
"doc.go",
|
||||
"plugins.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider",
|
||||
deps = [
|
||||
"//pkg/controller:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/cloudprovider/providers:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
43
vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS
generated
vendored
Normal file
43
vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
approvers:
|
||||
- mikedanese
|
||||
- dims
|
||||
- wlan0
|
||||
reviewers:
|
||||
- thockin
|
||||
- lavalamp
|
||||
- smarterclayton
|
||||
- wojtek-t
|
||||
- deads2k
|
||||
- brendandburns
|
||||
- derekwaynecarr
|
||||
- caesarxuchao
|
||||
- vishh
|
||||
- mikedanese
|
||||
- liggitt
|
||||
- gmarek
|
||||
- erictune
|
||||
- davidopp
|
||||
- pmorie
|
||||
- sttts
|
||||
- quinton-hoole
|
||||
- dchen1107
|
||||
- saad-ali
|
||||
- zmerlynn
|
||||
- luxas
|
||||
- justinsb
|
||||
- roberthbailey
|
||||
- eparis
|
||||
- jlowdermilk
|
||||
- piosz
|
||||
- jsafrane
|
||||
- dims
|
||||
- krousey
|
||||
- rootfs
|
||||
- jszczepkowski
|
||||
- markturansky
|
||||
- girishkalele
|
||||
- jdef
|
||||
- freehan
|
||||
- jingxu97
|
||||
- wlan0
|
||||
- cheftako
|
16
vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md
generated
vendored
Normal file
16
vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Cloud Providers in this directory will continue to be actively developed or maintained and supported at their current level of support as a longer-term solution evolves.
|
||||
|
||||
## Overview:
|
||||
The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development.
|
||||
|
||||
#### Guidance for potential cloud providers:
|
||||
* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md).
|
||||
* In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information).
|
||||
* Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers.
|
||||
* It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved.
|
||||
* Though the fully productized support envisioned in the proposal is still 2 - 3 releases out, the foundational work is underway, and a motivated cloud provider could accomplish the work in a forward-looking way. Contributors are encouraged to assist with the implementation of the design outlined in the proposal.
|
||||
|
||||
#### Some additional context on status / direction:
|
||||
* 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow.
|
||||
* Setting cloud-provider=external allows for creation of a separate controller-manager binary
|
||||
* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization.
|
216
vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go
generated
vendored
Normal file
216
vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go
generated
vendored
Normal file
@ -0,0 +1,216 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudprovider
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
// Interface is an abstract, pluggable interface for cloud providers.
// Each capability accessor returns the sub-interface plus a bool that is
// true only when the provider supports that capability.
type Interface interface {
	// Initialize provides the cloud with a kubernetes client builder and may spawn goroutines
	// to perform housekeeping activities within the cloud provider.
	Initialize(clientBuilder controller.ControllerClientBuilder)
	// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
	LoadBalancer() (LoadBalancer, bool)
	// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
	Instances() (Instances, bool)
	// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
	Zones() (Zones, bool)
	// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
	Clusters() (Clusters, bool)
	// Routes returns a routes interface along with whether the interface is supported.
	Routes() (Routes, bool)
	// ProviderName returns the cloud provider ID.
	ProviderName() string
	// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
	ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
	// HasClusterID returns true if a ClusterID is required and set.
	HasClusterID() bool
}
|
||||
|
||||
// InformerUser is implemented by cloud providers that need access to
// shared informers (e.g. to watch cluster objects).
type InformerUser interface {
	// SetInformers sets the informer on the cloud object.
	SetInformers(informerFactory informers.SharedInformerFactory)
}
|
||||
|
||||
// Clusters is an abstract, pluggable interface for clusters of containers.
type Clusters interface {
	// ListClusters lists the names of the available clusters.
	ListClusters() ([]string, error)
	// Master gets back the address (either DNS name or IP address) of the master node for the cluster.
	Master(clusterName string) (string, error)
}
|
||||
|
||||
// TODO(#6812): Use a shorter name that's less likely to be longer than cloud
|
||||
// providers' name length limits.
|
||||
func GetLoadBalancerName(service *v1.Service) string {
|
||||
//GCE requires that the name of a load balancer starts with a lower case letter.
|
||||
ret := "a" + string(service.UID)
|
||||
ret = strings.Replace(ret, "-", "", -1)
|
||||
//AWS requires that the name of a load balancer is shorter than 32 bytes.
|
||||
if len(ret) > 32 {
|
||||
ret = ret[:32]
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// GetInstanceProviderID builds a ProviderID for a node in a cloud.
|
||||
func GetInstanceProviderID(cloud Interface, nodeName types.NodeName) (string, error) {
|
||||
instances, ok := cloud.Instances()
|
||||
if !ok {
|
||||
return "", fmt.Errorf("failed to get instances from cloud provider")
|
||||
}
|
||||
instanceID, err := instances.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
|
||||
}
|
||||
return cloud.ProviderName() + "://" + instanceID, nil
|
||||
}
|
||||
|
||||
// LoadBalancer is an abstract, pluggable interface for load balancers.
type LoadBalancer interface {
	// TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service
	// GetLoadBalancer returns whether the specified load balancer exists, and
	// if so, what its status is.
	// Implementations must treat the *v1.Service parameter as read-only and not modify it.
	// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager.
	GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
	// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer.
	// Implementations must treat the *v1.Service and *v1.Node
	// parameters as read-only and not modify them.
	// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager.
	EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
	// UpdateLoadBalancer updates hosts under the specified load balancer.
	// Implementations must treat the *v1.Service and *v1.Node
	// parameters as read-only and not modify them.
	// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager.
	UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
	// EnsureLoadBalancerDeleted deletes the specified load balancer if it
	// exists, returning nil if the load balancer specified either didn't exist or
	// was successfully deleted.
	// This construction is useful because many cloud providers' load balancers
	// have multiple underlying components, meaning a Get could say that the LB
	// doesn't exist even if some part of it is still laying around.
	// Implementations must treat the *v1.Service parameter as read-only and not modify it.
	// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager.
	EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error
}
|
||||
|
||||
// Instances is an abstract, pluggable interface for sets of instances.
type Instances interface {
	// NodeAddresses returns the addresses of the specified instance.
	// TODO(roberthbailey): This currently is only used in such a way that it
	// returns the address of the calling instance. We should do a rename to
	// make this clearer.
	NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
	// NodeAddressesByProviderID returns the addresses of the specified instance.
	// The instance is specified using the providerID of the node. The
	// ProviderID is a unique identifier of the node. This will not be called
	// from the node whose nodeaddresses are being queried. i.e. local metadata
	// services cannot be used in this method to obtain nodeaddresses.
	NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error)
	// ExternalID returns the cloud provider ID of the node with the specified NodeName.
	// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound).
	ExternalID(nodeName types.NodeName) (string, error)
	// InstanceID returns the cloud provider ID of the node with the specified NodeName.
	InstanceID(nodeName types.NodeName) (string, error)
	// InstanceType returns the type of the specified instance.
	InstanceType(name types.NodeName) (string, error)
	// InstanceTypeByProviderID returns the type of the specified instance.
	InstanceTypeByProviderID(providerID string) (string, error)
	// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances.
	// Expected format for the key is standard ssh-keygen format: <protocol> <blob>.
	AddSSHKeyToAllInstances(user string, keyData []byte) error
	// CurrentNodeName returns the name of the node we are currently running on.
	// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname.
	CurrentNodeName(hostname string) (types.NodeName, error)
	// InstanceExistsByProviderID returns true if the instance for the given provider id still is running.
	// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
	InstanceExistsByProviderID(providerID string) (bool, error)
}
|
||||
|
||||
// Route is a representation of an advanced routing rule.
type Route struct {
	// Name is the name of the routing rule in the cloud-provider.
	// It will be ignored in a Create (although nameHint may influence it).
	Name string
	// TargetNode is the NodeName of the target instance.
	TargetNode types.NodeName
	// DestinationCIDR is the CIDR format IP range that this routing rule
	// applies to.
	DestinationCIDR string
	// Blackhole is set to true if this is a blackhole route.
	// The node controller will delete the route if it is in the managed range.
	Blackhole bool
}
|
||||
|
||||
// Routes is an abstract, pluggable interface for advanced routing rules.
type Routes interface {
	// ListRoutes lists all managed routes that belong to the specified clusterName.
	ListRoutes(clusterName string) ([]*Route, error)
	// CreateRoute creates the described managed route.
	// route.Name will be ignored, although the cloud-provider may use nameHint
	// to create a more user-meaningful name.
	CreateRoute(clusterName string, nameHint string, route *Route) error
	// DeleteRoute deletes the specified managed route.
	// Route should be as returned by ListRoutes.
	DeleteRoute(clusterName string, route *Route) error
}
|
||||
|
||||
var (
	// InstanceNotFound is the sentinel error returned when a queried instance
	// does not exist or is no longer running (see Instances.ExternalID).
	InstanceNotFound = errors.New("instance not found")
	// DiskNotFound is the sentinel error returned when a requested disk cannot be found.
	DiskNotFound = errors.New("disk is not found")
	// NotImplemented is the sentinel error returned by operations a provider does not implement.
	NotImplemented = errors.New("unimplemented")
)
|
||||
|
||||
// Zone represents the location of a particular machine.
type Zone struct {
	// FailureDomain is the failure zone the machine is in.
	FailureDomain string
	// Region is the locality region the machine belongs to.
	Region string
}
|
||||
|
||||
// Zones is an abstract, pluggable interface for zone enumeration.
type Zones interface {
	// GetZone returns the Zone containing the current failure zone and locality region that the program is running in.
	// In most cases, this method is called from the kubelet querying a local metadata service to acquire its zone.
	// For the case of external cloud providers, use GetZoneByProviderID or GetZoneByNodeName since GetZone
	// can no longer be called from the kubelets.
	GetZone() (Zone, error)

	// GetZoneByProviderID returns the Zone containing the current zone and locality region of the node specified by providerID.
	// This method is particularly used in the context of external cloud providers where node initialization must be done
	// outside the kubelets.
	GetZoneByProviderID(providerID string) (Zone, error)

	// GetZoneByNodeName returns the Zone containing the current zone and locality region of the node specified by node name.
	// This method is particularly used in the context of external cloud providers where node initialization must be done
	// outside the kubelets.
	GetZoneByNodeName(nodeName types.NodeName) (Zone, error)
}
|
||||
|
||||
// PVLabeler is an abstract, pluggable interface for fetching labels for volumes.
type PVLabeler interface {
	// GetLabelsForVolume returns the provider-specific labels for the given persistent volume.
	GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error)
}
|
18
vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go
generated
vendored
Normal file
18
vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package cloudprovider supplies interfaces and implementations for cloud service providers.
|
||||
package cloudprovider // import "k8s.io/kubernetes/pkg/cloudprovider"
|
134
vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go
generated
vendored
Normal file
134
vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudprovider
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Factory is a function that returns a cloudprovider.Interface.
// The config parameter provides an io.Reader handler to the factory in
// order to load specific configurations. If no configuration is provided
// the parameter is nil.
type Factory func(config io.Reader) (Interface, error)
|
||||
|
||||
// All registered cloud providers.
var (
	// providersMutex guards every access to the providers map below.
	providersMutex sync.Mutex
	// providers maps a provider name to the Factory that constructs it.
	providers = make(map[string]Factory)
)
|
||||
|
||||
// externalCloudProvider is the reserved provider name that selects an
// out-of-tree (external) cloud provider; see IsExternal and InitCloudProvider.
const externalCloudProvider = "external"
|
||||
|
||||
// RegisterCloudProvider registers a cloudprovider.Factory by name. This
|
||||
// is expected to happen during app startup.
|
||||
func RegisterCloudProvider(name string, cloud Factory) {
|
||||
providersMutex.Lock()
|
||||
defer providersMutex.Unlock()
|
||||
if _, found := providers[name]; found {
|
||||
glog.Fatalf("Cloud provider %q was registered twice", name)
|
||||
}
|
||||
glog.V(1).Infof("Registered cloud provider %q", name)
|
||||
providers[name] = cloud
|
||||
}
|
||||
|
||||
// IsCloudProvider returns true if name corresponds to an already registered
|
||||
// cloud provider.
|
||||
func IsCloudProvider(name string) bool {
|
||||
providersMutex.Lock()
|
||||
defer providersMutex.Unlock()
|
||||
_, found := providers[name]
|
||||
return found
|
||||
}
|
||||
|
||||
// CloudProviders returns the name of all registered cloud providers in a
|
||||
// string slice
|
||||
func CloudProviders() []string {
|
||||
names := []string{}
|
||||
providersMutex.Lock()
|
||||
defer providersMutex.Unlock()
|
||||
for name := range providers {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// GetCloudProvider creates an instance of the named cloud provider, or nil if
|
||||
// the name is unknown. The error return is only used if the named provider
|
||||
// was known but failed to initialize. The config parameter specifies the
|
||||
// io.Reader handler of the configuration file for the cloud provider, or nil
|
||||
// for no configuation.
|
||||
func GetCloudProvider(name string, config io.Reader) (Interface, error) {
|
||||
providersMutex.Lock()
|
||||
defer providersMutex.Unlock()
|
||||
f, found := providers[name]
|
||||
if !found {
|
||||
return nil, nil
|
||||
}
|
||||
return f(config)
|
||||
}
|
||||
|
||||
// IsExternal reports whether name selects the external (out-of-tree)
// cloud provider.
func IsExternal(name string) bool {
	return name == externalCloudProvider
}
|
||||
|
||||
// InitCloudProvider creates an instance of the named cloud provider.
|
||||
func InitCloudProvider(name string, configFilePath string) (Interface, error) {
|
||||
var cloud Interface
|
||||
var err error
|
||||
|
||||
if name == "" {
|
||||
glog.Info("No cloud provider specified.")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if IsExternal(name) {
|
||||
glog.Info("External cloud provider specified")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if configFilePath != "" {
|
||||
var config *os.File
|
||||
config, err = os.Open(configFilePath)
|
||||
if err != nil {
|
||||
glog.Fatalf("Couldn't open cloud provider configuration %s: %#v",
|
||||
configFilePath, err)
|
||||
}
|
||||
|
||||
defer config.Close()
|
||||
cloud, err = GetCloudProvider(name, config)
|
||||
} else {
|
||||
// Pass explicit nil so plugins can actually check for nil. See
|
||||
// "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
|
||||
cloud, err = GetCloudProvider(name, nil)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not init cloud provider %q: %v", name, err)
|
||||
}
|
||||
if cloud == nil {
|
||||
return nil, fmt.Errorf("unknown cloud provider %q", name)
|
||||
}
|
||||
|
||||
return cloud, nil
|
||||
}
|
46
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD
generated
vendored
Normal file
46
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["providers.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers",
|
||||
deps = [
|
||||
"//pkg/cloudprovider/providers/aws:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure:go_default_library",
|
||||
"//pkg/cloudprovider/providers/cloudstack:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce:go_default_library",
|
||||
"//pkg/cloudprovider/providers/openstack:go_default_library",
|
||||
"//pkg/cloudprovider/providers/ovirt:go_default_library",
|
||||
"//pkg/cloudprovider/providers/photon:go_default_library",
|
||||
"//pkg/cloudprovider/providers/vsphere:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/cloudprovider/providers/aws:all-srcs",
|
||||
"//pkg/cloudprovider/providers/azure:all-srcs",
|
||||
"//pkg/cloudprovider/providers/cloudstack:all-srcs",
|
||||
"//pkg/cloudprovider/providers/fake:all-srcs",
|
||||
"//pkg/cloudprovider/providers/gce:all-srcs",
|
||||
"//pkg/cloudprovider/providers/openstack:all-srcs",
|
||||
"//pkg/cloudprovider/providers/ovirt:all-srcs",
|
||||
"//pkg/cloudprovider/providers/photon:all-srcs",
|
||||
"//pkg/cloudprovider/providers/vsphere:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
104
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD
generated
vendored
Normal file
104
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"aws.go",
|
||||
"aws_fakes.go",
|
||||
"aws_instancegroups.go",
|
||||
"aws_loadbalancer.go",
|
||||
"aws_metrics.go",
|
||||
"aws_routes.go",
|
||||
"aws_utils.go",
|
||||
"device_allocator.go",
|
||||
"instances.go",
|
||||
"log_handler.go",
|
||||
"regions.go",
|
||||
"retry_handler.go",
|
||||
"sets_ippermissions.go",
|
||||
"tags.go",
|
||||
"volumes.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws",
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/credentialprovider/aws:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/ec2metadata:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/gopkg.in/gcfg.v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"aws_loadbalancer_test.go",
|
||||
"aws_test.go",
|
||||
"device_allocator_test.go",
|
||||
"instances_test.go",
|
||||
"regions_test.go",
|
||||
"retry_handler_test.go",
|
||||
"tags_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/mock:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/OWNERS
generated
vendored
Normal file
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/OWNERS
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
approvers:
|
||||
- justinsb
|
||||
- zmerlynn
|
||||
reviewers:
|
||||
- gnufied
|
||||
- jsafrane
|
||||
- justinsb
|
||||
- zmerlynn
|
4273
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go
generated
vendored
Normal file
4273
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
534
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go
generated
vendored
Normal file
534
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go
generated
vendored
Normal file
@ -0,0 +1,534 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/elb"
|
||||
"github.com/aws/aws-sdk-go/service/elbv2"
|
||||
"github.com/aws/aws-sdk-go/service/kms"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// FakeAWSServices is an in-memory stand-in for the AWS service clients,
// used by the aws cloud provider tests.
type FakeAWSServices struct {
	// region is the AWS region the fake pretends to run in.
	region string
	// instances are all EC2 instances known to the fake.
	instances []*ec2.Instance
	// selfInstance is the instance the fake metadata service describes.
	selfInstance *ec2.Instance
	// networkInterfacesMacs / PrivateIPs / VpcIDs back the fake metadata
	// responses for the self instance's network interfaces.
	networkInterfacesMacs       []string
	networkInterfacesPrivateIPs [][]string
	networkInterfacesVpcIDs     []string

	// Fake service clients handed out by the accessor methods below.
	ec2      FakeEC2
	elb      ELB
	elbv2    ELBV2
	asg      *FakeASG
	metadata *FakeMetadata
	kms      *FakeKMS
}
|
||||
|
||||
func NewFakeAWSServices(clusterId string) *FakeAWSServices {
|
||||
s := &FakeAWSServices{}
|
||||
s.region = "us-east-1"
|
||||
s.ec2 = &FakeEC2Impl{aws: s}
|
||||
s.elb = &FakeELB{aws: s}
|
||||
s.elbv2 = &FakeELBV2{aws: s}
|
||||
s.asg = &FakeASG{aws: s}
|
||||
s.metadata = &FakeMetadata{aws: s}
|
||||
s.kms = &FakeKMS{aws: s}
|
||||
|
||||
s.networkInterfacesMacs = []string{"aa:bb:cc:dd:ee:00", "aa:bb:cc:dd:ee:01"}
|
||||
s.networkInterfacesVpcIDs = []string{"vpc-mac0", "vpc-mac1"}
|
||||
|
||||
selfInstance := &ec2.Instance{}
|
||||
selfInstance.InstanceId = aws.String("i-self")
|
||||
selfInstance.Placement = &ec2.Placement{
|
||||
AvailabilityZone: aws.String("us-east-1a"),
|
||||
}
|
||||
selfInstance.PrivateDnsName = aws.String("ip-172-20-0-100.ec2.internal")
|
||||
selfInstance.PrivateIpAddress = aws.String("192.168.0.1")
|
||||
selfInstance.PublicIpAddress = aws.String("1.2.3.4")
|
||||
s.selfInstance = selfInstance
|
||||
s.instances = []*ec2.Instance{selfInstance}
|
||||
|
||||
var tag ec2.Tag
|
||||
tag.Key = aws.String(TagNameKubernetesClusterLegacy)
|
||||
tag.Value = aws.String(clusterId)
|
||||
selfInstance.Tags = []*ec2.Tag{&tag}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices {
|
||||
if s.selfInstance.Placement == nil {
|
||||
s.selfInstance.Placement = &ec2.Placement{}
|
||||
}
|
||||
s.selfInstance.Placement.AvailabilityZone = aws.String(az)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) Compute(region string) (EC2, error) {
|
||||
return s.ec2, nil
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) LoadBalancing(region string) (ELB, error) {
|
||||
return s.elb, nil
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) LoadBalancingV2(region string) (ELBV2, error) {
|
||||
return s.elbv2, nil
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) Autoscaling(region string) (ASG, error) {
|
||||
return s.asg, nil
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) Metadata() (EC2Metadata, error) {
|
||||
return s.metadata, nil
|
||||
}
|
||||
|
||||
func (s *FakeAWSServices) KeyManagement(region string) (KMS, error) {
|
||||
return s.kms, nil
|
||||
}
|
||||
|
||||
type FakeEC2 interface {
|
||||
EC2
|
||||
CreateSubnet(*ec2.Subnet) (*ec2.CreateSubnetOutput, error)
|
||||
RemoveSubnets()
|
||||
CreateRouteTable(*ec2.RouteTable) (*ec2.CreateRouteTableOutput, error)
|
||||
RemoveRouteTables()
|
||||
}
|
||||
|
||||
type FakeEC2Impl struct {
|
||||
aws *FakeAWSServices
|
||||
Subnets []*ec2.Subnet
|
||||
DescribeSubnetsInput *ec2.DescribeSubnetsInput
|
||||
RouteTables []*ec2.RouteTable
|
||||
DescribeRouteTablesInput *ec2.DescribeRouteTablesInput
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
|
||||
matches := []*ec2.Instance{}
|
||||
for _, instance := range ec2i.aws.instances {
|
||||
if request.InstanceIds != nil {
|
||||
if instance.InstanceId == nil {
|
||||
glog.Warning("Instance with no instance id: ", instance)
|
||||
continue
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, instanceID := range request.InstanceIds {
|
||||
if *instanceID == *instance.InstanceId {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if request.Filters != nil {
|
||||
allMatch := true
|
||||
for _, filter := range request.Filters {
|
||||
if !instanceMatchesFilter(instance, filter) {
|
||||
allMatch = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !allMatch {
|
||||
continue
|
||||
}
|
||||
}
|
||||
matches = append(matches, instance)
|
||||
}
|
||||
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) AttachVolume(request *ec2.AttachVolumeInput) (resp *ec2.VolumeAttachment, err error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DeleteVolume(request *ec2.DeleteVolumeInput) (resp *ec2.DeleteVolumeOutput, err error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOutput, error) {
|
||||
ec2i.Subnets = append(ec2i.Subnets, request)
|
||||
response := &ec2.CreateSubnetOutput{
|
||||
Subnet: request,
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
|
||||
ec2i.DescribeSubnetsInput = request
|
||||
return ec2i.Subnets, nil
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) RemoveSubnets() {
|
||||
ec2i.Subnets = ec2i.Subnets[:0]
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) {
|
||||
ec2i.DescribeRouteTablesInput = request
|
||||
return ec2i.RouteTables, nil
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateRouteTableOutput, error) {
|
||||
ec2i.RouteTables = append(ec2i.RouteTables, request)
|
||||
response := &ec2.CreateRouteTableOutput{
|
||||
RouteTable: request,
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) RemoveRouteTables() {
|
||||
ec2i.RouteTables = ec2i.RouteTables[:0]
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (ec2i *FakeEC2Impl) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) {
|
||||
return &ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{{CidrBlock: aws.String("172.20.0.0/16")}}}, nil
|
||||
}
|
||||
|
||||
type FakeMetadata struct {
|
||||
aws *FakeAWSServices
|
||||
}
|
||||
|
||||
func (m *FakeMetadata) GetMetadata(key string) (string, error) {
|
||||
networkInterfacesPrefix := "network/interfaces/macs/"
|
||||
i := m.aws.selfInstance
|
||||
if key == "placement/availability-zone" {
|
||||
az := ""
|
||||
if i.Placement != nil {
|
||||
az = aws.StringValue(i.Placement.AvailabilityZone)
|
||||
}
|
||||
return az, nil
|
||||
} else if key == "instance-id" {
|
||||
return aws.StringValue(i.InstanceId), nil
|
||||
} else if key == "local-hostname" {
|
||||
return aws.StringValue(i.PrivateDnsName), nil
|
||||
} else if key == "public-hostname" {
|
||||
return aws.StringValue(i.PublicDnsName), nil
|
||||
} else if key == "local-ipv4" {
|
||||
return aws.StringValue(i.PrivateIpAddress), nil
|
||||
} else if key == "public-ipv4" {
|
||||
return aws.StringValue(i.PublicIpAddress), nil
|
||||
} else if strings.HasPrefix(key, networkInterfacesPrefix) {
|
||||
if key == networkInterfacesPrefix {
|
||||
return strings.Join(m.aws.networkInterfacesMacs, "/\n") + "/\n", nil
|
||||
} else {
|
||||
keySplit := strings.Split(key, "/")
|
||||
macParam := keySplit[3]
|
||||
if len(keySplit) == 5 && keySplit[4] == "vpc-id" {
|
||||
for i, macElem := range m.aws.networkInterfacesMacs {
|
||||
if macParam == macElem {
|
||||
return m.aws.networkInterfacesVpcIDs[i], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" {
|
||||
for i, macElem := range m.aws.networkInterfacesMacs {
|
||||
if macParam == macElem {
|
||||
return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
} else {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
type FakeELB struct {
|
||||
aws *FakeAWSServices
|
||||
}
|
||||
|
||||
func (elb *FakeELB) CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DeleteLoadBalancer(input *elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DescribeLoadBalancers(input *elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) AddTags(input *elb.AddTagsInput) (*elb.AddTagsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (elb *FakeELB) ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
type FakeELBV2 struct {
|
||||
aws *FakeAWSServices
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
func (self *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (self *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
type FakeASG struct {
|
||||
aws *FakeAWSServices
|
||||
}
|
||||
|
||||
func (a *FakeASG) UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func (a *FakeASG) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
type FakeKMS struct {
|
||||
aws *FakeAWSServices
|
||||
}
|
||||
|
||||
func (kms *FakeKMS) DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
func instanceMatchesFilter(instance *ec2.Instance, filter *ec2.Filter) bool {
|
||||
name := *filter.Name
|
||||
if name == "private-dns-name" {
|
||||
if instance.PrivateDnsName == nil {
|
||||
return false
|
||||
}
|
||||
return contains(filter.Values, *instance.PrivateDnsName)
|
||||
}
|
||||
|
||||
if name == "instance-state-name" {
|
||||
return contains(filter.Values, *instance.State.Name)
|
||||
}
|
||||
|
||||
if name == "tag-key" {
|
||||
for _, instanceTag := range instance.Tags {
|
||||
if contains(filter.Values, aws.StringValue(instanceTag.Key)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "tag:") {
|
||||
tagName := name[4:]
|
||||
for _, instanceTag := range instance.Tags {
|
||||
if aws.StringValue(instanceTag.Key) == tagName && contains(filter.Values, aws.StringValue(instanceTag.Value)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
panic("Unknown filter name: " + name)
|
||||
}
|
||||
|
||||
func contains(haystack []*string, needle string) bool {
|
||||
for _, s := range haystack {
|
||||
// (deliberately panic if s == nil)
|
||||
if needle == *s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
90
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go
generated
vendored
Normal file
90
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/autoscaling"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// AWSCloud implements InstanceGroups
|
||||
var _ InstanceGroups = &Cloud{}
|
||||
|
||||
// ResizeInstanceGroup sets the size of the specificed instancegroup Exported
|
||||
// so it can be used by the e2e tests, which don't want to instantiate a full
|
||||
// cloudprovider.
|
||||
func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error {
|
||||
request := &autoscaling.UpdateAutoScalingGroupInput{
|
||||
AutoScalingGroupName: aws.String(instanceGroupName),
|
||||
MinSize: aws.Int64(int64(size)),
|
||||
MaxSize: aws.Int64(int64(size)),
|
||||
}
|
||||
if _, err := asg.UpdateAutoScalingGroup(request); err != nil {
|
||||
return fmt.Errorf("error resizing AWS autoscaling group: %q", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implement InstanceGroups.ResizeInstanceGroup
|
||||
// Set the size to the fixed size
|
||||
func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error {
|
||||
return ResizeInstanceGroup(c.asg, instanceGroupName, size)
|
||||
}
|
||||
|
||||
// DescribeInstanceGroup gets info about the specified instancegroup
|
||||
// Exported so it can be used by the e2e tests,
|
||||
// which don't want to instantiate a full cloudprovider.
|
||||
func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo, error) {
|
||||
request := &autoscaling.DescribeAutoScalingGroupsInput{
|
||||
AutoScalingGroupNames: []*string{aws.String(instanceGroupName)},
|
||||
}
|
||||
response, err := asg.DescribeAutoScalingGroups(request)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing AWS autoscaling group (%s): %q", instanceGroupName, err)
|
||||
}
|
||||
|
||||
if len(response.AutoScalingGroups) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(response.AutoScalingGroups) > 1 {
|
||||
glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName)
|
||||
}
|
||||
group := response.AutoScalingGroups[0]
|
||||
return &awsInstanceGroup{group: group}, nil
|
||||
}
|
||||
|
||||
// Implement InstanceGroups.DescribeInstanceGroup
|
||||
// Queries the cloud provider for information about the specified instance group
|
||||
func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) {
|
||||
return DescribeInstanceGroup(c.asg, instanceGroupName)
|
||||
}
|
||||
|
||||
// awsInstanceGroup implements InstanceGroupInfo
|
||||
var _ InstanceGroupInfo = &awsInstanceGroup{}
|
||||
|
||||
type awsInstanceGroup struct {
|
||||
group *autoscaling.Group
|
||||
}
|
||||
|
||||
// Implement InstanceGroupInfo.CurrentSize
|
||||
// The number of instances currently running under control of this group
|
||||
func (g *awsInstanceGroup) CurrentSize() (int, error) {
|
||||
return len(g.group.Instances), nil
|
||||
}
|
1453
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
generated
vendored
Normal file
1453
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
161
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer_test.go
generated
vendored
Normal file
161
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer_test.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestElbProtocolsAreEqual(t *testing.T) {
|
||||
grid := []struct {
|
||||
L *string
|
||||
R *string
|
||||
Expected bool
|
||||
}{
|
||||
{
|
||||
L: aws.String("http"),
|
||||
R: aws.String("http"),
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
L: aws.String("HTTP"),
|
||||
R: aws.String("http"),
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
L: aws.String("HTTP"),
|
||||
R: aws.String("TCP"),
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: aws.String(""),
|
||||
R: aws.String("TCP"),
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: aws.String(""),
|
||||
R: aws.String(""),
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
L: nil,
|
||||
R: aws.String(""),
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: aws.String(""),
|
||||
R: nil,
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: nil,
|
||||
R: nil,
|
||||
Expected: true,
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
actual := elbProtocolsAreEqual(g.L, g.R)
|
||||
if actual != g.Expected {
|
||||
t.Errorf("unexpected result from protocolsEquals(%v, %v)", g.L, g.R)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAWSARNEquals(t *testing.T) {
|
||||
grid := []struct {
|
||||
L *string
|
||||
R *string
|
||||
Expected bool
|
||||
}{
|
||||
{
|
||||
L: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
|
||||
R: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
L: aws.String("ARN:AWS:ACM:US-EAST-1:123456789012:CERTIFICATE/12345678-1234-1234-1234-123456789012"),
|
||||
R: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
L: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
|
||||
R: aws.String(""),
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: aws.String(""),
|
||||
R: aws.String(""),
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
L: nil,
|
||||
R: aws.String(""),
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: aws.String(""),
|
||||
R: nil,
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
L: nil,
|
||||
R: nil,
|
||||
Expected: true,
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
actual := awsArnEquals(g.L, g.R)
|
||||
if actual != g.Expected {
|
||||
t.Errorf("unexpected result from awsArnEquals(%v, %v)", g.L, g.R)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsNLB(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
annotations map[string]string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
"NLB annotation provided",
|
||||
map[string]string{"service.beta.kubernetes.io/aws-load-balancer-type": "nlb"},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"NLB annotation has invalid value",
|
||||
map[string]string{"service.beta.kubernetes.io/aws-load-balancer-type": "elb"},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"NLB annotation absent",
|
||||
map[string]string{},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Logf("Running test case %s", test.name)
|
||||
got := isNLB(test.annotations)
|
||||
|
||||
if got != test.want {
|
||||
t.Errorf("Incorrect value for isNLB() case %s. Got %t, expected %t.", test.name, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
40
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_metrics.go
generated
vendored
Normal file
40
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_metrics.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
var awsApiMetric = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cloudprovider_aws_api_request_duration_seconds",
|
||||
Help: "Latency of aws api call",
|
||||
},
|
||||
[]string{"request"},
|
||||
)
|
||||
|
||||
var awsApiErrorMetric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cloudprovider_aws_api_request_errors",
|
||||
Help: "AWS Api errors",
|
||||
},
|
||||
[]string{"request"},
|
||||
)
|
||||
|
||||
func registerMetrics() {
|
||||
prometheus.MustRegister(awsApiMetric)
|
||||
prometheus.MustRegister(awsApiErrorMetric)
|
||||
}
|
217
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go
generated
vendored
Normal file
217
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go
generated
vendored
Normal file
@ -0,0 +1,217 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {
|
||||
// This should be unnecessary (we already filter on TagNameKubernetesCluster,
|
||||
// and something is broken if cluster name doesn't match, but anyway...
|
||||
// TODO: All clouds should be cluster-aware by default
|
||||
var tables []*ec2.RouteTable
|
||||
|
||||
if c.cfg.Global.RouteTableID != "" {
|
||||
request := &ec2.DescribeRouteTablesInput{Filters: []*ec2.Filter{newEc2Filter("route-table-id", c.cfg.Global.RouteTableID)}}
|
||||
response, err := c.ec2.DescribeRouteTables(request)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tables = response
|
||||
} else {
|
||||
request := &ec2.DescribeRouteTablesInput{Filters: c.tagging.addFilters(nil)}
|
||||
response, err := c.ec2.DescribeRouteTables(request)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, table := range response {
|
||||
if c.tagging.hasClusterTag(table.Tags) {
|
||||
tables = append(tables, table)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(tables) == 0 {
|
||||
return nil, fmt.Errorf("unable to find route table for AWS cluster: %s", clusterName)
|
||||
}
|
||||
|
||||
if len(tables) != 1 {
|
||||
return nil, fmt.Errorf("found multiple matching AWS route tables for AWS cluster: %s", clusterName)
|
||||
}
|
||||
return tables[0], nil
|
||||
}
|
||||
|
||||
// ListRoutes implements Routes.ListRoutes
|
||||
// List all routes that match the filter
|
||||
func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
|
||||
table, err := c.findRouteTable(clusterName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var routes []*cloudprovider.Route
|
||||
var instanceIDs []*string
|
||||
|
||||
for _, r := range table.Routes {
|
||||
instanceID := aws.StringValue(r.InstanceId)
|
||||
|
||||
if instanceID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
instanceIDs = append(instanceIDs, &instanceID)
|
||||
}
|
||||
|
||||
instances, err := c.getInstancesByIDs(instanceIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, r := range table.Routes {
|
||||
destinationCIDR := aws.StringValue(r.DestinationCidrBlock)
|
||||
if destinationCIDR == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
route := &cloudprovider.Route{
|
||||
Name: clusterName + "-" + destinationCIDR,
|
||||
DestinationCIDR: destinationCIDR,
|
||||
}
|
||||
|
||||
// Capture blackhole routes
|
||||
if aws.StringValue(r.State) == ec2.RouteStateBlackhole {
|
||||
route.Blackhole = true
|
||||
routes = append(routes, route)
|
||||
continue
|
||||
}
|
||||
|
||||
// Capture instance routes
|
||||
instanceID := aws.StringValue(r.InstanceId)
|
||||
if instanceID != "" {
|
||||
instance, found := instances[instanceID]
|
||||
if found {
|
||||
route.TargetNode = mapInstanceToNodeName(instance)
|
||||
routes = append(routes, route)
|
||||
} else {
|
||||
glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
// Sets the instance attribute "source-dest-check" to the specified value
|
||||
func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCheck bool) error {
|
||||
request := &ec2.ModifyInstanceAttributeInput{}
|
||||
request.InstanceId = aws.String(instanceID)
|
||||
request.SourceDestCheck = &ec2.AttributeBooleanValue{Value: aws.Bool(sourceDestCheck)}
|
||||
|
||||
_, err := c.ec2.ModifyInstanceAttribute(request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error configuring source-dest-check on instance %s: %q", instanceID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateRoute implements Routes.CreateRoute
// Create the described route
func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
	instance, err := c.getInstanceByNodeName(route.TargetNode)
	if err != nil {
		return err
	}

	// In addition to configuring the route itself, we also need to configure the instance to accept that traffic
	// On AWS, this requires turning source-dest checks off
	err = c.configureInstanceSourceDestCheck(aws.StringValue(instance.InstanceId), false)
	if err != nil {
		return err
	}

	table, err := c.findRouteTable(clusterName)
	if err != nil {
		return err
	}

	// If an existing route for the same CIDR is blackholed, remember it so it
	// can be removed before we recreate the route below.
	var deleteRoute *ec2.Route
	for _, r := range table.Routes {
		destinationCIDR := aws.StringValue(r.DestinationCidrBlock)

		if destinationCIDR != route.DestinationCIDR {
			continue
		}

		if aws.StringValue(r.State) == ec2.RouteStateBlackhole {
			deleteRoute = r
		}
	}

	if deleteRoute != nil {
		glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))

		request := &ec2.DeleteRouteInput{}
		request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock
		request.RouteTableId = table.RouteTableId

		_, err = c.ec2.DeleteRoute(request)
		if err != nil {
			return fmt.Errorf("error deleting blackholed AWS route (%s): %q", aws.StringValue(deleteRoute.DestinationCidrBlock), err)
		}
	}

	request := &ec2.CreateRouteInput{}
	// TODO: use ClientToken for idempotency?
	request.DestinationCidrBlock = aws.String(route.DestinationCIDR)
	request.InstanceId = instance.InstanceId
	request.RouteTableId = table.RouteTableId

	_, err = c.ec2.CreateRoute(request)
	if err != nil {
		return fmt.Errorf("error creating AWS route (%s): %q", route.DestinationCIDR, err)
	}

	return nil
}
|
||||
|
||||
// DeleteRoute implements Routes.DeleteRoute
|
||||
// Delete the specified route
|
||||
func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
|
||||
table, err := c.findRouteTable(clusterName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
request := &ec2.DeleteRouteInput{}
|
||||
request.DestinationCidrBlock = aws.String(route.DestinationCIDR)
|
||||
request.RouteTableId = table.RouteTableId
|
||||
|
||||
_, err = c.ec2.DeleteRoute(request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting AWS route (%s): %q", route.DestinationCIDR, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
1302
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_test.go
generated
vendored
Normal file
1302
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
50
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go
generated
vendored
Normal file
50
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
func stringSetToPointers(in sets.String) []*string {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := make([]*string, 0, len(in))
|
||||
for k := range in {
|
||||
out = append(out, aws.String(k))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func stringSetFromPointers(in []*string) sets.String {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := sets.NewString()
|
||||
for i := range in {
|
||||
out.Insert(aws.StringValue(in[i]))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// orZero returns the value, or 0 if the pointer is nil
// Deprecated: prefer aws.Int64Value
func orZero(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}
|
130
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go
generated
vendored
Normal file
130
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ExistingDevices is a map of assigned devices. Presence of a key with a device
// name in the map means that the device is allocated. Value is irrelevant and
// can be used for anything that DeviceAllocator user wants.
// Only the relevant part of device name should be in the map, e.g. "ba" for
// "/dev/xvdba".
type ExistingDevices map[mountDevice]awsVolumeID

// DeviceAllocator finds an unused device name for a volume attachment.
//
// On AWS, we should assign new (not yet used) device names to attached volumes.
// If we reuse a previously used name, we may get the volume "attaching" forever,
// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/.
// DeviceAllocator finds available device name, taking into account already
// assigned device names from ExistingDevices map. It tries to find the next
// device name to the previously assigned one (from previous DeviceAllocator
// call), so all available device names are used eventually and it minimizes
// device name reuse.
// All these allocations are in-memory, nothing is written to / read from
// /dev directory.
type DeviceAllocator interface {
	// GetNext returns a free device name or error when there is no free device
	// name. Only the device suffix is returned, e.g. "ba" for "/dev/xvdba".
	// It's up to the caller to add appropriate "/dev/sd" or "/dev/xvd" prefix.
	GetNext(existingDevices ExistingDevices) (mountDevice, error)

	// Deprioritize the device so as it can't be used immediately again
	Deprioritize(mountDevice)

	// Lock the deviceAllocator
	Lock()

	// Unlock the deviceAllocator
	Unlock()
}
|
||||
|
||||
// deviceAllocator is the default DeviceAllocator implementation.
type deviceAllocator struct {
	// possibleDevices maps each device name to a counter stamped when it was
	// last deprioritized; lower counters are preferred by GetNext.
	possibleDevices map[mountDevice]int
	// counter is a monotonically increasing stamp used by Deprioritize.
	counter int
	// deviceLock is exposed via Lock/Unlock; callers hold it across GetNext.
	deviceLock sync.Mutex
}

var _ DeviceAllocator = &deviceAllocator{}

// devicePair pairs a device name with its deprioritization counter.
type devicePair struct {
	deviceName  mountDevice
	deviceIndex int
}

// devicePairList implements sort.Interface, ordering by counter ascending.
type devicePairList []devicePair

func (p devicePairList) Len() int           { return len(p) }
func (p devicePairList) Less(i, j int) bool { return p[i].deviceIndex < p[j].deviceIndex }
func (p devicePairList) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// Allocates device names according to scheme ba..bz, ca..cz
|
||||
// it moves along the ring and always picks next device until
|
||||
// device list is exhausted.
|
||||
func NewDeviceAllocator() DeviceAllocator {
|
||||
possibleDevices := make(map[mountDevice]int)
|
||||
for _, firstChar := range []rune{'b', 'c'} {
|
||||
for i := 'a'; i <= 'z'; i++ {
|
||||
dev := mountDevice([]rune{firstChar, i})
|
||||
possibleDevices[dev] = 0
|
||||
}
|
||||
}
|
||||
return &deviceAllocator{
|
||||
possibleDevices: possibleDevices,
|
||||
counter: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// GetNext gets next available device from the pool, this function assumes that caller
|
||||
// holds the necessary lock on deviceAllocator
|
||||
func (d *deviceAllocator) GetNext(existingDevices ExistingDevices) (mountDevice, error) {
|
||||
for _, devicePair := range d.sortByCount() {
|
||||
if _, found := existingDevices[devicePair.deviceName]; !found {
|
||||
return devicePair.deviceName, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no devices are available")
|
||||
}
|
||||
|
||||
func (d *deviceAllocator) sortByCount() devicePairList {
|
||||
dpl := make(devicePairList, 0)
|
||||
for deviceName, deviceIndex := range d.possibleDevices {
|
||||
dpl = append(dpl, devicePair{deviceName, deviceIndex})
|
||||
}
|
||||
sort.Sort(dpl)
|
||||
return dpl
|
||||
}
|
||||
|
||||
// Lock acquires the allocator's internal mutex; per GetNext's contract the
// caller must hold it while calling GetNext.
func (d *deviceAllocator) Lock() {
	d.deviceLock.Lock()
}

// Unlock releases the mutex acquired by Lock.
func (d *deviceAllocator) Unlock() {
	d.deviceLock.Unlock()
}
|
||||
|
||||
// Deprioritize the device so as it can't be used immediately again
func (d *deviceAllocator) Deprioritize(chosen mountDevice) {
	d.deviceLock.Lock()
	defer d.deviceLock.Unlock()
	// Stamp the device with a counter higher than every previous stamp so
	// sortByCount orders it last; unknown device names are ignored.
	if _, ok := d.possibleDevices[chosen]; ok {
		d.counter++
		d.possibleDevices[chosen] = d.counter
	}
}
|
81
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator_test.go
generated
vendored
Normal file
81
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator_test.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDeviceAllocator(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
existingDevices ExistingDevices
|
||||
deviceMap map[mountDevice]int
|
||||
expectedOutput mountDevice
|
||||
}{
|
||||
{
|
||||
"empty device list with wrap",
|
||||
ExistingDevices{},
|
||||
generateUnsortedDeviceList(),
|
||||
"bd", // next to 'zz' is the first one, 'ba'
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
allocator := NewDeviceAllocator().(*deviceAllocator)
|
||||
for k, v := range test.deviceMap {
|
||||
allocator.possibleDevices[k] = v
|
||||
}
|
||||
|
||||
got, err := allocator.GetNext(test.existingDevices)
|
||||
if err != nil {
|
||||
t.Errorf("text %q: unexpected error: %v", test.name, err)
|
||||
}
|
||||
if got != test.expectedOutput {
|
||||
t.Errorf("text %q: expected %q, got %q", test.name, test.expectedOutput, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func generateUnsortedDeviceList() map[mountDevice]int {
|
||||
possibleDevices := make(map[mountDevice]int)
|
||||
for _, firstChar := range []rune{'b', 'c'} {
|
||||
for i := 'a'; i <= 'z'; i++ {
|
||||
dev := mountDevice([]rune{firstChar, i})
|
||||
possibleDevices[dev] = 3
|
||||
}
|
||||
}
|
||||
possibleDevices["bd"] = 0
|
||||
return possibleDevices
|
||||
}
|
||||
|
||||
func TestDeviceAllocatorError(t *testing.T) {
|
||||
allocator := NewDeviceAllocator().(*deviceAllocator)
|
||||
existingDevices := ExistingDevices{}
|
||||
|
||||
// make all devices used
|
||||
var first, second byte
|
||||
for first = 'b'; first <= 'c'; first++ {
|
||||
for second = 'a'; second <= 'z'; second++ {
|
||||
device := [2]byte{first, second}
|
||||
existingDevices[mountDevice(device[:])] = "used"
|
||||
}
|
||||
}
|
||||
|
||||
device, err := allocator.GetNext(existingDevices)
|
||||
if err == nil {
|
||||
t.Errorf("expected error, got device %q", device)
|
||||
}
|
||||
}
|
272
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go
generated
vendored
Normal file
272
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go
generated
vendored
Normal file
@ -0,0 +1,272 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// awsInstanceRegMatch represents Regex Match for AWS instance.
var awsInstanceRegMatch = regexp.MustCompile("^i-[^/]*$")

// awsInstanceID represents the ID of the instance in the AWS API, e.g. i-12345678
// The "traditional" format is "i-12345678"
// A new longer format is also being introduced: "i-12345678abcdef01"
// We should not assume anything about the length or format, though it seems
// reasonable to assume that instances will continue to start with "i-".
type awsInstanceID string

// awsString returns the instance ID as a *string, as expected by
// aws-sdk-go request inputs.
func (i awsInstanceID) awsString() *string {
	return aws.String(string(i))
}
|
||||
|
||||
// kubernetesInstanceID represents the id for an instance in the kubernetes API;
// it accepts the following forms:
//  * aws:///<zone>/<awsInstanceId>
//  * aws:////<awsInstanceId>
//  * <awsInstanceId>
type kubernetesInstanceID string

// mapToAWSInstanceID extracts the awsInstanceID from the kubernetesInstanceID.
// It returns an error when the value does not parse as one of the accepted
// forms or the extracted ID does not look like an instance ID.
func (name kubernetesInstanceID) mapToAWSInstanceID() (awsInstanceID, error) {
	s := string(name)

	if !strings.HasPrefix(s, "aws://") {
		// Assume a bare aws volume id (vol-1234...)
		// Build a URL with an empty host (AZ)
		s = "aws://" + "/" + "/" + s
	}
	url, err := url.Parse(s)
	if err != nil {
		return "", fmt.Errorf("Invalid instance name (%s): %v", name, err)
	}
	if url.Scheme != "aws" {
		return "", fmt.Errorf("Invalid scheme for AWS instance (%s)", name)
	}

	awsID := ""
	tokens := strings.Split(strings.Trim(url.Path, "/"), "/")
	if len(tokens) == 1 {
		// instanceId
		awsID = tokens[0]
	} else if len(tokens) == 2 {
		// az/instanceId
		awsID = tokens[1]
	}

	// We sanity check the resulting volume; the two known formats are
	// i-12345678 and i-12345678abcdef01
	if awsID == "" || !awsInstanceRegMatch.MatchString(awsID) {
		return "", fmt.Errorf("Invalid format for AWS instance (%s)", name)
	}

	return awsInstanceID(awsID), nil
}
|
||||
|
||||
// mapToAWSInstanceID extracts the awsInstanceIDs from the Nodes, returning an error if a Node cannot be mapped
|
||||
func mapToAWSInstanceIDs(nodes []*v1.Node) ([]awsInstanceID, error) {
|
||||
var instanceIDs []awsInstanceID
|
||||
for _, node := range nodes {
|
||||
if node.Spec.ProviderID == "" {
|
||||
return nil, fmt.Errorf("node %q did not have ProviderID set", node.Name)
|
||||
}
|
||||
instanceID, err := kubernetesInstanceID(node.Spec.ProviderID).mapToAWSInstanceID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name)
|
||||
}
|
||||
instanceIDs = append(instanceIDs, instanceID)
|
||||
}
|
||||
|
||||
return instanceIDs, nil
|
||||
}
|
||||
|
||||
// mapToAWSInstanceIDsTolerant extracts the awsInstanceIDs from the Nodes, skipping Nodes that cannot be mapped
|
||||
func mapToAWSInstanceIDsTolerant(nodes []*v1.Node) []awsInstanceID {
|
||||
var instanceIDs []awsInstanceID
|
||||
for _, node := range nodes {
|
||||
if node.Spec.ProviderID == "" {
|
||||
glog.Warningf("node %q did not have ProviderID set", node.Name)
|
||||
continue
|
||||
}
|
||||
instanceID, err := kubernetesInstanceID(node.Spec.ProviderID).mapToAWSInstanceID()
|
||||
if err != nil {
|
||||
glog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name)
|
||||
continue
|
||||
}
|
||||
instanceIDs = append(instanceIDs, instanceID)
|
||||
}
|
||||
|
||||
return instanceIDs
|
||||
}
|
||||
|
||||
// Gets the full information about this instance from the EC2 API
|
||||
func describeInstance(ec2Client EC2, instanceID awsInstanceID) (*ec2.Instance, error) {
|
||||
request := &ec2.DescribeInstancesInput{
|
||||
InstanceIds: []*string{instanceID.awsString()},
|
||||
}
|
||||
|
||||
instances, err := ec2Client.DescribeInstances(request)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(instances) == 0 {
|
||||
return nil, fmt.Errorf("no instances found for instance: %s", instanceID)
|
||||
}
|
||||
if len(instances) > 1 {
|
||||
return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
|
||||
}
|
||||
return instances[0], nil
|
||||
}
|
||||
|
||||
// instanceCache manages the cache of DescribeInstances
type instanceCache struct {
	// TODO: Get rid of this field, send all calls through the instanceCache
	cloud *Cloud

	// mutex guards snapshot.
	mutex    sync.Mutex
	snapshot *allInstancesSnapshot
}

// describeAllInstancesUncached gets the full information about all instances
// from the EC2 API and stores the result as the cached snapshot, unless a
// newer snapshot was stored concurrently.
func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, error) {
	// Timestamp taken before the API call so the snapshot's age is never
	// understated.
	now := time.Now()

	glog.V(4).Infof("EC2 DescribeInstances - fetching all instances")

	filters := []*ec2.Filter{}
	instances, err := c.cloud.describeInstances(filters)
	if err != nil {
		return nil, err
	}

	// Index the instances by ID for fast lookup.
	m := make(map[awsInstanceID]*ec2.Instance)
	for _, i := range instances {
		id := awsInstanceID(aws.StringValue(i.InstanceId))
		m[id] = i
	}

	snapshot := &allInstancesSnapshot{now, m}

	c.mutex.Lock()
	defer c.mutex.Unlock()

	if c.snapshot != nil && snapshot.olderThan(c.snapshot) {
		// If this happens a lot, we could run this function in a mutex and only return one result
		glog.Infof("Not caching concurrent AWS DescribeInstances results")
	} else {
		c.snapshot = snapshot
	}

	return snapshot, nil
}
|
||||
|
||||
// cacheCriteria holds criteria that must hold to use a cached snapshot
type cacheCriteria struct {
	// MaxAge indicates the maximum age of a cached snapshot we can accept.
	// If set to 0 (i.e. unset), cached values will not time out because of age.
	MaxAge time.Duration

	// HasInstances is a list of awsInstanceIDs that must be in a cached snapshot for it to be considered valid.
	// If an instance is not found in the cached snapshot, the snapshot will be ignored and we will re-fetch.
	HasInstances []awsInstanceID
}
|
||||
|
||||
// describeAllInstancesCached returns all instances, using cached results if applicable
|
||||
func (c *instanceCache) describeAllInstancesCached(criteria cacheCriteria) (*allInstancesSnapshot, error) {
|
||||
var err error
|
||||
snapshot := c.getSnapshot()
|
||||
if snapshot != nil && !snapshot.MeetsCriteria(criteria) {
|
||||
snapshot = nil
|
||||
}
|
||||
|
||||
if snapshot == nil {
|
||||
snapshot, err = c.describeAllInstancesUncached()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
glog.V(6).Infof("EC2 DescribeInstances - using cached results")
|
||||
}
|
||||
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
// getSnapshot returns the currently cached snapshot, or nil if none exists yet.
func (c *instanceCache) getSnapshot() *allInstancesSnapshot {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	return c.snapshot
}

// olderThan is a simple helper to encapsulate timestamp comparison
func (s *allInstancesSnapshot) olderThan(other *allInstancesSnapshot) bool {
	// After() is technically broken by time changes until we have monotonic time
	return other.timestamp.After(s.timestamp)
}
|
||||
|
||||
// MeetsCriteria returns true if the snapshot meets the criteria in cacheCriteria
|
||||
func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool {
|
||||
if criteria.MaxAge > 0 {
|
||||
// Sub() is technically broken by time changes until we have monotonic time
|
||||
now := time.Now()
|
||||
if now.Sub(s.timestamp) > criteria.MaxAge {
|
||||
glog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if len(criteria.HasInstances) != 0 {
|
||||
for _, id := range criteria.HasInstances {
|
||||
if nil == s.instances[id] {
|
||||
glog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// allInstancesSnapshot holds the results from querying for all instances,
// along with the timestamp for cache-invalidation purposes
type allInstancesSnapshot struct {
	// timestamp records when the snapshot was taken (before the API call).
	timestamp time.Time
	// instances maps each instance ID to its full EC2 description.
	instances map[awsInstanceID]*ec2.Instance
}
|
||||
|
||||
// FindInstances returns the instances corresponding to the specified ids. If an id is not found, it is ignored.
|
||||
func (s *allInstancesSnapshot) FindInstances(ids []awsInstanceID) map[awsInstanceID]*ec2.Instance {
|
||||
m := make(map[awsInstanceID]*ec2.Instance)
|
||||
for _, id := range ids {
|
||||
instance := s.instances[id]
|
||||
if instance != nil {
|
||||
m[id] = instance
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
199
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances_test.go
generated
vendored
Normal file
199
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances_test.go
generated
vendored
Normal file
@ -0,0 +1,199 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/api/core/v1"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestParseInstance runs a table of kubernetesInstanceID values through
// mapToAWSInstanceID, mapToAWSInstanceIDs, and mapToAWSInstanceIDsTolerant,
// checking both the extracted ID and error behavior.
func TestParseInstance(t *testing.T) {
	tests := []struct {
		Kubernetes  kubernetesInstanceID
		Aws         awsInstanceID
		ExpectError bool
	}{
		{
			Kubernetes: "aws:///us-east-1a/i-12345678",
			Aws:        "i-12345678",
		},
		{
			Kubernetes: "aws:////i-12345678",
			Aws:        "i-12345678",
		},
		{
			Kubernetes: "i-12345678",
			Aws:        "i-12345678",
		},
		{
			Kubernetes: "aws:///us-east-1a/i-12345678abcdef01",
			Aws:        "i-12345678abcdef01",
		},
		{
			Kubernetes: "aws:////i-12345678abcdef01",
			Aws:        "i-12345678abcdef01",
		},
		{
			Kubernetes: "i-12345678abcdef01",
			Aws:        "i-12345678abcdef01",
		},
		{
			Kubernetes:  "vol-123456789",
			ExpectError: true,
		},
		{
			Kubernetes:  "aws:///us-east-1a/vol-12345678abcdef01",
			ExpectError: true,
		},
		{
			Kubernetes:  "aws://accountid/us-east-1a/vol-12345678abcdef01",
			ExpectError: true,
		},
		{
			Kubernetes:  "aws:///us-east-1a/vol-12345678abcdef01/suffix",
			ExpectError: true,
		},
		{
			Kubernetes:  "",
			ExpectError: true,
		},
	}

	// Single-ID mapping.
	for _, test := range tests {
		awsID, err := test.Kubernetes.mapToAWSInstanceID()
		if err != nil {
			if !test.ExpectError {
				t.Errorf("unexpected error parsing %s: %v", test.Kubernetes, err)
			}
		} else {
			if test.ExpectError {
				t.Errorf("expected error parsing %s", test.Kubernetes)
			} else if test.Aws != awsID {
				t.Errorf("unexpected value parsing %s, got %s", test.Kubernetes, awsID)
			}
		}
	}

	// Batch mapping via Node.Spec.ProviderID, in both strict and tolerant modes.
	for _, test := range tests {
		node := &v1.Node{}
		node.Spec.ProviderID = string(test.Kubernetes)

		awsInstanceIds, err := mapToAWSInstanceIDs([]*v1.Node{node})
		if err != nil {
			if !test.ExpectError {
				t.Errorf("unexpected error parsing %s: %v", test.Kubernetes, err)
			}
		} else {
			if test.ExpectError {
				t.Errorf("expected error parsing %s", test.Kubernetes)
			} else if len(awsInstanceIds) != 1 {
				t.Errorf("unexpected value parsing %s, got %s", test.Kubernetes, awsInstanceIds)
			} else if awsInstanceIds[0] != test.Aws {
				t.Errorf("unexpected value parsing %s, got %s", test.Kubernetes, awsInstanceIds)
			}
		}

		// Tolerant mode skips unparseable nodes instead of erroring.
		awsInstanceIds = mapToAWSInstanceIDsTolerant([]*v1.Node{node})
		if test.ExpectError {
			if len(awsInstanceIds) != 0 {
				t.Errorf("unexpected results parsing %s: %s", test.Kubernetes, awsInstanceIds)
			}
		} else {
			if len(awsInstanceIds) != 1 {
				t.Errorf("unexpected value parsing %s, got %s", test.Kubernetes, awsInstanceIds)
			} else if awsInstanceIds[0] != test.Aws {
				t.Errorf("unexpected value parsing %s, got %s", test.Kubernetes, awsInstanceIds)
			}
		}
	}
}
||||
|
||||
// TestSnapshotMeetsCriteria exercises MeetsCriteria against the MaxAge and
// HasInstances constraints, individually and combined.
func TestSnapshotMeetsCriteria(t *testing.T) {
	// Snapshot just over one hour old.
	snapshot := &allInstancesSnapshot{timestamp: time.Now().Add(-3601 * time.Second)}

	if !snapshot.MeetsCriteria(cacheCriteria{}) {
		t.Errorf("Snapshot should always meet empty criteria")
	}

	if snapshot.MeetsCriteria(cacheCriteria{MaxAge: time.Hour}) {
		t.Errorf("Snapshot did not honor MaxAge")
	}

	if snapshot.MeetsCriteria(cacheCriteria{HasInstances: []awsInstanceID{awsInstanceID("i-12345678")}}) {
		t.Errorf("Snapshot did not honor HasInstances with missing instances")
	}

	snapshot.instances = make(map[awsInstanceID]*ec2.Instance)
	snapshot.instances[awsInstanceID("i-12345678")] = &ec2.Instance{}

	if !snapshot.MeetsCriteria(cacheCriteria{HasInstances: []awsInstanceID{awsInstanceID("i-12345678")}}) {
		t.Errorf("Snapshot did not honor HasInstances with matching instances")
	}

	if snapshot.MeetsCriteria(cacheCriteria{HasInstances: []awsInstanceID{awsInstanceID("i-12345678"), awsInstanceID("i-00000000")}}) {
		t.Errorf("Snapshot did not honor HasInstances with partially matching instances")
	}
}
|
||||
|
||||
func TestOlderThan(t *testing.T) {
|
||||
t1 := time.Now()
|
||||
t2 := t1.Add(time.Second)
|
||||
|
||||
s1 := &allInstancesSnapshot{timestamp: t1}
|
||||
s2 := &allInstancesSnapshot{timestamp: t2}
|
||||
|
||||
assert.True(t, s1.olderThan(s2), "s1 should be olderThan s2")
|
||||
assert.False(t, s2.olderThan(s1), "s2 not should be olderThan s1")
|
||||
assert.False(t, s1.olderThan(s1), "s1 not should be olderThan itself")
|
||||
}
|
||||
|
||||
// TestSnapshotFindInstances checks that FindInstances returns exactly the
// snapshot entries for known IDs (by reference) and silently skips unknown IDs.
func TestSnapshotFindInstances(t *testing.T) {
	snapshot := &allInstancesSnapshot{}

	snapshot.instances = make(map[awsInstanceID]*ec2.Instance)
	{
		id := awsInstanceID("i-12345678")
		snapshot.instances[id] = &ec2.Instance{InstanceId: id.awsString()}
	}
	{
		id := awsInstanceID("i-23456789")
		snapshot.instances[id] = &ec2.Instance{InstanceId: id.awsString()}
	}

	// Request two known IDs and one unknown; only the known two come back.
	instances := snapshot.FindInstances([]awsInstanceID{awsInstanceID("i-12345678"), awsInstanceID("i-23456789"), awsInstanceID("i-00000000")})
	if len(instances) != 2 {
		t.Errorf("findInstances returned %d results, expected 2", len(instances))
	}

	for _, id := range []awsInstanceID{awsInstanceID("i-12345678"), awsInstanceID("i-23456789")} {
		i := instances[id]
		if i == nil {
			t.Errorf("findInstances did not return %s", id)
			continue
		}
		if aws.StringValue(i.InstanceId) != string(id) {
			t.Errorf("findInstances did not return expected instanceId for %s", id)
		}
		if i != snapshot.instances[id] {
			t.Errorf("findInstances did not return expected instance (reference equality) for %s", id)
		}
	}
}
|
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go
generated
vendored
Normal file
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// awsHandlerLogger is a handler for aws-sdk-go that logs every outgoing
// AWS request (service and operation name) at verbosity 4.
func awsHandlerLogger(req *request.Request) {
	service, name := awsServiceAndName(req)
	glog.V(4).Infof("AWS request: %s %s", service, name)
}
|
||||
|
||||
// awsSendHandlerLogger logs each AWS API call at send time, including
// the operation and request parameters, at verbosity 4.
func awsSendHandlerLogger(req *request.Request) {
	service, name := awsServiceAndName(req)
	glog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params)
}
|
||||
|
||||
// awsValidateResponseHandlerLogger logs each AWS API response at the
// validate-response stage, including the HTTP status, at verbosity 4.
func awsValidateResponseHandlerLogger(req *request.Request) {
	service, name := awsServiceAndName(req)
	glog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status)
}
|
||||
|
||||
func awsServiceAndName(req *request.Request) (string, string) {
|
||||
service := req.ClientInfo.ServiceName
|
||||
|
||||
name := "?"
|
||||
if req.Operation != nil {
|
||||
name = req.Operation.Name
|
||||
}
|
||||
return service, name
|
||||
}
|
94
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go
generated
vendored
Normal file
94
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
awscredentialprovider "k8s.io/kubernetes/pkg/credentialprovider/aws"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// WellKnownRegions is the complete list of regions known to the AWS cloudprovider
// and credentialprovider. Regions discovered at runtime (e.g. from
// instance metadata) are added separately via RecognizeRegion.
var WellKnownRegions = [...]string{
	// from `aws ec2 describe-regions --region us-east-1 --query Regions[].RegionName | sort`
	"ap-northeast-1",
	"ap-northeast-2",
	"ap-south-1",
	"ap-southeast-1",
	"ap-southeast-2",
	"ca-central-1",
	"eu-central-1",
	"eu-west-1",
	"eu-west-2",
	"sa-east-1",
	"us-east-1",
	"us-east-2",
	"us-west-1",
	"us-west-2",

	// these are not registered in many / most accounts
	"cn-north-1",
	"us-gov-west-1",
}
|
||||
|
||||
// awsRegionsMutex protects awsRegions
var awsRegionsMutex sync.Mutex

// awsRegions is a set of recognized regions; it is lazily initialized
// by RecognizeRegion, always under awsRegionsMutex.
var awsRegions sets.String
|
||||
|
||||
// RecognizeRegion is called for each AWS region we know about.
// It currently registers a credential provider for that region.
// There are two paths to discovering a region:
// * we hard-code some well-known regions
// * if a region is discovered from instance metadata, we add that
// Calling it again for an already-known region is a cheap no-op.
func RecognizeRegion(region string) {
	awsRegionsMutex.Lock()
	defer awsRegionsMutex.Unlock()

	// Lazy initialization keeps the package's zero state usable.
	if awsRegions == nil {
		awsRegions = sets.NewString()
	}

	if awsRegions.Has(region) {
		glog.V(6).Infof("found AWS region %q again - ignoring", region)
		return
	}

	glog.V(4).Infof("found AWS region %q", region)

	// Register the credential provider before recording the region, so
	// a recognized region always has a provider.
	awscredentialprovider.RegisterCredentialsProvider(region)

	awsRegions.Insert(region)
}
|
||||
|
||||
// RecognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion,
// seeding the recognized-region set with the hard-coded list.
func RecognizeWellKnownRegions() {
	for _, region := range WellKnownRegions {
		RecognizeRegion(region)
	}
}
|
||||
|
||||
// isRegionValid checks if the region is in the set of known regions.
// Note: calling this before any RecognizeRegion call will operate on a
// nil set (sets.String.Has is nil-safe and returns false).
func isRegionValid(region string) bool {
	awsRegionsMutex.Lock()
	defer awsRegionsMutex.Unlock()

	return awsRegions.Has(region)
}
|
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions_test.go
generated
vendored
Normal file
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions_test.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRegions does basic checking of region verification / addition
func TestRegions(t *testing.T) {
	RecognizeWellKnownRegions()

	tests := []struct {
		Add            string // optional region to register before looking up
		Lookup         string // region queried via isRegionValid
		ExpectIsRegion bool
	}{
		{
			Lookup:         "us-east-1",
			ExpectIsRegion: true,
		},
		{
			// An availability zone is not a region.
			Lookup:         "us-east-1a",
			ExpectIsRegion: false,
		},
		{
			// Adding a custom region must not affect other lookups.
			Add:            "us-test-1",
			Lookup:         "us-east-1",
			ExpectIsRegion: true,
		},
		{
			// The custom region added above stays recognized.
			Lookup:         "us-test-1",
			ExpectIsRegion: true,
		},
		{
			// Re-adding an existing region is a no-op.
			Add:            "us-test-1",
			Lookup:         "us-test-1",
			ExpectIsRegion: true,
		},
	}

	for _, test := range tests {
		if test.Add != "" {
			RecognizeRegion(test.Add)
		}

		if test.Lookup != "" {
			if isRegionValid(test.Lookup) != test.ExpectIsRegion {
				t.Fatalf("region valid mismatch: %q", test.Lookup)
			}
		}
	}
}
|
||||
|
||||
// TestRecognizesNewRegion verifies that we see a region from metadata, we recognize it as valid
func TestRecognizesNewRegion(t *testing.T) {
	region := "us-testrecognizesnewregion-1"
	if isRegionValid(region) {
		t.Fatalf("region already valid: %q", region)
	}

	// Building the cloud discovers the region from the (fake) instance
	// metadata AZ and should register it via RecognizeRegion.
	awsServices := NewFakeAWSServices(TestClusterId).WithAz(region + "a")
	_, err := newAWSCloud(nil, awsServices)
	if err != nil {
		t.Errorf("error building AWS cloud: %v", err)
	}

	if !isRegionValid(region) {
		t.Fatalf("newly discovered region not valid: %q", region)
	}
}
|
170
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go
generated
vendored
Normal file
170
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
	// decayIntervalSeconds is how often the request/error counters decay.
	decayIntervalSeconds = 20
	// decayFraction is the multiplier applied to the counters per decay interval.
	decayFraction = 0.8
	// maxDelay caps the per-request cross-request backoff delay.
	maxDelay = 60 * time.Second
)
|
||||
|
||||
// CrossRequestRetryDelay inserts delays before AWS calls, when we are observing RequestLimitExceeded errors
// Note that we share a CrossRequestRetryDelay across multiple AWS requests; this is a process-wide back-off,
// whereas the aws-sdk-go implements a per-request exponential backoff/retry
type CrossRequestRetryDelay struct {
	// backoff holds the shared counters; it is internally synchronized,
	// so a single CrossRequestRetryDelay is safe for concurrent use.
	backoff Backoff
}
|
||||
|
||||
// Create a new CrossRequestRetryDelay
|
||||
func NewCrossRequestRetryDelay() *CrossRequestRetryDelay {
|
||||
c := &CrossRequestRetryDelay{}
|
||||
c.backoff.init(decayIntervalSeconds, decayFraction, maxDelay)
|
||||
return c
|
||||
}
|
||||
|
||||
// BeforeSign is added to the Sign chain; called before each request.
// If the recent RequestLimitExceeded error rate warrants it, it sleeps
// for the computed backoff delay before allowing the request through.
func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
	now := time.Now()
	delay := c.backoff.ComputeDelayForRequest(now)
	if delay > 0 {
		glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
			describeRequest(r), delay.String())

		if sleepFn := r.Config.SleepDelay; sleepFn != nil {
			// Support SleepDelay for backwards compatibility
			sleepFn(delay)
		} else if err := aws.SleepWithContext(r.Context(), delay); err != nil {
			// The request's context was canceled while sleeping: fail the
			// request and mark it non-retryable.
			r.Error = awserr.New(request.CanceledErrorCode, "request context canceled", err)
			r.Retryable = aws.Bool(false)
			return
		}

		// Avoid clock skew problems
		r.Time = now
	}
}
|
||||
|
||||
// Return a user-friendly string describing the request, for use in log messages
|
||||
func describeRequest(r *request.Request) string {
|
||||
service := r.ClientInfo.ServiceName
|
||||
|
||||
name := "?"
|
||||
if r.Operation != nil {
|
||||
name = r.Operation.Name
|
||||
}
|
||||
|
||||
return service + "::" + name
|
||||
}
|
||||
|
||||
// AfterRetry is added to the AfterRetry chain; called after any error.
// It records RequestLimitExceeded errors into the shared backoff state;
// all other errors (and non-awserr errors) are ignored.
func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
	if r.Error == nil {
		return
	}
	awsError, ok := r.Error.(awserr.Error)
	if !ok {
		return
	}
	if awsError.Code() == "RequestLimitExceeded" {
		c.backoff.ReportError()
		glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
			describeRequest(r))
	}
}
|
||||
|
||||
// Backoff manages a backoff that varies based on the recently observed failures
type Backoff struct {
	// Configuration; set once by init and not mutated afterwards.
	decayIntervalSeconds int64
	decayFraction        float64
	maxDelay             time.Duration

	// mutex guards the mutable counters below.
	mutex sync.Mutex

	// We count all requests & the number of requests which hit a
	// RequestLimit.  We only really care about 'recent' requests, so we
	// decay the counts exponentially to bias towards recent values.
	countErrorsRequestLimit float32
	countRequests           float32
	// lastDecay is the Unix time (seconds) at which decay was last applied.
	lastDecay int64
}
|
||||
|
||||
// init configures the backoff parameters and seeds the counters.
// It is not safe to call concurrently with other Backoff methods.
func (b *Backoff) init(decayIntervalSeconds int, decayFraction float64, maxDelay time.Duration) {
	b.lastDecay = time.Now().Unix()
	// Bias so that if the first request hits the limit we don't immediately apply the full delay
	b.countRequests = 4
	b.decayIntervalSeconds = int64(decayIntervalSeconds)
	b.decayFraction = decayFraction
	b.maxDelay = maxDelay
}
|
||||
|
||||
// ComputeDelayForRequest computes the delay required for a request, also
// updating internal state to count this request. The delay scales
// linearly with the recent error fraction, up to b.maxDelay, and is
// rounded down to whole seconds. Safe for concurrent use.
func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	// Apply exponential decay to the counters
	timeDeltaSeconds := now.Unix() - b.lastDecay
	if timeDeltaSeconds > b.decayIntervalSeconds {
		intervals := float64(timeDeltaSeconds) / float64(b.decayIntervalSeconds)
		decay := float32(math.Pow(b.decayFraction, intervals))
		b.countErrorsRequestLimit *= decay
		b.countRequests *= decay
		b.lastDecay = now.Unix()
	}

	// Count this request
	b.countRequests += 1.0

	// Compute the failure rate
	errorFraction := float32(0.0)
	if b.countRequests > 0.5 {
		// Avoid tiny residuals & rounding errors
		errorFraction = b.countErrorsRequestLimit / b.countRequests
	}

	// Ignore a low fraction of errors
	// This also allows them to time-out
	if errorFraction < 0.1 {
		return time.Duration(0)
	}

	// Delay by the max delay multiplied by the recent error rate
	// (i.e. we apply a linear delay function)
	// TODO: This is pretty arbitrary
	delay := time.Nanosecond * time.Duration(float32(b.maxDelay.Nanoseconds())*errorFraction)
	// Round down to the nearest second for sanity
	return time.Second * time.Duration(int(delay.Seconds()))
}
|
||||
|
||||
// ReportError is called when we observe a throttling error; it bumps the
// decayed error counter used by ComputeDelayForRequest. Safe for
// concurrent use.
func (b *Backoff) ReportError() {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	b.countErrorsRequestLimit += 1.0
}
|
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler_test.go
generated
vendored
Normal file
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler_test.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// There follows a group of tests for the backoff logic. There's nothing
|
||||
// particularly special about the values chosen: if we tweak the values in the
|
||||
// backoff logic then we might well have to update the tests. However the key
|
||||
// behavioural elements should remain (e.g. no errors => no backoff), and these
|
||||
// are each tested by one of the tests below.
|
||||
|
||||
// Test that we don't apply any delays when there are no errors
func TestBackoffNoErrors(t *testing.T) {
	b := &Backoff{}
	b.init(decayIntervalSeconds, decayFraction, maxDelay)

	// Simulate one request per second with zero reported errors.
	now := time.Now()
	for i := 0; i < 100; i++ {
		d := b.ComputeDelayForRequest(now)
		if d.Nanoseconds() != 0 {
			t.Fatalf("unexpected delay during no-error case")
		}
		now = now.Add(time.Second)
	}
}
|
||||
|
||||
// Test that we always apply a delay when there are errors, and also that we
// don't "flap" - that our own delay doesn't cause us to oscillate between
// delay and no-delay.
func TestBackoffAllErrors(t *testing.T) {
	b := &Backoff{}
	b.init(decayIntervalSeconds, decayFraction, maxDelay)

	now := time.Now()
	// Warm up
	for i := 0; i < 10; i++ {
		_ = b.ComputeDelayForRequest(now)
		b.ReportError()
		now = now.Add(time.Second)
	}

	// Every request errors; we advance time by the computed delay itself
	// to verify the delay stays substantial rather than oscillating.
	for i := 0; i < 100; i++ {
		d := b.ComputeDelayForRequest(now)
		b.ReportError()
		if d.Seconds() < 5 {
			t.Fatalf("unexpected short-delay during all-error case: %v", d)
		}
		t.Logf("delay @%d %v", i, d)
		now = now.Add(d)
	}
}
|
||||
|
||||
// Test that we do come close to our max delay, when we see all errors at 1
// second intervals (this simulates multiple concurrent requests, because we
// don't wait for delay in between requests)
func TestBackoffHitsMax(t *testing.T) {
	b := &Backoff{}
	b.init(decayIntervalSeconds, decayFraction, maxDelay)

	// Saturate the error counters.
	now := time.Now()
	for i := 0; i < 100; i++ {
		_ = b.ComputeDelayForRequest(now)
		b.ReportError()
		now = now.Add(time.Second)
	}

	// The computed delay should now be within 5% of maxDelay.
	for i := 0; i < 10; i++ {
		d := b.ComputeDelayForRequest(now)
		b.ReportError()
		if float32(d.Nanoseconds()) < (float32(maxDelay.Nanoseconds()) * 0.95) {
			t.Fatalf("expected delay to be >= 95 percent of max delay, was %v", d)
		}
		t.Logf("delay @%d %v", i, d)
		now = now.Add(time.Second)
	}
}
|
||||
|
||||
// Test that after a phase of errors, we eventually stop applying a delay once there are
// no more errors.
func TestBackoffRecovers(t *testing.T) {
	b := &Backoff{}
	b.init(decayIntervalSeconds, decayFraction, maxDelay)

	now := time.Now()

	// Phase of all-errors
	for i := 0; i < 100; i++ {
		_ = b.ComputeDelayForRequest(now)
		b.ReportError()
		now = now.Add(time.Second)
	}

	// While still erroring, delays must remain substantial.
	for i := 0; i < 10; i++ {
		d := b.ComputeDelayForRequest(now)
		b.ReportError()
		if d.Seconds() < 5 {
			t.Fatalf("unexpected short-delay during all-error phase: %v", d)
		}
		t.Logf("error phase delay @%d %v", i, d)
		now = now.Add(time.Second)
	}

	// Phase of no errors (advancing 3s per request so the counters decay)
	for i := 0; i < 100; i++ {
		_ = b.ComputeDelayForRequest(now)
		now = now.Add(3 * time.Second)
	}

	// After recovery, no delay should be applied.
	for i := 0; i < 10; i++ {
		d := b.ComputeDelayForRequest(now)
		if d.Seconds() != 0 {
			t.Fatalf("unexpected delay during error recovery phase: %v", d)
		}
		t.Logf("no-error phase delay @%d %v", i, d)
		now = now.Add(time.Second)
	}
}
|
146
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go
generated
vendored
Normal file
146
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
// IPPermissionSet provides set semantics over *ec2.IpPermission values,
// keyed by the permission's canonical JSON form (see keyForIPPermission).
type IPPermissionSet map[string]*ec2.IpPermission
|
||||
|
||||
// NewIPPermissionSet builds an IPPermissionSet containing the given items.
func NewIPPermissionSet(items ...*ec2.IpPermission) IPPermissionSet {
	s := make(IPPermissionSet)
	s.Insert(items...)
	return s
}
|
||||
|
||||
// Ungroup splits permissions out into individual permissions
|
||||
// EC2 will combine permissions with the same port but different SourceRanges together, for example
|
||||
// We ungroup them so we can process them
|
||||
func (s IPPermissionSet) Ungroup() IPPermissionSet {
|
||||
l := []*ec2.IpPermission{}
|
||||
for _, p := range s.List() {
|
||||
if len(p.IpRanges) <= 1 {
|
||||
l = append(l, p)
|
||||
continue
|
||||
}
|
||||
for _, ipRange := range p.IpRanges {
|
||||
c := &ec2.IpPermission{}
|
||||
*c = *p
|
||||
c.IpRanges = []*ec2.IpRange{ipRange}
|
||||
l = append(l, c)
|
||||
}
|
||||
}
|
||||
|
||||
l2 := []*ec2.IpPermission{}
|
||||
for _, p := range l {
|
||||
if len(p.UserIdGroupPairs) <= 1 {
|
||||
l2 = append(l2, p)
|
||||
continue
|
||||
}
|
||||
for _, u := range p.UserIdGroupPairs {
|
||||
c := &ec2.IpPermission{}
|
||||
*c = *p
|
||||
c.UserIdGroupPairs = []*ec2.UserIdGroupPair{u}
|
||||
l2 = append(l, c)
|
||||
}
|
||||
}
|
||||
|
||||
l3 := []*ec2.IpPermission{}
|
||||
for _, p := range l2 {
|
||||
if len(p.PrefixListIds) <= 1 {
|
||||
l3 = append(l3, p)
|
||||
continue
|
||||
}
|
||||
for _, v := range p.PrefixListIds {
|
||||
c := &ec2.IpPermission{}
|
||||
*c = *p
|
||||
c.PrefixListIds = []*ec2.PrefixListId{v}
|
||||
l3 = append(l3, c)
|
||||
}
|
||||
}
|
||||
|
||||
return NewIPPermissionSet(l3...)
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
// An item whose canonical key already exists overwrites the previous entry.
func (s IPPermissionSet) Insert(items ...*ec2.IpPermission) {
	for _, p := range items {
		k := keyForIPPermission(p)
		s[k] = p
	}
}
|
||||
|
||||
// List returns the contents as a slice.  Order is not defined.
func (s IPPermissionSet) List() []*ec2.IpPermission {
	res := make([]*ec2.IpPermission, 0, len(s))
	for _, v := range s {
		res = append(res, v)
	}
	return res
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2,
// i.e. every key of s2 is also present in s1.
func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool {
	for k := range s2 {
		_, found := s1[k]
		if !found {
			return false
		}
	}
	return true
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
// Two sets are equal if their membership is identical.
// (In practice, this means same elements, order doesn't matter)
func (s1 IPPermissionSet) Equal(s2 IPPermissionSet) bool {
	// Equal size plus superset implies identical key sets.
	return len(s1) == len(s2) && s1.IsSuperset(s2)
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s1.Difference(s2) = {a3}
// s2.Difference(s1) = {a4, a5}
func (s IPPermissionSet) Difference(s2 IPPermissionSet) IPPermissionSet {
	result := NewIPPermissionSet()
	for k, v := range s {
		_, found := s2[k]
		if !found {
			result[k] = v
		}
	}
	return result
}
|
||||
|
||||
// Len returns the size of the set.
func (s IPPermissionSet) Len() int {
	return len(s)
}
|
||||
|
||||
// keyForIPPermission returns the canonical JSON serialization of p,
// used as the set's map key. A marshalling failure is a programmer
// error and panics (ec2.IpPermission is always JSON-serializable).
func keyForIPPermission(p *ec2.IpPermission) string {
	v, err := json.Marshal(p)
	if err != nil {
		panic(fmt.Sprintf("error building JSON representation of ec2.IpPermission: %v", err))
	}
	return string(v)
}
|
282
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go
generated
vendored
Normal file
282
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go
generated
vendored
Normal file
@ -0,0 +1,282 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
// TagNameKubernetesClusterPrefix is the tag name we use to differentiate multiple
// logically independent clusters running in the same AZ.
// The tag key = TagNameKubernetesClusterPrefix + clusterID
// The tag value is an ownership value
const TagNameKubernetesClusterPrefix = "kubernetes.io/cluster/"

// TagNameKubernetesClusterLegacy is the legacy tag name we use to differentiate multiple
// logically independent clusters running in the same AZ. The problem with it was that it
// did not allow shared resources.
const TagNameKubernetesClusterLegacy = "KubernetesCluster"

// ResourceLifecycle is the tag value describing how a resource's
// lifecycle relates to the cluster's lifecycle.
type ResourceLifecycle string

const (
	// ResourceLifecycleOwned is the value we use when tagging resources to indicate
	// that the resource is considered owned and managed by the cluster,
	// and in particular that the lifecycle is tied to the lifecycle of the cluster.
	ResourceLifecycleOwned = "owned"
	// ResourceLifecycleShared is the value we use when tagging resources to indicate
	// that the resource is shared between multiple clusters, and should not be destroyed
	// if the cluster is destroyed.
	ResourceLifecycleShared = "shared"
)
|
||||
|
||||
// awsTagging encapsulates the cluster-identification tagging policy:
// which tag key identifies this cluster and whether legacy tags are in use.
type awsTagging struct {
	// ClusterID is our cluster identifier: we tag AWS resources with this value,
	// and thus we can run two independent clusters in the same VPC or subnets.
	// This gives us similar functionality to GCE projects.
	ClusterID string

	// usesLegacyTags is true if we are using the legacy TagNameKubernetesClusterLegacy tags
	usesLegacyTags bool
}
|
||||
|
||||
// init sets the ClusterID from the legacy and/or new-style cluster IDs.
// If both are supplied they must agree; a non-empty legacy ID switches
// the tagging layer into legacy mode (usesLegacyTags) and wins.
// An empty clusterID disables cluster filtering entirely.
func (t *awsTagging) init(legacyClusterID string, clusterID string) error {
	if legacyClusterID != "" {
		if clusterID != "" && legacyClusterID != clusterID {
			return fmt.Errorf("ClusterID tags did not match: %q vs %q", clusterID, legacyClusterID)
		}
		t.usesLegacyTags = true
		clusterID = legacyClusterID
	}

	t.ClusterID = clusterID

	if clusterID != "" {
		glog.Infof("AWS cloud filtering on ClusterID: %v", clusterID)
	} else {
		glog.Warning("AWS cloud - no clusterID filtering applied for shared resources; do not run multiple clusters in this AZ.")
	}

	return nil
}
|
||||
|
||||
// initFromTags extracts a clusterID from the given tags, if one is present,
// and initializes the tagging configuration from it.
// If no clusterID is found, initialization proceeds with "" (a warning is logged).
// If multiple (different) clusterIDs are found, returns an error.
func (t *awsTagging) initFromTags(tags []*ec2.Tag) error {
	legacyClusterID, newClusterID, err := findClusterIDs(tags)
	if err != nil {
		return err
	}

	if legacyClusterID == "" && newClusterID == "" {
		glog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...")
	}

	return t.init(legacyClusterID, newClusterID)
}
|
||||
|
||||
// Extracts the legacy & new cluster ids from the given tags, if they are present
|
||||
// If duplicate tags are found, returns an error
|
||||
func findClusterIDs(tags []*ec2.Tag) (string, string, error) {
|
||||
legacyClusterID := ""
|
||||
newClusterID := ""
|
||||
|
||||
for _, tag := range tags {
|
||||
tagKey := aws.StringValue(tag.Key)
|
||||
if strings.HasPrefix(tagKey, TagNameKubernetesClusterPrefix) {
|
||||
id := strings.TrimPrefix(tagKey, TagNameKubernetesClusterPrefix)
|
||||
if newClusterID != "" {
|
||||
return "", "", fmt.Errorf("Found multiple cluster tags with prefix %s (%q and %q)", TagNameKubernetesClusterPrefix, newClusterID, id)
|
||||
}
|
||||
newClusterID = id
|
||||
}
|
||||
|
||||
if tagKey == TagNameKubernetesClusterLegacy {
|
||||
id := aws.StringValue(tag.Value)
|
||||
if legacyClusterID != "" {
|
||||
return "", "", fmt.Errorf("Found multiple %s tags (%q and %q)", TagNameKubernetesClusterLegacy, legacyClusterID, id)
|
||||
}
|
||||
legacyClusterID = id
|
||||
}
|
||||
}
|
||||
|
||||
return legacyClusterID, newClusterID, nil
|
||||
}
|
||||
|
||||
// clusterTagKey returns the new-style cluster tag key for this cluster:
// TagNameKubernetesClusterPrefix + ClusterID.
func (t *awsTagging) clusterTagKey() string {
	return TagNameKubernetesClusterPrefix + t.ClusterID
}
|
||||
|
||||
// hasClusterTag reports whether the given tags mark a resource as belonging
// to this cluster, accepting either the legacy tag (value must match the
// ClusterID) or the new-style per-cluster tag key.
func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool {
	// if the clusterID is not configured -- we consider all instances.
	if len(t.ClusterID) == 0 {
		return true
	}
	clusterTagKey := t.clusterTagKey()
	for _, tag := range tags {
		tagKey := aws.StringValue(tag.Key)
		// For 1.6, we continue to recognize the legacy tags, for the 1.5 -> 1.6 upgrade
		if tagKey == TagNameKubernetesClusterLegacy {
			return aws.StringValue(tag.Value) == t.ClusterID
		}

		if tagKey == clusterTagKey {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Ensure that a resource has the correct tags
|
||||
// If it has no tags, we assume that this was a problem caused by an error in between creation and tagging,
|
||||
// and we add the tags. If it has a different cluster's tags, that is an error.
|
||||
func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error {
|
||||
actualTagMap := make(map[string]string)
|
||||
for _, tag := range observedTags {
|
||||
actualTagMap[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
|
||||
}
|
||||
|
||||
expectedTags := c.buildTags(lifecycle, additionalTags)
|
||||
|
||||
addTags := make(map[string]string)
|
||||
for k, expected := range expectedTags {
|
||||
actual := actualTagMap[k]
|
||||
if actual == expected {
|
||||
continue
|
||||
}
|
||||
if actual == "" {
|
||||
glog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected)
|
||||
addTags[k] = expected
|
||||
} else {
|
||||
return fmt.Errorf("resource %q has tag belonging to another cluster: %q=%q (expected %q)", resourceID, k, actual, expected)
|
||||
}
|
||||
}
|
||||
|
||||
if len(addTags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.createTags(client, resourceID, lifecycle, addTags); err != nil {
|
||||
return fmt.Errorf("error adding missing tags to resource %q: %q", resourceID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createTags calls EC2 CreateTags, but adds retry-on-failure logic
|
||||
// We retry mainly because if we create an object, we cannot tag it until it is "fully created" (eventual consistency)
|
||||
// The error code varies though (depending on what we are tagging), so we simply retry on all errors
|
||||
func (t *awsTagging) createTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string) error {
|
||||
tags := t.buildTags(lifecycle, additionalTags)
|
||||
|
||||
if tags == nil || len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var awsTags []*ec2.Tag
|
||||
for k, v := range tags {
|
||||
tag := &ec2.Tag{
|
||||
Key: aws.String(k),
|
||||
Value: aws.String(v),
|
||||
}
|
||||
awsTags = append(awsTags, tag)
|
||||
}
|
||||
|
||||
backoff := wait.Backoff{
|
||||
Duration: createTagInitialDelay,
|
||||
Factor: createTagFactor,
|
||||
Steps: createTagSteps,
|
||||
}
|
||||
request := &ec2.CreateTagsInput{}
|
||||
request.Resources = []*string{&resourceID}
|
||||
request.Tags = awsTags
|
||||
|
||||
var lastErr error
|
||||
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
|
||||
_, err := client.CreateTags(request)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// We could check that the error is retryable, but the error code changes based on what we are tagging
|
||||
// SecurityGroup: InvalidGroup.NotFound
|
||||
glog.V(2).Infof("Failed to create tags; will retry. Error was %q", err)
|
||||
lastErr = err
|
||||
return false, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
// return real CreateTags error instead of timeout
|
||||
err = lastErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Add additional filters, to match on our tags
|
||||
// This lets us run multiple k8s clusters in a single EC2 AZ
|
||||
func (t *awsTagging) addFilters(filters []*ec2.Filter) []*ec2.Filter {
|
||||
// if there are no clusterID configured - no filtering by special tag names
|
||||
// should be applied to revert to legacy behaviour.
|
||||
if len(t.ClusterID) == 0 {
|
||||
if len(filters) == 0 {
|
||||
// We can't pass a zero-length Filters to AWS (it's an error)
|
||||
// So if we end up with no filters; just return nil
|
||||
return nil
|
||||
}
|
||||
return filters
|
||||
}
|
||||
// For 1.6, we always recognize the legacy tag, for the 1.5 -> 1.6 upgrade
|
||||
// There are no "or" filters by key, so we look for both the legacy and new key, and then we have to post-filter
|
||||
f := newEc2Filter("tag-key", TagNameKubernetesClusterLegacy, t.clusterTagKey())
|
||||
|
||||
// We can't pass a zero-length Filters to AWS (it's an error)
|
||||
// So if we end up with no filters; we need to return nil
|
||||
filters = append(filters, f)
|
||||
return filters
|
||||
}
|
||||
|
||||
func (t *awsTagging) buildTags(lifecycle ResourceLifecycle, additionalTags map[string]string) map[string]string {
|
||||
tags := make(map[string]string)
|
||||
for k, v := range additionalTags {
|
||||
tags[k] = v
|
||||
}
|
||||
|
||||
// no clusterID is a sign of misconfigured cluster, but we can't be tagging the resources with empty
|
||||
// strings
|
||||
if len(t.ClusterID) == 0 {
|
||||
return tags
|
||||
}
|
||||
|
||||
// We only create legacy tags if we are using legacy tags, i.e. if we have seen a legacy tag on our instance
|
||||
if t.usesLegacyTags {
|
||||
tags[TagNameKubernetesClusterLegacy] = t.ClusterID
|
||||
}
|
||||
tags[t.clusterTagKey()] = string(lifecycle)
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func (t *awsTagging) clusterID() string {
|
||||
return t.ClusterID
|
||||
}
|
111
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags_test.go
generated
vendored
Normal file
111
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags_test.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilterTags(t *testing.T) {
|
||||
awsServices := NewFakeAWSServices(TestClusterId)
|
||||
c, err := newAWSCloud(strings.NewReader("[global]"), awsServices)
|
||||
if err != nil {
|
||||
t.Errorf("Error building aws cloud: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if c.tagging.ClusterID != TestClusterId {
|
||||
t.Errorf("unexpected ClusterID: %v", c.tagging.ClusterID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindClusterID(t *testing.T) {
|
||||
grid := []struct {
|
||||
Tags map[string]string
|
||||
ExpectedNew string
|
||||
ExpectedLegacy string
|
||||
ExpectError bool
|
||||
}{
|
||||
{
|
||||
Tags: map[string]string{},
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
TagNameKubernetesClusterLegacy: "a",
|
||||
},
|
||||
ExpectedLegacy: "a",
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
TagNameKubernetesClusterPrefix + "a": "owned",
|
||||
},
|
||||
ExpectedNew: "a",
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
TagNameKubernetesClusterPrefix + "a": "",
|
||||
},
|
||||
ExpectedNew: "a",
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
TagNameKubernetesClusterLegacy: "a",
|
||||
TagNameKubernetesClusterPrefix + "a": "",
|
||||
},
|
||||
ExpectedLegacy: "a",
|
||||
ExpectedNew: "a",
|
||||
},
|
||||
{
|
||||
Tags: map[string]string{
|
||||
TagNameKubernetesClusterPrefix + "a": "",
|
||||
TagNameKubernetesClusterPrefix + "b": "",
|
||||
},
|
||||
ExpectError: true,
|
||||
},
|
||||
}
|
||||
for _, g := range grid {
|
||||
var ec2Tags []*ec2.Tag
|
||||
for k, v := range g.Tags {
|
||||
ec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
|
||||
}
|
||||
actualLegacy, actualNew, err := findClusterIDs(ec2Tags)
|
||||
if g.ExpectError {
|
||||
if err == nil {
|
||||
t.Errorf("expected error for tags %v", g.Tags)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error for tags %v: %v", g.Tags, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if g.ExpectedNew != actualNew {
|
||||
t.Errorf("unexpected new clusterid for tags %v: %s vs %s", g.Tags, g.ExpectedNew, actualNew)
|
||||
continue
|
||||
}
|
||||
|
||||
if g.ExpectedLegacy != actualLegacy {
|
||||
t.Errorf("unexpected new clusterid for tags %v: %s vs %s", g.Tags, g.ExpectedLegacy, actualLegacy)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
152
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go
generated
vendored
Normal file
152
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// awsVolumeRegMatch represents Regex Match for AWS volume.
|
||||
var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$")
|
||||
|
||||
// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678
|
||||
// The "traditional" format is "vol-12345678"
|
||||
// A new longer format is also being introduced: "vol-12345678abcdef01"
|
||||
// We should not assume anything about the length or format, though it seems
|
||||
// reasonable to assume that volumes will continue to start with "vol-".
|
||||
type awsVolumeID string
|
||||
|
||||
func (i awsVolumeID) awsString() *string {
|
||||
return aws.String(string(i))
|
||||
}
|
||||
|
||||
// KubernetesVolumeID represents the id for a volume in the kubernetes API;
|
||||
// a few forms are recognized:
|
||||
// * aws://<zone>/<awsVolumeId>
|
||||
// * aws:///<awsVolumeId>
|
||||
// * <awsVolumeId>
|
||||
type KubernetesVolumeID string
|
||||
|
||||
// DiskInfo returns aws disk information in easy to use manner
|
||||
type diskInfo struct {
|
||||
ec2Instance *ec2.Instance
|
||||
nodeName types.NodeName
|
||||
volumeState string
|
||||
attachmentState string
|
||||
hasAttachment bool
|
||||
disk *awsDisk
|
||||
}
|
||||
|
||||
// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
|
||||
func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) {
|
||||
// name looks like aws://availability-zone/awsVolumeId
|
||||
|
||||
// The original idea of the URL-style name was to put the AZ into the
|
||||
// host, so we could find the AZ immediately from the name without
|
||||
// querying the API. But it turns out we don't actually need it for
|
||||
// multi-AZ clusters, as we put the AZ into the labels on the PV instead.
|
||||
// However, if in future we want to support multi-AZ cluster
|
||||
// volume-awareness without using PersistentVolumes, we likely will
|
||||
// want the AZ in the host.
|
||||
|
||||
s := string(name)
|
||||
|
||||
if !strings.HasPrefix(s, "aws://") {
|
||||
// Assume a bare aws volume id (vol-1234...)
|
||||
// Build a URL with an empty host (AZ)
|
||||
s = "aws://" + "" + "/" + s
|
||||
}
|
||||
url, err := url.Parse(s)
|
||||
if err != nil {
|
||||
// TODO: Maybe we should pass a URL into the Volume functions
|
||||
return "", fmt.Errorf("Invalid disk name (%s): %v", name, err)
|
||||
}
|
||||
if url.Scheme != "aws" {
|
||||
return "", fmt.Errorf("Invalid scheme for AWS volume (%s)", name)
|
||||
}
|
||||
|
||||
awsID := url.Path
|
||||
awsID = strings.Trim(awsID, "/")
|
||||
|
||||
// We sanity check the resulting volume; the two known formats are
|
||||
// vol-12345678 and vol-12345678abcdef01
|
||||
if !awsVolumeRegMatch.MatchString(awsID) {
|
||||
return "", fmt.Errorf("Invalid format for AWS volume (%s)", name)
|
||||
}
|
||||
|
||||
return awsVolumeID(awsID), nil
|
||||
}
|
||||
|
||||
func GetAWSVolumeID(kubeVolumeID string) (string, error) {
|
||||
kid := KubernetesVolumeID(kubeVolumeID)
|
||||
awsID, err := kid.MapToAWSVolumeID()
|
||||
return string(awsID), err
|
||||
}
|
||||
|
||||
func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName types.NodeName) (*diskInfo, bool, error) {
|
||||
disk, err := newAWSDisk(c, diskName)
|
||||
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
|
||||
awsDiskInfo := &diskInfo{
|
||||
disk: disk,
|
||||
}
|
||||
|
||||
info, err := disk.describeVolume()
|
||||
|
||||
if err != nil {
|
||||
describeError := fmt.Errorf("Error describing volume %s with %v", diskName, err)
|
||||
glog.Warning(describeError)
|
||||
awsDiskInfo.volumeState = "unknown"
|
||||
return awsDiskInfo, false, describeError
|
||||
}
|
||||
|
||||
awsDiskInfo.volumeState = aws.StringValue(info.State)
|
||||
|
||||
if len(info.Attachments) > 0 {
|
||||
attachment := info.Attachments[0]
|
||||
awsDiskInfo.attachmentState = aws.StringValue(attachment.State)
|
||||
instanceID := aws.StringValue(attachment.InstanceId)
|
||||
instanceInfo, err := c.getInstanceByID(instanceID)
|
||||
|
||||
// This should never happen but if it does it could mean there was a race and instance
|
||||
// has been deleted
|
||||
if err != nil {
|
||||
fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName)
|
||||
glog.Warning(fetchErr)
|
||||
return awsDiskInfo, false, fetchErr
|
||||
}
|
||||
|
||||
awsDiskInfo.ec2Instance = instanceInfo
|
||||
awsDiskInfo.nodeName = mapInstanceToNodeName(instanceInfo)
|
||||
awsDiskInfo.hasAttachment = true
|
||||
if awsDiskInfo.nodeName == nodeName {
|
||||
return awsDiskInfo, true, nil
|
||||
}
|
||||
}
|
||||
return awsDiskInfo, false, nil
|
||||
}
|
95
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
Normal file
95
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"azure.go",
|
||||
"azure_backoff.go",
|
||||
"azure_blobDiskController.go",
|
||||
"azure_controllerCommon.go",
|
||||
"azure_fakes.go",
|
||||
"azure_file.go",
|
||||
"azure_instance_metadata.go",
|
||||
"azure_instances.go",
|
||||
"azure_loadbalancer.go",
|
||||
"azure_managedDiskController.go",
|
||||
"azure_routes.go",
|
||||
"azure_storage.go",
|
||||
"azure_storageaccount.go",
|
||||
"azure_util.go",
|
||||
"azure_util_vmss.go",
|
||||
"azure_wrap.go",
|
||||
"azure_zones.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"azure_loadbalancer_test.go",
|
||||
"azure_test.go",
|
||||
"azure_util_test.go",
|
||||
"azure_wrap_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
Normal file
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
approvers:
|
||||
- brendandburns
|
||||
- colemickens
|
||||
- jdumars
|
||||
reviewers:
|
||||
- andyzhangx
|
553
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
Normal file
553
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
Normal file
@ -0,0 +1,553 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/disk"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
const (
|
||||
// CloudProviderName is the value used for the --cloud-provider flag
|
||||
CloudProviderName = "azure"
|
||||
rateLimitQPSDefault = 1.0
|
||||
rateLimitBucketDefault = 5
|
||||
backoffRetriesDefault = 6
|
||||
backoffExponentDefault = 1.5
|
||||
backoffDurationDefault = 5 // in seconds
|
||||
backoffJitterDefault = 1.0
|
||||
maximumLoadBalancerRuleCount = 148 // According to Azure LB rule default limit
|
||||
|
||||
vmTypeVMSS = "vmss"
|
||||
vmTypeStandard = "standard"
|
||||
)
|
||||
|
||||
// Config holds the configuration parsed from the --cloud-config flag
|
||||
// All fields are required unless otherwise specified
|
||||
type Config struct {
|
||||
// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
|
||||
Cloud string `json:"cloud" yaml:"cloud"`
|
||||
// The AAD Tenant ID for the Subscription that the cluster is deployed in
|
||||
TenantID string `json:"tenantId" yaml:"tenantId"`
|
||||
// The ID of the Azure Subscription that the cluster is deployed in
|
||||
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
|
||||
// The name of the resource group that the cluster is deployed in
|
||||
ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
|
||||
// The location of the resource group that the cluster is deployed in
|
||||
Location string `json:"location" yaml:"location"`
|
||||
// The name of the VNet that the cluster is deployed in
|
||||
VnetName string `json:"vnetName" yaml:"vnetName"`
|
||||
// The name of the resource group that the Vnet is deployed in
|
||||
VnetResourceGroup string `json:"vnetResourceGroup" yaml:"vnetResourceGroup"`
|
||||
// The name of the subnet that the cluster is deployed in
|
||||
SubnetName string `json:"subnetName" yaml:"subnetName"`
|
||||
// The name of the security group attached to the cluster's subnet
|
||||
SecurityGroupName string `json:"securityGroupName" yaml:"securityGroupName"`
|
||||
// (Optional in 1.6) The name of the route table attached to the subnet that the cluster is deployed in
|
||||
RouteTableName string `json:"routeTableName" yaml:"routeTableName"`
|
||||
// (Optional) The name of the availability set that should be used as the load balancer backend
|
||||
// If this is set, the Azure cloudprovider will only add nodes from that availability set to the load
|
||||
// balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then
|
||||
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
|
||||
// In other words, if you use multiple agent pools (availability sets), you MUST set this field.
|
||||
PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
|
||||
// The type of azure nodes. Candidate valudes are: vmss and standard.
|
||||
// If not set, it will be default to standard.
|
||||
VMType string `json:"vmType" yaml:"vmType"`
|
||||
// The name of the scale set that should be used as the load balancer backend.
|
||||
// If this is set, the Azure cloudprovider will only add nodes from that scale set to the load
|
||||
// balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then
|
||||
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
|
||||
// In other words, if you use multiple agent pools (scale sets), you MUST set this field.
|
||||
PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"`
|
||||
|
||||
// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
|
||||
// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
|
||||
// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
|
||||
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
|
||||
// Enable exponential backoff to manage resource request retries
|
||||
CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"`
|
||||
// Backoff retry limit
|
||||
CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries" yaml:"cloudProviderBackoffRetries"`
|
||||
// Backoff exponent
|
||||
CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent" yaml:"cloudProviderBackoffExponent"`
|
||||
// Backoff duration
|
||||
CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration" yaml:"cloudProviderBackoffDuration"`
|
||||
// Backoff jitter
|
||||
CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter" yaml:"cloudProviderBackoffJitter"`
|
||||
// Enable rate limiting
|
||||
CloudProviderRateLimit bool `json:"cloudProviderRateLimit" yaml:"cloudProviderRateLimit"`
|
||||
// Rate limit QPS
|
||||
CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"`
|
||||
// Rate limit Bucket Size
|
||||
CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"`
|
||||
|
||||
// Use instance metadata service where possible
|
||||
UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
|
||||
|
||||
// Use managed service identity for the virtual machine to access Azure ARM APIs
|
||||
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
|
||||
|
||||
// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
|
||||
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"`
|
||||
}
|
||||
|
||||
// VirtualMachinesClient defines needed functions for azure network.VirtualMachinesClient
|
||||
type VirtualMachinesClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error)
|
||||
Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error)
|
||||
List(resourceGroupName string) (result compute.VirtualMachineListResult, err error)
|
||||
ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error)
|
||||
}
|
||||
|
||||
// InterfacesClient defines needed functions for azure network.InterfacesClient
|
||||
type InterfacesClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error)
|
||||
Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error)
|
||||
GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error)
|
||||
}
|
||||
|
||||
// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient
|
||||
type LoadBalancersClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error)
|
||||
Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error)
|
||||
List(resourceGroupName string) (result network.LoadBalancerListResult, err error)
|
||||
ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error)
|
||||
}
|
||||
|
||||
// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient
|
||||
type PublicIPAddressesClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error)
|
||||
Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error)
|
||||
List(resourceGroupName string) (result network.PublicIPAddressListResult, err error)
|
||||
ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error)
|
||||
}
|
||||
|
||||
// SubnetsClient defines needed functions for azure network.SubnetsClient
|
||||
type SubnetsClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error)
|
||||
Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error)
|
||||
List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error)
|
||||
}
|
||||
|
||||
// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient
|
||||
type SecurityGroupsClient interface {
|
||||
CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error)
|
||||
Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error)
|
||||
Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error)
|
||||
List(resourceGroupName string) (result network.SecurityGroupListResult, err error)
|
||||
}
|
||||
|
||||
// Cloud holds the config and clients
|
||||
type Cloud struct {
|
||||
Config
|
||||
Environment azure.Environment
|
||||
RoutesClient network.RoutesClient
|
||||
SubnetsClient SubnetsClient
|
||||
InterfacesClient InterfacesClient
|
||||
RouteTablesClient network.RouteTablesClient
|
||||
LoadBalancerClient LoadBalancersClient
|
||||
PublicIPAddressesClient PublicIPAddressesClient
|
||||
SecurityGroupsClient SecurityGroupsClient
|
||||
VirtualMachinesClient VirtualMachinesClient
|
||||
StorageAccountClient storage.AccountsClient
|
||||
DisksClient disk.DisksClient
|
||||
operationPollRateLimiter flowcontrol.RateLimiter
|
||||
resourceRequestBackoff wait.Backoff
|
||||
metadata *InstanceMetadata
|
||||
|
||||
// Clients for vmss.
|
||||
VirtualMachineScaleSetsClient compute.VirtualMachineScaleSetsClient
|
||||
VirtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient
|
||||
|
||||
*BlobDiskController
|
||||
*ManagedDiskController
|
||||
*controllerCommon
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
|
||||
}
|
||||
|
||||
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
|
||||
// the private RSA key
|
||||
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
privateKey, certificate, err := pkcs12.Decode(pkcs, password)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
|
||||
}
|
||||
rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
|
||||
if !isRsaKey {
|
||||
return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
|
||||
}
|
||||
|
||||
return certificate, rsaPrivateKey, nil
|
||||
}
|
||||
|
||||
// GetServicePrincipalToken creates a new service principal token based on the configuration
|
||||
func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating the OAuth config: %v", err)
|
||||
}
|
||||
|
||||
if config.UseManagedIdentityExtension {
|
||||
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
|
||||
msiEndpoint, err := adal.GetMSIVMEndpoint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromMSI(
|
||||
msiEndpoint,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientSecret) > 0 {
|
||||
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
|
||||
return adal.NewServicePrincipalToken(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
config.AADClientSecret,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
|
||||
glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
|
||||
certData, err := ioutil.ReadFile(config.AADClientCertPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
|
||||
}
|
||||
certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding the client certificate: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromCertificate(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
certificate,
|
||||
privateKey,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
|
||||
}
|
||||
|
||||
// NewCloud returns a Cloud with initialized clients
|
||||
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
config, env, err := ParseConfig(configReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
az := Cloud{
|
||||
Config: *config,
|
||||
Environment: *env,
|
||||
}
|
||||
|
||||
servicePrincipalToken, err := GetServicePrincipalToken(config, env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subnetsClient := network.NewSubnetsClient(az.SubscriptionID)
|
||||
subnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
subnetsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&subnetsClient.Client)
|
||||
az.SubnetsClient = subnetsClient
|
||||
|
||||
az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID)
|
||||
az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.RouteTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.RouteTablesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&az.RouteTablesClient.Client)
|
||||
|
||||
az.RoutesClient = network.NewRoutesClient(az.SubscriptionID)
|
||||
az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.RoutesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.RoutesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&az.RoutesClient.Client)
|
||||
|
||||
interfacesClient := network.NewInterfacesClient(az.SubscriptionID)
|
||||
interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
interfacesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&interfacesClient.Client)
|
||||
az.InterfacesClient = interfacesClient
|
||||
|
||||
loadBalancerClient := network.NewLoadBalancersClient(az.SubscriptionID)
|
||||
loadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
loadBalancerClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&loadBalancerClient.Client)
|
||||
az.LoadBalancerClient = loadBalancerClient
|
||||
|
||||
virtualMachinesClient := compute.NewVirtualMachinesClient(az.SubscriptionID)
|
||||
virtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
virtualMachinesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&virtualMachinesClient.Client)
|
||||
az.VirtualMachinesClient = virtualMachinesClient
|
||||
|
||||
publicIPAddressClient := network.NewPublicIPAddressesClient(az.SubscriptionID)
|
||||
publicIPAddressClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
publicIPAddressClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&publicIPAddressClient.Client)
|
||||
az.PublicIPAddressesClient = publicIPAddressClient
|
||||
|
||||
securityGroupsClient := network.NewSecurityGroupsClient(az.SubscriptionID)
|
||||
securityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
securityGroupsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&securityGroupsClient.Client)
|
||||
az.SecurityGroupsClient = securityGroupsClient
|
||||
|
||||
virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID)
|
||||
az.VirtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.VirtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.VirtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&virtualMachineScaleSetVMsClient.Client)
|
||||
az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
|
||||
|
||||
virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID)
|
||||
az.VirtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.VirtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
az.VirtualMachineScaleSetsClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&virtualMachineScaleSetsClient.Client)
|
||||
az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
|
||||
|
||||
az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
|
||||
az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
configureUserAgent(&az.StorageAccountClient.Client)
|
||||
|
||||
az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
|
||||
az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
|
||||
configureUserAgent(&az.DisksClient.Client)
|
||||
|
||||
// Conditionally configure rate limits
|
||||
if az.CloudProviderRateLimit {
|
||||
// Assign rate limit defaults if no configuration was passed in
|
||||
if az.CloudProviderRateLimitQPS == 0 {
|
||||
az.CloudProviderRateLimitQPS = rateLimitQPSDefault
|
||||
}
|
||||
if az.CloudProviderRateLimitBucket == 0 {
|
||||
az.CloudProviderRateLimitBucket = rateLimitBucketDefault
|
||||
}
|
||||
az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
|
||||
az.CloudProviderRateLimitQPS,
|
||||
az.CloudProviderRateLimitBucket)
|
||||
glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%g, bucket=%d",
|
||||
az.CloudProviderRateLimitQPS,
|
||||
az.CloudProviderRateLimitBucket)
|
||||
} else {
|
||||
// if rate limits are configured off, az.operationPollRateLimiter.Accept() is a no-op
|
||||
az.operationPollRateLimiter = flowcontrol.NewFakeAlwaysRateLimiter()
|
||||
}
|
||||
|
||||
// Conditionally configure resource request backoff
|
||||
if az.CloudProviderBackoff {
|
||||
// Assign backoff defaults if no configuration was passed in
|
||||
if az.CloudProviderBackoffRetries == 0 {
|
||||
az.CloudProviderBackoffRetries = backoffRetriesDefault
|
||||
}
|
||||
if az.CloudProviderBackoffExponent == 0 {
|
||||
az.CloudProviderBackoffExponent = backoffExponentDefault
|
||||
}
|
||||
if az.CloudProviderBackoffDuration == 0 {
|
||||
az.CloudProviderBackoffDuration = backoffDurationDefault
|
||||
}
|
||||
if az.CloudProviderBackoffJitter == 0 {
|
||||
az.CloudProviderBackoffJitter = backoffJitterDefault
|
||||
}
|
||||
az.resourceRequestBackoff = wait.Backoff{
|
||||
Steps: az.CloudProviderBackoffRetries,
|
||||
Factor: az.CloudProviderBackoffExponent,
|
||||
Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second,
|
||||
Jitter: az.CloudProviderBackoffJitter,
|
||||
}
|
||||
glog.V(2).Infof("Azure cloudprovider using retry backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
|
||||
az.CloudProviderBackoffRetries,
|
||||
az.CloudProviderBackoffExponent,
|
||||
az.CloudProviderBackoffDuration,
|
||||
az.CloudProviderBackoffJitter)
|
||||
}
|
||||
|
||||
az.metadata = NewInstanceMetadata()
|
||||
|
||||
if az.MaximumLoadBalancerRuleCount == 0 {
|
||||
az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
|
||||
}
|
||||
|
||||
if err := initDiskControllers(&az); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &az, nil
|
||||
}
|
||||
|
||||
// ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file
|
||||
func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) {
|
||||
var config Config
|
||||
var env azure.Environment
|
||||
|
||||
if configReader == nil {
|
||||
return &config, &env, nil
|
||||
}
|
||||
|
||||
configContents, err := ioutil.ReadAll(configReader)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(configContents, &config)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if config.Cloud == "" {
|
||||
env = azure.PublicCloud
|
||||
} else {
|
||||
env, err = azure.EnvironmentFromName(config.Cloud)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if config.VMType != "" {
|
||||
config.VMType = strings.ToLower(config.VMType)
|
||||
}
|
||||
|
||||
return &config, &env, nil
|
||||
}
|
||||
|
||||
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// It is a no-op here: this provider does not use the client builder.
func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
|
||||
|
||||
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
// The Cloud object itself implements cloudprovider.LoadBalancer.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return az, true
}
|
||||
|
||||
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
// The Cloud object itself implements cloudprovider.Instances.
func (az *Cloud) Instances() (cloudprovider.Instances, bool) {
	return az, true
}
|
||||
|
||||
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
// The Cloud object itself implements cloudprovider.Zones.
func (az *Cloud) Zones() (cloudprovider.Zones, bool) {
	return az, true
}
|
||||
|
||||
// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
// Not supported by this provider.
func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {
	return nil, false
}
|
||||
|
||||
// Routes returns a routes interface along with whether the interface is supported.
// The Cloud object itself implements cloudprovider.Routes.
func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
	return az, true
}
|
||||
|
||||
// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
// Azure performs no rewriting: the inputs are returned unchanged.
func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID.
// Always true for this provider.
func (az *Cloud) HasClusterID() bool {
	return true
}
|
||||
|
||||
// ProviderName returns the cloud provider ID (the package-level
// CloudProviderName constant).
func (az *Cloud) ProviderName() string {
	return CloudProviderName
}
|
||||
|
||||
// configureUserAgent configures the autorest client with a user agent that
|
||||
// includes "kubernetes" and the full kubernetes git version string
|
||||
// example:
|
||||
// Azure-SDK-for-Go/7.0.1-beta arm-network/2016-09-01; kubernetes-cloudprovider/v1.7.0-alpha.2.711+a2fadef8170bb0-dirty;
|
||||
func configureUserAgent(client *autorest.Client) {
|
||||
k8sVersion := version.Get().GitVersion
|
||||
client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion)
|
||||
}
|
||||
|
||||
func initDiskControllers(az *Cloud) error {
|
||||
// Common controller contains the function
|
||||
// needed by both blob disk and managed disk controllers
|
||||
|
||||
common := &controllerCommon{
|
||||
aadResourceEndPoint: az.Environment.ServiceManagementEndpoint,
|
||||
clientID: az.AADClientID,
|
||||
clientSecret: az.AADClientSecret,
|
||||
location: az.Location,
|
||||
storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
|
||||
managementEndpoint: az.Environment.ResourceManagerEndpoint,
|
||||
resourceGroup: az.ResourceGroup,
|
||||
tenantID: az.TenantID,
|
||||
tokenEndPoint: az.Environment.ActiveDirectoryEndpoint,
|
||||
subscriptionID: az.SubscriptionID,
|
||||
cloud: az,
|
||||
}
|
||||
|
||||
// BlobDiskController: contains the function needed to
|
||||
// create/attach/detach/delete blob based (unmanaged disks)
|
||||
blobController, err := newBlobDiskController(common)
|
||||
if err != nil {
|
||||
return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error())
|
||||
}
|
||||
|
||||
// ManagedDiskController: contains the functions needed to
|
||||
// create/attach/detach/delete managed disks
|
||||
managedController, err := newManagedDiskController(common)
|
||||
if err != nil {
|
||||
return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error())
|
||||
}
|
||||
|
||||
az.BlobDiskController = blobController
|
||||
az.ManagedDiskController = managedController
|
||||
az.controllerCommon = common
|
||||
|
||||
return nil
|
||||
}
|
439
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
Normal file
439
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
Normal file
@ -0,0 +1,439 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// requestBackoff if backoff is disabled in cloud provider it
|
||||
// returns a new Backoff object steps = 1
|
||||
// This is to make sure that the requested command executes
|
||||
// at least once
|
||||
func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
|
||||
if az.CloudProviderBackoff {
|
||||
return az.resourceRequestBackoff
|
||||
}
|
||||
resourceRequestBackoff = wait.Backoff{
|
||||
Steps: 1,
|
||||
}
|
||||
|
||||
return resourceRequestBackoff
|
||||
}
|
||||
|
||||
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
|
||||
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, bool, error) {
|
||||
var machine compute.VirtualMachine
|
||||
var exists bool
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
machine, exists, retryErr = az.getVirtualMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return machine, exists, err
|
||||
}
|
||||
|
||||
// GetScaleSetsVMWithRetry invokes az.getScaleSetsVM with exponential backoff retry
|
||||
func (az *Cloud) GetScaleSetsVMWithRetry(name types.NodeName) (compute.VirtualMachineScaleSetVM, bool, error) {
|
||||
var machine compute.VirtualMachineScaleSetVM
|
||||
var exists bool
|
||||
err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) {
|
||||
var retryErr error
|
||||
machine, exists, retryErr = az.getVmssVirtualMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("GetScaleSetsVMWithRetry backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(10).Infof("GetScaleSetsVMWithRetry backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return machine, exists, err
|
||||
}
|
||||
|
||||
// VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry
|
||||
func (az *Cloud) VirtualMachineClientGetWithRetry(resourceGroup, vmName string, types compute.InstanceViewTypes) (compute.VirtualMachine, error) {
|
||||
var machine compute.VirtualMachine
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
az.operationPollRateLimiter.Accept()
|
||||
machine, retryErr = az.VirtualMachinesClient.Get(resourceGroup, vmName, types)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return machine, err
|
||||
}
|
||||
|
||||
// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry.
// It fetches the first page under backoff, then follows NextLink pagination,
// retrying each ListNextResults call under backoff as well, and returns the
// accumulated list of VMs in az.ResourceGroup.
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
	allNodes := []compute.VirtualMachine{}
	var result compute.VirtualMachineListResult
	err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		var retryErr error
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("VirtualMachinesClient.List(%v): start", az.ResourceGroup)
		result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup)
		glog.V(10).Infof("VirtualMachinesClient.List(%v): end", az.ResourceGroup)
		if retryErr != nil {
			glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
				az.ResourceGroup,
				retryErr)
			// NOTE(review): returning a non-nil error from the condition func makes
			// wait.ExponentialBackoff stop immediately, so despite the "will retry"
			// log this aborts on first failure — confirm whether retry was intended.
			return false, retryErr
		}
		glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", az.ResourceGroup)
		return true, nil
	})
	if err != nil {
		return nil, err
	}

	appendResults := (result.Value != nil && len(*result.Value) > 0)
	for appendResults {
		allNodes = append(allNodes, *result.Value...)
		appendResults = false
		// follow the next link to get all the vms for resource group
		if result.NextLink != nil {
			err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
				var retryErr error
				az.operationPollRateLimiter.Accept()
				glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): start", az.ResourceGroup)
				result, retryErr = az.VirtualMachinesClient.ListNextResults(result)
				glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): end", az.ResourceGroup)
				if retryErr != nil {
					glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
						az.ResourceGroup, retryErr)
					return false, retryErr
				}
				glog.V(2).Infof("VirtualMachinesClient.ListNextResults(%v): success", az.ResourceGroup)
				return true, nil
			})
			if err != nil {
				return allNodes, err
			}
			// Continue only if the next page actually contained items.
			appendResults = (result.Value != nil && len(*result.Value) > 0)
		}
	}

	return allNodes, err
}
|
||||
|
||||
// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
|
||||
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) {
|
||||
var ip string
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ip, retryErr = az.getIPForMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("backoff: success")
|
||||
return true, nil
|
||||
})
|
||||
return ip, err
|
||||
}
|
||||
|
||||
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry.
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): start", *sg.Name)
		respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry.
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): start", *lb.Name)
		respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry.
// It fetches the first page under backoff, then follows NextLink pagination,
// retrying each ListNextResults call under backoff as well.
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
	allLBs := []network.LoadBalancer{}
	var result network.LoadBalancerListResult

	err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		var retryErr error
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("LoadBalancerClient.List(%v): start", az.ResourceGroup)
		result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup)
		glog.V(10).Infof("LoadBalancerClient.List(%v): end", az.ResourceGroup)
		if retryErr != nil {
			glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
				az.ResourceGroup,
				retryErr)
			// NOTE(review): returning a non-nil error here makes
			// wait.ExponentialBackoff stop immediately, so despite the "will retry"
			// log this aborts on first failure — confirm whether retry was intended.
			return false, retryErr
		}
		glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
		return true, nil
	})
	if err != nil {
		return nil, err
	}

	appendResults := (result.Value != nil && len(*result.Value) > 0)
	for appendResults {
		allLBs = append(allLBs, *result.Value...)
		appendResults = false

		// follow the next link to get all the vms for resource group
		if result.NextLink != nil {
			err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
				var retryErr error
				az.operationPollRateLimiter.Accept()
				glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): start", az.ResourceGroup)
				result, retryErr = az.LoadBalancerClient.ListNextResults(result)
				glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): end", az.ResourceGroup)
				if retryErr != nil {
					glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
						az.ResourceGroup,
						retryErr)
					return false, retryErr
				}
				glog.V(2).Infof("LoadBalancerClient.ListNextResults(%v) - backoff: success", az.ResourceGroup)
				return true, nil
			})
			if err != nil {
				return allLBs, err
			}
			// Continue only if the next page actually contained items.
			appendResults = (result.Value != nil && len(*result.Value) > 0)
		}
	}

	return allLBs, nil
}
|
||||
|
||||
// ListPIPWithRetry lists the public IP address resources in az.ResourceGroup
// with exponential backoff retry, following NextLink pagination (each page
// fetch is itself retried under backoff).
func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) {
	allPIPs := []network.PublicIPAddress{}
	var result network.PublicIPAddressListResult
	err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		var retryErr error
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", az.ResourceGroup)
		result, retryErr = az.PublicIPAddressesClient.List(az.ResourceGroup)
		glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", az.ResourceGroup)
		if retryErr != nil {
			glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
				az.ResourceGroup,
				retryErr)
			// NOTE(review): returning a non-nil error here makes
			// wait.ExponentialBackoff stop immediately, so despite the "will retry"
			// log this aborts on first failure — confirm whether retry was intended.
			return false, retryErr
		}
		glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", az.ResourceGroup)
		return true, nil
	})
	if err != nil {
		return nil, err
	}

	appendResults := (result.Value != nil && len(*result.Value) > 0)
	for appendResults {
		allPIPs = append(allPIPs, *result.Value...)
		appendResults = false

		// follow the next link to get all the vms for resource group
		if result.NextLink != nil {
			err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
				var retryErr error
				az.operationPollRateLimiter.Accept()
				glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", az.ResourceGroup)
				result, retryErr = az.PublicIPAddressesClient.ListNextResults(result)
				glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", az.ResourceGroup)
				if retryErr != nil {
					glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v",
						az.ResourceGroup,
						retryErr)
					return false, retryErr
				}
				glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", az.ResourceGroup)
				return true, nil
			})
			if err != nil {
				return allPIPs, err
			}
			// Continue only if the next page actually contained items.
			appendResults = (result.Value != nil && len(*result.Value) > 0)
		}
	}

	return allPIPs, nil
}
|
||||
|
||||
// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry.
func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): start", *pip.Name)
		respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): end", *pip.Name)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// CreateOrUpdateInterfaceWithRetry invokes az.InterfacesClient.CreateOrUpdate
// with exponential backoff retry. (The previous comment incorrectly named
// PublicIPAddressesClient.)
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): start", *nic.Name)
		respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry.
func (az *Cloud) DeletePublicIPWithRetry(pipName string) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): start", pipName)
		respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): end", pipName)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp, err)
	})
}
|
||||
|
||||
// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry.
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("LoadBalancerClient.Delete(%s): start", lbName)
		respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("LoadBalancerClient.Delete(%s): end", lbName)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp, err)
	})
}
|
||||
|
||||
// CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry.
// The table is written under the configured az.RouteTableName.
func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", *routeTable.Name)
		respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", *routeTable.Name)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry.
// The route is written into the configured az.RouteTableName.
func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): start", *route.Name)
		respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry.
// The route is removed from the configured az.RouteTableName.
func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("RoutesClient.Delete(%s): start", az.RouteTableName)
		respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp, err)
	})
}
|
||||
|
||||
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry.
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		// Honor the operation rate limiter before issuing the API call.
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): start", vmName)
		respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
		// Drain both channels: response first, then error.
		resp := <-respChan
		err := <-errChan
		glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
		// processRetryResponse maps the HTTP result to stop/retry/error.
		return processRetryResponse(resp.Response, err)
	})
}
|
||||
|
||||
// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
|
||||
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
|
||||
if isSuccessHTTPResponse(resp) {
|
||||
glog.V(2).Infof("backoff: success, HTTP response=%d", resp.StatusCode)
|
||||
return true, nil
|
||||
}
|
||||
if shouldRetryAPIRequest(resp, err) {
|
||||
glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
// Fall-through: stop periodic backoff, return error object from most recent request
|
||||
return true, err
|
||||
}
|
||||
|
||||
// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
|
||||
func shouldRetryAPIRequest(resp autorest.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
// HTTP 4xx or 5xx suggests we should retry
|
||||
if 399 < resp.StatusCode && resp.StatusCode < 600 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
|
||||
func isSuccessHTTPResponse(resp autorest.Response) bool {
|
||||
// HTTP 2xx suggests a successful response
|
||||
if 199 < resp.StatusCode && resp.StatusCode < 300 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
720
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
Normal file
720
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
Normal file
@ -0,0 +1,720 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
azstorage "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"github.com/rubiojr/go-vhd/vhd"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
const (
	// vhdContainerName is the blob container used for VHD-backed disks.
	vhdContainerName = "vhds"
	// useHTTPSForBlobBasedDisk selects the https scheme when building blob disk URIs.
	useHTTPSForBlobBasedDisk = true
	// blobServiceName is the blob-service segment of a storage account host name.
	blobServiceName = "blob"
)
|
||||
|
||||
// storageAccountState is the locally cached view of one storage account that
// holds blob-based (VHD) disks for this cluster.
type storageAccountState struct {
	name   string          // storage account name
	saType storage.SkuName // account SKU (e.g. Standard_LRS)
	key    string          // cached access key; empty until fetched
	// diskCount caches the number of disk blobs; -1 means "unknown, refresh
	// from the service" (see getDiskCount).
	diskCount int32
	// isValidating is a CAS flag ensuring only one goroutine polls the
	// account's provisioning state at a time (see ensureDefaultContainer).
	isValidating int32
	// defaultContainerCreated is set once the default container is known to exist.
	defaultContainerCreated bool
}
|
||||
|
||||
// BlobDiskController manages blob-based (VHD) data disks, tracking the shared
// storage accounts created for the cluster's PVCs.
type BlobDiskController struct {
	common   *controllerCommon               // shared Azure configuration and clients
	accounts map[string]*storageAccountState // cache of shared storage accounts, keyed by name
}
|
||||
|
||||
var (
	// defaultContainerName is the per-cluster blob container name (a CRC32
	// hash of resource group + location + subscription); set by setUniqueStrings.
	defaultContainerName = ""
	// storageAccountNamePrefix is a fmt template ("pvc<hash>%s") used to mint
	// new shared storage account names; set by setUniqueStrings.
	storageAccountNamePrefix = ""
	// storageAccountNameMatch is the prefix identifying accounts managed by
	// this cluster; set by setUniqueStrings.
	storageAccountNameMatch = ""
	// accountsLock guards mutation of BlobDiskController.accounts.
	accountsLock = &sync.Mutex{}
)
|
||||
|
||||
func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
|
||||
c := BlobDiskController{common: common}
|
||||
c.setUniqueStrings()
|
||||
|
||||
// get accounts
|
||||
accounts, err := c.getAllStorageAccounts()
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
|
||||
c.accounts = make(map[string]*storageAccountState)
|
||||
return &c, nil
|
||||
}
|
||||
c.accounts = accounts
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
// Returns the disk (blob) name, the blob URI, and the provisioned size in GB.
func (c *BlobDiskController) CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) {
	var err error
	accounts := []accountWithLocation{}
	if len(storageAccount) > 0 {
		// Caller pinned a specific account; use it as the only candidate.
		accounts = append(accounts, accountWithLocation{Name: storageAccount})
	} else {
		// find a storage account
		accounts, err = c.common.cloud.getStorageAccounts()
		if err != nil {
			// TODO: create a storage account and container
			return "", "", 0, err
		}
	}
	for _, account := range accounts {
		glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
		// Accept the account when it matches the requested type/location
		// filters (an empty filter matches anything), or unconditionally when
		// the caller pinned the account (whose StorageType/Location fields
		// were never populated above).
		if (storageAccountType == "" || account.StorageType == storageAccountType) && (location == "" || account.Location == location) || len(storageAccount) > 0 {
			// find the access key with this account
			key, err := c.common.cloud.getStorageAccesskey(account.Name)
			if err != nil {
				// No usable key: try the next candidate account.
				glog.V(2).Infof("no key found for storage account %s", account.Name)
				continue
			}

			client, err := azstorage.NewBasicClientOnSovereignCloud(account.Name, key, c.common.cloud.Environment)
			if err != nil {
				return "", "", 0, err
			}
			blobClient := client.GetBlobService()

			// create a page blob in this account's vhd container
			diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account.Name, name, vhdContainerName, int64(requestGB))
			if err != nil {
				return "", "", 0, err
			}

			glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
			return diskName, diskURI, requestGB, err
		}
	}
	return "", "", 0, fmt.Errorf("failed to find a matching storage account")
}
|
||||
|
||||
// DeleteVolume deletes a VHD blob
|
||||
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
|
||||
glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
|
||||
accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse vhd URI %v", err)
|
||||
}
|
||||
key, err := c.common.cloud.getStorageAccesskey(accountName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
|
||||
}
|
||||
err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
|
||||
if err != nil {
|
||||
glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseIDMissing) {
|
||||
// disk is still being used
|
||||
// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
|
||||
return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI))
|
||||
}
|
||||
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
|
||||
}
|
||||
glog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
|
||||
func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) {
|
||||
scheme := "http"
|
||||
if useHTTPSForBlobBasedDisk {
|
||||
scheme = "https"
|
||||
}
|
||||
host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix)
|
||||
reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
|
||||
re := regexp.MustCompile(reStr)
|
||||
res := re.FindSubmatch([]byte(diskURI))
|
||||
if len(res) < 3 {
|
||||
return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI)
|
||||
}
|
||||
return string(res[1]), string(res[2]), nil
|
||||
}
|
||||
|
||||
// createVHDBlobDisk creates a fixed-size page blob shaped as a VHD (sizeGB of
// data followed by the VHD header/footer) in containerName, creating the
// container on demand. Returns the blob name (with ".vhd" appended) and its
// full URI.
func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) {
	container := blobClient.GetContainerReference(containerName)
	size := 1024 * 1024 * 1024 * sizeGB
	vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */
	// Blob name in URL must end with '.vhd' extension.
	vhdName = vhdName + ".vhd"

	tags := make(map[string]string)
	tags["createdby"] = "k8sAzureDataDisk"
	glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)

	blob := container.GetBlobReference(vhdName)
	blob.Properties.ContentLength = vhdSize
	blob.Metadata = tags
	err := blob.PutPageBlob(nil)
	if err != nil {
		// if container doesn't exist, create one and retry PutPageBlob
		detail := err.Error()
		if strings.Contains(detail, errContainerNotFound) {
			err = container.Create(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
			if err == nil {
				err = blob.PutPageBlob(nil)
			}
		}
	}
	if err != nil {
		return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err)
	}

	// add VHD signature to the blob
	h, err := createVHDHeader(uint64(size))
	if err != nil {
		// Best-effort cleanup of the half-initialized blob before bailing out.
		blob.DeleteIfExists(nil)
		return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
	}

	// The fixed-VHD header occupies the final VHD_HEADER_SIZE bytes of the blob.
	blobRange := azstorage.BlobRange{
		Start: uint64(size),
		End:   uint64(vhdSize - 1),
	}
	if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
		glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
			vhdName, containerName, accountName, err.Error())
		return "", "", err
	}

	scheme := "http"
	if useHTTPSForBlobBasedDisk {
		scheme = "https"
	}

	// Reconstruct the blob's public URI from its components.
	host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix)
	uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName)
	return vhdName, uri, nil
}
|
||||
|
||||
// delete a vhd blob
|
||||
func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error {
|
||||
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, c.common.cloud.Environment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blobSvc := client.GetBlobService()
|
||||
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
blob := container.GetBlobReference(blobName)
|
||||
return blob.Delete(nil)
|
||||
}
|
||||
|
||||
//CreateBlobDisk : create a blob disk in a node
|
||||
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) {
|
||||
glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
|
||||
|
||||
storageAccountName, err := c.findSANameForDisk(storageAccountType)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
blobClient, err := c.getBlobSvcClient(storageAccountName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1)
|
||||
|
||||
return diskURI, nil
|
||||
}
|
||||
|
||||
//DeleteBlobDisk : delete a blob disk from a node
|
||||
func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, ok := c.accounts[storageAccountName]
|
||||
if !ok {
|
||||
// the storage account is specified by user
|
||||
glog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
|
||||
return c.DeleteVolume(diskURI)
|
||||
}
|
||||
|
||||
blobSvc, err := c.getBlobSvcClient(storageAccountName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName)
|
||||
|
||||
container := blobSvc.GetContainerReference(defaultContainerName)
|
||||
blob := container.GetBlobReference(vhdName)
|
||||
_, err = blob.DeleteIfExists(nil)
|
||||
|
||||
if c.accounts[storageAccountName].diskCount == -1 {
|
||||
if diskCount, err := c.getDiskCount(storageAccountName); err != nil {
|
||||
c.accounts[storageAccountName].diskCount = int32(diskCount)
|
||||
} else {
|
||||
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
|
||||
return nil // we have failed to aquire a new count. not an error condition
|
||||
}
|
||||
}
|
||||
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
|
||||
return err
|
||||
}
|
||||
|
||||
//Sets unique strings to be used as accountnames && || blob containers names
|
||||
func (c *BlobDiskController) setUniqueStrings() {
|
||||
uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID
|
||||
hash := MakeCRC32(uniqueString)
|
||||
//used to generate a unqie container name used by this cluster PVC
|
||||
defaultContainerName = hash
|
||||
|
||||
storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash)
|
||||
// Used to filter relevant accounts (accounts used by shared PVC)
|
||||
storageAccountNameMatch = storageAccountNamePrefix
|
||||
// Used as a template to create new names for relevant accounts
|
||||
storageAccountNamePrefix = storageAccountNamePrefix + "%s"
|
||||
}
|
||||
// getStorageAccountKey returns an access key for SAName, preferring the cached
// value and otherwise listing keys via the storage account client (caching the
// first key when the account is tracked).
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
	// Fast path: cached key.
	if account, exists := c.accounts[SAName]; exists && account.key != "" {
		return c.accounts[SAName].key, nil
	}
	listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName)
	if err != nil {
		return "", err
	}
	if listKeysResult.Keys == nil {
		return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName)
	}
	for _, v := range *listKeysResult.Keys {
		// NOTE(review): this compares the key *value* to the literal "key1",
		// while the error message below talks about a key *named* key1 —
		// presumably the intent was to match the key name field; confirm
		// against the Azure storage AccountKey schema. As written the
		// condition is effectively never true, and the code below
		// unconditionally caches and returns the first key's value on the
		// first loop iteration.
		if v.Value != nil && *v.Value == "key1" {
			if _, ok := c.accounts[SAName]; !ok {
				// Account not tracked locally: return the key without caching.
				glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
				return *v.Value, nil
			}
		}

		// Cache and return the first listed key.
		c.accounts[SAName].key = *v.Value
		return c.accounts[SAName].key, nil
	}

	return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName)
}
|
||||
|
||||
func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) {
|
||||
key := ""
|
||||
var client azstorage.Client
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
var err error
|
||||
if key, err = c.getStorageAccountKey(SAName); err != nil {
|
||||
return blobSvc, err
|
||||
}
|
||||
|
||||
if client, err = azstorage.NewBasicClientOnSovereignCloud(SAName, key, c.common.cloud.Environment); err != nil {
|
||||
return blobSvc, err
|
||||
}
|
||||
|
||||
blobSvc = client.GetBlobService()
|
||||
return blobSvc, nil
|
||||
}
|
||||
|
||||
// ensureDefaultContainer makes sure the cluster's default blob container
// exists in the given storage account, waiting (with backoff) for the account
// to finish provisioning first. The result is cached on storageAccountState so
// subsequent calls short-circuit.
func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error {
	var err error
	var blobSvc azstorage.BlobStorageClient

	// short circut the check via local cache
	// we are forgiving the fact that account may not be in cache yet
	if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
		return nil
	}

	// not cached, check existance and readiness
	bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)

	// account does not exist
	if !bExist {
		return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName)
	}

	// account exists but not ready yet
	if provisionState != storage.Succeeded {
		// we don't want many attempts to validate the account readiness
		// here hence we are locking
		counter := 1
		// Spin (3s per attempt, ~20 attempts max) until we win the CAS on
		// isValidating so only one goroutine polls ARM for this account.
		// NOTE(review): swapped is evaluated once in the init statement and
		// never re-evaluated inside the loop body, so a goroutine that loses
		// the initial CAS can never acquire the flag and will always hit the
		// timeout below — confirm whether the CAS was meant to be retried
		// each iteration.
		for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); swapped != true; {
			time.Sleep(3 * time.Second)
			counter = counter + 1
			// check if we passed the max sleep
			if counter >= 20 {
				return fmt.Errorf("azureDisk - timeout waiting to aquire lock to validate account:%s readiness", storageAccountName)
			}
		}

		// swapped: we own the validation flag; release it on exit.
		defer func() {
			c.accounts[storageAccountName].isValidating = 0
		}()

		// short circut the check again.
		if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
			return nil
		}

		// Poll until ARM reports the account provisioning as Succeeded.
		err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
			_, provisionState, err := c.getStorageAccountState(storageAccountName)

			if err != nil {
				glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
				return false, nil // error performing the query - retryable
			}

			if provisionState == storage.Succeeded {
				return true, nil
			}

			glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
			return false, nil // back off and see if the account becomes ready on next retry
		})
		// we have failed to ensure that account is ready for us to create
		// the default vhd container
		if err != nil {
			if err == kwait.ErrWaitTimeout {
				return fmt.Errorf("azureDisk - timed out waiting for storage account %s to become ready", storageAccountName)
			}
			return err
		}
	}

	if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil {
		return err
	}

	// Create the default container if it is missing (idempotent).
	container := blobSvc.GetContainerReference(defaultContainerName)
	bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
	if err != nil {
		return err
	}
	if bCreated {
		glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName)
	}

	// flag so we no longer have to check on ARM
	c.accounts[storageAccountName].defaultContainerCreated = true
	return nil
}
|
||||
|
||||
// Gets Disk counts per storage account
|
||||
func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
|
||||
// if we have it in cache
|
||||
if c.accounts[SAName].diskCount != -1 {
|
||||
return int(c.accounts[SAName].diskCount), nil
|
||||
}
|
||||
|
||||
var err error
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
|
||||
if err = c.ensureDefaultContainer(SAName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if blobSvc, err = c.getBlobSvcClient(SAName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
params := azstorage.ListBlobsParameters{}
|
||||
|
||||
container := blobSvc.GetContainerReference(defaultContainerName)
|
||||
response, err := container.ListBlobs(params)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
|
||||
c.accounts[SAName].diskCount = int32(len(response.Blobs))
|
||||
|
||||
return int(c.accounts[SAName].diskCount), nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
|
||||
accountListResult, err := c.common.cloud.StorageAccountClient.List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if accountListResult.Value == nil {
|
||||
return nil, fmt.Errorf("azureDisk - empty accountListResult")
|
||||
}
|
||||
|
||||
accounts := make(map[string]*storageAccountState)
|
||||
for _, v := range *accountListResult.Value {
|
||||
if strings.Index(*v.Name, storageAccountNameMatch) != 0 {
|
||||
continue
|
||||
}
|
||||
if v.Name == nil || v.Sku == nil {
|
||||
glog.Infof("azureDisk - accountListResult Name or Sku is nil")
|
||||
continue
|
||||
}
|
||||
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
|
||||
|
||||
sastate := &storageAccountState{
|
||||
name: *v.Name,
|
||||
saType: (*v.Sku).Name,
|
||||
diskCount: -1,
|
||||
}
|
||||
|
||||
accounts[*v.Name] = sastate
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
|
||||
bExist, _, _ := c.getStorageAccountState(storageAccountName)
|
||||
if bExist {
|
||||
newAccountState := &storageAccountState{
|
||||
diskCount: -1,
|
||||
saType: storageAccountType,
|
||||
name: storageAccountName,
|
||||
}
|
||||
|
||||
c.addAccountState(storageAccountName, newAccountState)
|
||||
}
|
||||
// Account Does not exist
|
||||
if !bExist {
|
||||
if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
|
||||
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType))
|
||||
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storageAccountType},
|
||||
Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")},
|
||||
Location: &location}
|
||||
cancel := make(chan struct{})
|
||||
|
||||
_, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel)
|
||||
err := <-errChan
|
||||
if err != nil {
|
||||
return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err))
|
||||
}
|
||||
|
||||
newAccountState := &storageAccountState{
|
||||
diskCount: -1,
|
||||
saType: storageAccountType,
|
||||
name: storageAccountName,
|
||||
}
|
||||
|
||||
c.addAccountState(storageAccountName, newAccountState)
|
||||
}
|
||||
|
||||
// finally, make sure that we default container is created
|
||||
// before handing it back over
|
||||
return c.ensureDefaultContainer(storageAccountName)
|
||||
}
|
||||
|
||||
// finds a new suitable storageAccount for this disk.
// Strategy: among the cluster's shared accounts of the requested SKU, pick an
// empty account immediately, otherwise the least-loaded one; create a brand
// new account when none match or when the post-allocation average utilization
// would exceed the growth threshold (while under the account cap).
func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
	maxDiskCount := maxDisksPerStorageAccounts
	SAName := ""
	totalDiskCounts := 0
	countAccounts := 0 // accounts of this SKU type.
	for _, v := range c.accounts {
		// filter out any stand-alone disks/accounts
		if strings.Index(v.name, storageAccountNameMatch) != 0 {
			continue
		}

		// note: we compute the average stratified by type.
		// this is to enable the user to grow per SA type, to avoid low
		// avg utilization on one account type skewing all data.

		if v.saType == storageAccountType {
			// compute average
			dCount, err := c.getDiskCount(v.name)
			if err != nil {
				return "", err
			}
			totalDiskCounts = totalDiskCounts + dCount
			countAccounts = countAccounts + 1
			// empty account
			if dCount == 0 {
				glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
				return v.name, nil // shortcircut, avg is good and no need to adjust
			}
			// if this account is less allocated
			if dCount < maxDiskCount {
				maxDiskCount = dCount
				SAName = v.name
			}
		}
	}

	// if we failed to find a storage account, create a fresh one.
	if SAName == "" {
		glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
		SAName = getAccountNameForNum(c.getNextAccountNum())
		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
		if err != nil {
			return "", err
		}
		return SAName, nil
	}

	disksAfter := totalDiskCounts + 1 // with the new one!

	// Utilization across all accounts of this SKU after placing the new disk.
	avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
	aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)

	// averages are not ok and we should create more accounts if we can
	if aboveAvg && countAccounts < maxStorageAccounts {
		glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
		SAName = getAccountNameForNum(c.getNextAccountNum())
		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
		if err != nil {
			return "", err
		}
		return SAName, nil
	}

	// averages are not ok and we are at capacity (max storage accounts allowed)
	if aboveAvg && countAccounts == maxStorageAccounts {
		glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
			avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
	}

	// we found a storage account && [ avg is ok || we reached max SA count ]
	return SAName, nil
}
|
||||
func (c *BlobDiskController) getNextAccountNum() int {
|
||||
max := 0
|
||||
|
||||
for k := range c.accounts {
|
||||
// filter out accounts that are for standalone
|
||||
if strings.Index(k, storageAccountNameMatch) != 0 {
|
||||
continue
|
||||
}
|
||||
num := getAccountNumFromName(k)
|
||||
if num > max {
|
||||
max = num
|
||||
}
|
||||
}
|
||||
|
||||
return max + 1
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error {
|
||||
resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err)
|
||||
}
|
||||
|
||||
c.removeAccountState(storageAccountName)
|
||||
|
||||
glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName)
|
||||
return nil
|
||||
}
|
||||
|
||||
//Gets storage account exist, provisionStatus, Error if any
|
||||
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
|
||||
account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
return true, account.AccountProperties.ProvisioningState, nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
|
||||
accountsLock.Lock()
|
||||
defer accountsLock.Unlock()
|
||||
|
||||
if _, ok := c.accounts[key]; !ok {
|
||||
c.accounts[key] = state
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) removeAccountState(key string) {
|
||||
accountsLock.Lock()
|
||||
defer accountsLock.Unlock()
|
||||
delete(c.accounts, key)
|
||||
}
|
||||
|
||||
// pads account num with zeros as needed
|
||||
func getAccountNameForNum(num int) string {
|
||||
sNum := strconv.Itoa(num)
|
||||
missingZeros := 3 - len(sNum)
|
||||
strZero := ""
|
||||
for missingZeros > 0 {
|
||||
strZero = strZero + "0"
|
||||
missingZeros = missingZeros - 1
|
||||
}
|
||||
|
||||
sNum = strZero + sNum
|
||||
return fmt.Sprintf(storageAccountNamePrefix, sNum)
|
||||
}
|
||||
|
||||
// getAccountNumFromName extracts the numeric suffix (last three characters)
// of a shared storage account name; a non-numeric suffix yields 0.
// Note: like the original, this assumes len(accountName) >= 3.
func getAccountNumFromName(accountName string) int {
	suffix := accountName[len(accountName)-3:]
	num, _ := strconv.Atoi(suffix)
	return num
}
|
||||
|
||||
func createVHDHeader(size uint64) ([]byte, error) {
|
||||
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
|
||||
b := new(bytes.Buffer)
|
||||
err := binary.Write(b, binary.BigEndian, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// diskNameandSANameFromURI splits a blob URI such as
// https://foo.blob.core.windows.net/vhds/bar.vhd into the storage account
// name ("foo") and the blob/disk name ("bar.vhd").
func diskNameandSANameFromURI(diskURI string) (string, string, error) {
	parsed, err := url.Parse(diskURI)
	if err != nil {
		return "", "", err
	}

	// Account name is the first dot-separated label of the host.
	accountName := strings.Split(parsed.Host, ".")[0]

	// Disk name is the final path segment.
	pathParts := strings.Split(parsed.Path, "/")
	return accountName, pathParts[len(pathParts)-1], nil
}
|
270
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
generated
vendored
Normal file
270
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
generated
vendored
Normal file
@ -0,0 +1,270 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDataDiskCount int = 16 // which will allow you to work with most medium size VMs (if not found in map)
|
||||
storageAccountNameTemplate = "pvc%s"
|
||||
|
||||
// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
|
||||
maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
|
||||
maxDisksPerStorageAccounts = 60
|
||||
storageAccountUtilizationBeforeGrowing = 0.5
|
||||
storageAccountsCountInit = 2 // When the plug-in is init-ed, 2 storage accounts will be created to allow fast pvc create/attach/mount
|
||||
|
||||
maxLUN = 64 // max number of LUNs per VM
|
||||
errLeaseFailed = "AcquireDiskLeaseFailed"
|
||||
errLeaseIDMissing = "LeaseIdMissing"
|
||||
errContainerNotFound = "ContainerNotFound"
|
||||
)
|
||||
|
||||
// defaultBackOff is the retry policy used when polling ARM operations (e.g.
// waiting for storage account provisioning): 20 steps starting at 2s, growing
// by 1.5x per step, with no jitter.
var defaultBackOff = kwait.Backoff{
	Steps:    20,
	Duration: 2 * time.Second,
	Factor:   1.5,
	Jitter:   0.0,
}
|
||||
|
||||
// controllerCommon carries the Azure identity/endpoint configuration shared by
// the disk controllers, plus a back-reference to the owning Cloud.
type controllerCommon struct {
	tenantID              string
	subscriptionID        string
	location              string
	storageEndpointSuffix string // storage endpoint suffix; varies per sovereign cloud
	resourceGroup         string
	clientID              string
	clientSecret          string
	managementEndpoint    string
	tokenEndPoint         string
	aadResourceEndPoint   string
	aadToken              string
	expiresOn             time.Time
	cloud                 *Cloud // owning cloud provider instance
}
|
||||
|
||||
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if err != nil {
		return err
	} else if !exists {
		return cloudprovider.InstanceNotFound
	}
	// Append the new disk to the VM's current data-disk list; managed disks
	// attach by resource ID, unmanaged disks by blob (VHD) URI.
	disks := *vm.StorageProfile.DataDisks
	if isManagedDisk {
		disks = append(disks,
			compute.DataDisk{
				Name:         &diskName,
				Lun:          &lun,
				Caching:      cachingMode,
				CreateOption: "attach",
				ManagedDisk: &compute.ManagedDiskParameters{
					ID: &diskURI,
				},
			})
	} else {
		disks = append(disks,
			compute.DataDisk{
				Name: &diskName,
				Vhd: &compute.VirtualHardDisk{
					URI: &diskURI,
				},
				Lun:          &lun,
				Caching:      cachingMode,
				CreateOption: "attach",
			})
	}

	// Only Location and the updated StorageProfile are sent in the update.
	newVM := compute.VirtualMachine{
		Location: vm.Location,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				DataDisks: &disks,
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName)
	c.cloud.operationPollRateLimiter.Accept()
	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	// On a retryable API failure, fall back to the backoff-wrapped update;
	// a retry failure replaces err so the caller sees the final outcome.
	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure attach failed, err: %v", err)
		detail := err.Error()
		if strings.Contains(detail, errLeaseFailed) {
			// if lease cannot be acquired, immediately detach the disk and return the original error
			glog.Infof("azureDisk - failed to acquire disk lease, try detach")
			// NOTE(review): the detach error is deliberately ignored so the
			// original attach error is the one returned (best-effort cleanup).
			c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
		}
	} else {
		glog.V(4).Infof("azureDisk - azure attach succeeded")
	}
	return err
}
||||
|
||||
// DetachDiskByName detaches a vhd from host
|
||||
// the vhd can be identified by diskName or diskURI
|
||||
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
|
||||
if err != nil || !exists {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
|
||||
return nil
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
bFoundDisk := false
|
||||
for i, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
bFoundDisk = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !bFoundDisk {
|
||||
return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
|
||||
}
|
||||
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName)
|
||||
c.cloud.operationPollRateLimiter.Accept()
|
||||
respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
|
||||
resp := <-respChan
|
||||
err = <-errChan
|
||||
if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
|
||||
retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.cloud.ResourceGroup, vmName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
|
||||
} else {
|
||||
glog.V(4).Infof("azureDisk - azure disk detach succeeded")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
|
||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
} else if !exists {
|
||||
return -1, cloudprovider.InstanceNotFound
|
||||
}
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun
|
||||
// return -1 if all luns are used
|
||||
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
vm, exists, err := c.cloud.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
} else if !exists {
|
||||
return -1, cloudprovider.InstanceNotFound
|
||||
}
|
||||
used := make([]bool, maxLUN)
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil {
|
||||
used[*disk.Lun] = true
|
||||
}
|
||||
}
|
||||
for k, v := range used {
|
||||
if !v {
|
||||
return int32(k), nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("All Luns are used")
|
||||
}
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
	// Start with every disk marked not-attached; flip to true on a match.
	attached := make(map[string]bool)
	for _, diskName := range diskNames {
		attached[diskName] = false
	}
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if !exists {
		// if host doesn't exist, no need to detach
		// NOTE(review): !exists is checked before err, so a lookup error on
		// a missing VM is reported as "nothing attached" with a nil error —
		// confirm this swallowing is the intended semantics.
		glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
			nodeName, diskNames)
		return attached, nil
	} else if err != nil {
		return attached, err
	}

	// Disks are matched by name only (URI is not consulted here).
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		for _, diskName := range diskNames {
			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
				attached[diskName] = true
			}
		}
	}

	return attached, nil
}
627
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
Normal file
627
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
Normal file
@ -0,0 +1,627 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
// fakeAzureLBClient is an in-memory stand-in for the Azure LoadBalancers
// client; FakeStore is keyed by resource group, then load-balancer name.
type fakeAzureLBClient struct {
	mutex     *sync.Mutex // guards FakeStore
	FakeStore map[string]map[string]network.LoadBalancer
}
||||
|
||||
func newFakeAzureLBClient() fakeAzureLBClient {
|
||||
fLBC := fakeAzureLBClient{}
|
||||
fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer)
|
||||
fLBC.mutex = &sync.Mutex{}
|
||||
return fLBC
|
||||
}
|
||||
|
||||
// CreateOrUpdate stores the load balancer in FakeStore and reports HTTP 200
// over SDK-style result/error channels.
func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) {
	fLBC.mutex.Lock()
	defer fLBC.mutex.Unlock()
	// Buffered channels plus a deferred send mimic the SDK's async contract
	// without requiring the caller to receive before this function returns.
	resultChan := make(chan network.LoadBalancer, 1)
	errChan := make(chan error, 1)
	var result network.LoadBalancer
	var err error
	defer func() {
		resultChan <- result
		errChan <- err
		close(resultChan)
		close(errChan)
	}()
	if _, ok := fLBC.FakeStore[resourceGroupName]; !ok {
		fLBC.FakeStore[resourceGroupName] = make(map[string]network.LoadBalancer)
	}

	// For dynamic ip allocation, just fill in the PrivateIPAddress
	if parameters.FrontendIPConfigurations != nil {
		for idx, config := range *parameters.FrontendIPConfigurations {
			if config.PrivateIPAllocationMethod == network.Dynamic {
				// Here we randomly assign an ip as private ip
				// It dosen't smart enough to know whether it is in the subnet's range
				(*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
			}
		}
	}
	fLBC.FakeStore[resourceGroupName][loadBalancerName] = parameters
	result = fLBC.FakeStore[resourceGroupName][loadBalancerName]
	result.Response.Response = &http.Response{
		StatusCode: http.StatusOK,
	}
	err = nil
	return resultChan, errChan
}
||||
|
||||
// Delete removes the load balancer from FakeStore, answering 202 on success
// and 404 (with a DetailedError) when it does not exist.
func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	fLBC.mutex.Lock()
	defer fLBC.mutex.Unlock()
	respChan := make(chan autorest.Response, 1)
	errChan := make(chan error, 1)
	var resp autorest.Response
	var err error
	// Deferred send delivers whatever resp/err hold at return time.
	defer func() {
		respChan <- resp
		errChan <- err
		close(respChan)
		close(errChan)
	}()
	if rgLBs, ok := fLBC.FakeStore[resourceGroupName]; ok {
		if _, ok := rgLBs[loadBalancerName]; ok {
			delete(rgLBs, loadBalancerName)
			resp.Response = &http.Response{
				StatusCode: http.StatusAccepted,
			}
			err = nil
			return respChan, errChan
		}
	}
	resp.Response = &http.Response{
		StatusCode: http.StatusNotFound,
	}
	err = autorest.DetailedError{
		StatusCode: http.StatusNotFound,
		Message:    "Not such LB",
	}
	return respChan, errChan
}
||||
|
||||
func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fLBC.FakeStore[resourceGroupName][loadBalancerName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such LB",
|
||||
}
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
var value []network.LoadBalancer
|
||||
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fLBC.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
result.NextLink = nil
|
||||
result.Value = &value
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fLBC fakeAzureLBClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
result.NextLink = nil
|
||||
result.Value = nil
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// fakeAzurePIPClient is an in-memory stand-in for the Azure PublicIPAddresses
// client; FakeStore is keyed by resource group, then public-IP name.
type fakeAzurePIPClient struct {
	mutex     *sync.Mutex // guards FakeStore
	FakeStore map[string]map[string]network.PublicIPAddress
	// SubscriptionID is used to build full ARM resource IDs for stored IPs.
	SubscriptionID string
}
||||
|
||||
// publicIPAddressIDTemplate is the ARM resource-ID format for a public IP:
// subscription ID, resource group, then the publicIPAddresses name.
const publicIPAddressIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses/%s"
// getpublicIPAddressID returns the full ARM identifier of a publicIPAddress
// under the given subscription and resource group.
func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName string) string {
	// Format string inlined from publicIPAddressIDTemplate; output is
	// byte-identical to the template-based version.
	return fmt.Sprintf(
		"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses/%s",
		subscriptionID, resourceGroupName, pipName)
}
||||
|
||||
func newFakeAzurePIPClient(subscriptionID string) fakeAzurePIPClient {
|
||||
fAPC := fakeAzurePIPClient{}
|
||||
fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress)
|
||||
fAPC.SubscriptionID = subscriptionID
|
||||
fAPC.mutex = &sync.Mutex{}
|
||||
return fAPC
|
||||
}
|
||||
|
||||
// CreateOrUpdate stores the public IP in FakeStore, assigning its ARM ID and
// (for static allocation) a random address, and reports HTTP 200 over
// SDK-style result/error channels.
func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) {
	fAPC.mutex.Lock()
	defer fAPC.mutex.Unlock()
	resultChan := make(chan network.PublicIPAddress, 1)
	errChan := make(chan error, 1)
	var result network.PublicIPAddress
	var err error
	// Deferred send delivers whatever result/err hold at return time.
	defer func() {
		resultChan <- result
		errChan <- err
		close(resultChan)
		close(errChan)
	}()
	if _, ok := fAPC.FakeStore[resourceGroupName]; !ok {
		fAPC.FakeStore[resourceGroupName] = make(map[string]network.PublicIPAddress)
	}

	// assign id
	pipID := getpublicIPAddressID(fAPC.SubscriptionID, resourceGroupName, publicIPAddressName)
	parameters.ID = &pipID

	// only create in the case user has not provided
	if parameters.PublicIPAddressPropertiesFormat != nil &&
		parameters.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod == network.Static {
		// assign ip
		parameters.IPAddress = getRandomIPPtr()
	}

	fAPC.FakeStore[resourceGroupName][publicIPAddressName] = parameters
	result = fAPC.FakeStore[resourceGroupName][publicIPAddressName]
	result.Response.Response = &http.Response{
		StatusCode: http.StatusOK,
	}
	err = nil
	return resultChan, errChan
}
||||
|
||||
// Delete removes the public IP from FakeStore, answering 202 on success and
// 404 (with a DetailedError) when it does not exist.
func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	fAPC.mutex.Lock()
	defer fAPC.mutex.Unlock()
	respChan := make(chan autorest.Response, 1)
	errChan := make(chan error, 1)
	var resp autorest.Response
	var err error
	// Deferred send delivers whatever resp/err hold at return time.
	defer func() {
		respChan <- resp
		errChan <- err
		close(respChan)
		close(errChan)
	}()
	if rgPIPs, ok := fAPC.FakeStore[resourceGroupName]; ok {
		if _, ok := rgPIPs[publicIPAddressName]; ok {
			delete(rgPIPs, publicIPAddressName)
			resp.Response = &http.Response{
				StatusCode: http.StatusAccepted,
			}
			err = nil
			return respChan, errChan
		}
	}
	resp.Response = &http.Response{
		StatusCode: http.StatusNotFound,
	}
	err = autorest.DetailedError{
		StatusCode: http.StatusNotFound,
		Message:    "Not such PIP",
	}
	return respChan, errChan
}
||||
|
||||
func (fAPC fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fAPC.FakeStore[resourceGroupName][publicIPAddressName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such PIP",
|
||||
}
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
return network.PublicIPAddressListResult{}, nil
|
||||
}
|
||||
|
||||
func (fAPC fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
var value []network.PublicIPAddress
|
||||
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fAPC.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
result.NextLink = nil
|
||||
result.Value = &value
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// fakeAzureInterfacesClient is an in-memory stand-in for the Azure network
// Interfaces client; FakeStore is keyed by resource group, then NIC name.
type fakeAzureInterfacesClient struct {
	mutex     *sync.Mutex // guards FakeStore
	FakeStore map[string]map[string]network.Interface
}
||||
|
||||
func newFakeAzureInterfacesClient() fakeAzureInterfacesClient {
|
||||
fIC := fakeAzureInterfacesClient{}
|
||||
fIC.FakeStore = make(map[string]map[string]network.Interface)
|
||||
fIC.mutex = &sync.Mutex{}
|
||||
|
||||
return fIC
|
||||
}
|
||||
|
||||
// CreateOrUpdate stores the network interface in FakeStore and reports HTTP
// 200 over SDK-style result/error channels.
func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) {
	fIC.mutex.Lock()
	defer fIC.mutex.Unlock()
	resultChan := make(chan network.Interface, 1)
	errChan := make(chan error, 1)
	var result network.Interface
	var err error
	// Deferred send delivers whatever result/err hold at return time.
	defer func() {
		resultChan <- result
		errChan <- err
		close(resultChan)
		close(errChan)
	}()
	if _, ok := fIC.FakeStore[resourceGroupName]; !ok {
		fIC.FakeStore[resourceGroupName] = make(map[string]network.Interface)
	}
	fIC.FakeStore[resourceGroupName][networkInterfaceName] = parameters
	result = fIC.FakeStore[resourceGroupName][networkInterfaceName]
	result.Response.Response = &http.Response{
		StatusCode: http.StatusOK,
	}
	err = nil

	return resultChan, errChan
}
||||
|
||||
func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Interface",
|
||||
}
|
||||
}
|
||||
|
||||
// GetVirtualMachineScaleSetNetworkInterface is an unimplemented stub: it
// always returns a zero-value Interface and nil error, ignoring all args.
func (fIC fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
	return result, nil
}
||||
|
||||
// fakeAzureVirtualMachinesClient is an in-memory stand-in for the Azure
// VirtualMachines client; FakeStore is keyed by resource group, then VM name.
type fakeAzureVirtualMachinesClient struct {
	mutex     *sync.Mutex // guards FakeStore
	FakeStore map[string]map[string]compute.VirtualMachine
}
||||
|
||||
func newFakeAzureVirtualMachinesClient() fakeAzureVirtualMachinesClient {
|
||||
fVMC := fakeAzureVirtualMachinesClient{}
|
||||
fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine)
|
||||
fVMC.mutex = &sync.Mutex{}
|
||||
return fVMC
|
||||
}
|
||||
|
||||
// CreateOrUpdate stores the VM in FakeStore and reports HTTP 200 over
// SDK-style result/error channels.
func (fVMC fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) {
	fVMC.mutex.Lock()
	defer fVMC.mutex.Unlock()
	resultChan := make(chan compute.VirtualMachine, 1)
	errChan := make(chan error, 1)
	var result compute.VirtualMachine
	var err error
	// Deferred send delivers whatever result/err hold at return time.
	defer func() {
		resultChan <- result
		errChan <- err
		close(resultChan)
		close(errChan)
	}()
	if _, ok := fVMC.FakeStore[resourceGroupName]; !ok {
		fVMC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachine)
	}
	fVMC.FakeStore[resourceGroupName][VMName] = parameters
	result = fVMC.FakeStore[resourceGroupName][VMName]
	result.Response.Response = &http.Response{
		StatusCode: http.StatusOK,
	}
	err = nil
	return resultChan, errChan
}
||||
|
||||
func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fVMC.FakeStore[resourceGroupName][VMName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such VM",
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
var value []compute.VirtualMachine
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fVMC.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
result.NextLink = nil
|
||||
result.Value = &value
|
||||
return result, nil
|
||||
}
|
||||
func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
return compute.VirtualMachineListResult{}, nil
|
||||
}
|
||||
|
||||
// fakeAzureSubnetsClient is an in-memory stand-in for the Azure Subnets
// client; FakeStore is keyed by "<rg>AND<vnet>", then subnet name.
type fakeAzureSubnetsClient struct {
	mutex     *sync.Mutex // guards FakeStore
	FakeStore map[string]map[string]network.Subnet
}
||||
|
||||
func newFakeAzureSubnetsClient() fakeAzureSubnetsClient {
|
||||
fASC := fakeAzureSubnetsClient{}
|
||||
fASC.FakeStore = make(map[string]map[string]network.Subnet)
|
||||
fASC.mutex = &sync.Mutex{}
|
||||
return fASC
|
||||
}
|
||||
|
||||
// CreateOrUpdate stores the subnet under the combined "<rg>AND<vnet>" key
// and reports HTTP 200 over SDK-style result/error channels.
func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) {
	fASC.mutex.Lock()
	defer fASC.mutex.Unlock()
	resultChan := make(chan network.Subnet, 1)
	errChan := make(chan error, 1)
	var result network.Subnet
	var err error
	// Deferred send delivers whatever result/err hold at return time.
	defer func() {
		resultChan <- result
		errChan <- err
		close(resultChan)
		close(errChan)
	}()
	// Subnets are scoped to a vnet, so the store key combines both names.
	rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
	if _, ok := fASC.FakeStore[rgVnet]; !ok {
		fASC.FakeStore[rgVnet] = make(map[string]network.Subnet)
	}
	fASC.FakeStore[rgVnet][subnetName] = subnetParameters
	result = fASC.FakeStore[rgVnet][subnetName]
	result.Response.Response = &http.Response{
		StatusCode: http.StatusOK,
	}
	err = nil
	return resultChan, errChan
}
||||
|
||||
// Delete removes the subnet from FakeStore, answering 202 on success and
// 404 (with a DetailedError) when it does not exist.
func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	fASC.mutex.Lock()
	defer fASC.mutex.Unlock()
	respChan := make(chan autorest.Response, 1)
	errChan := make(chan error, 1)
	var resp autorest.Response
	var err error
	// Deferred send delivers whatever resp/err hold at return time.
	defer func() {
		respChan <- resp
		errChan <- err
		close(respChan)
		close(errChan)
	}()

	// Subnets are scoped to a vnet, so the store key combines both names.
	rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
	if rgSubnets, ok := fASC.FakeStore[rgVnet]; ok {
		if _, ok := rgSubnets[subnetName]; ok {
			delete(rgSubnets, subnetName)
			resp.Response = &http.Response{
				StatusCode: http.StatusAccepted,
			}
			err = nil
			return respChan, errChan
		}
	}
	resp.Response = &http.Response{
		StatusCode: http.StatusNotFound,
	}
	err = autorest.DetailedError{
		StatusCode: http.StatusNotFound,
		Message:    "Not such Subnet",
	}
	return respChan, errChan
}
||||
func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
if _, ok := fASC.FakeStore[rgVnet]; ok {
|
||||
if entity, ok := fASC.FakeStore[rgVnet][subnetName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Subnet",
|
||||
}
|
||||
}
|
||||
func (fASC fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
var value []network.Subnet
|
||||
if _, ok := fASC.FakeStore[rgVnet]; ok {
|
||||
for _, v := range fASC.FakeStore[rgVnet] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
result.NextLink = nil
|
||||
result.Value = &value
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// fakeAzureNSGClient is an in-memory stand-in for the Azure SecurityGroups
// client; FakeStore is keyed by resource group, then NSG name.
type fakeAzureNSGClient struct {
	mutex     *sync.Mutex // guards FakeStore
	FakeStore map[string]map[string]network.SecurityGroup
}
||||
|
||||
func newFakeAzureNSGClient() fakeAzureNSGClient {
|
||||
fNSG := fakeAzureNSGClient{}
|
||||
fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup)
|
||||
fNSG.mutex = &sync.Mutex{}
|
||||
return fNSG
|
||||
}
|
||||
|
||||
// CreateOrUpdate stores the security group in FakeStore and reports HTTP 200
// over SDK-style result/error channels.
func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) {
	fNSG.mutex.Lock()
	defer fNSG.mutex.Unlock()
	resultChan := make(chan network.SecurityGroup, 1)
	errChan := make(chan error, 1)
	var result network.SecurityGroup
	var err error
	// Deferred send delivers whatever result/err hold at return time.
	defer func() {
		resultChan <- result
		errChan <- err
		close(resultChan)
		close(errChan)
	}()
	if _, ok := fNSG.FakeStore[resourceGroupName]; !ok {
		fNSG.FakeStore[resourceGroupName] = make(map[string]network.SecurityGroup)
	}
	fNSG.FakeStore[resourceGroupName][networkSecurityGroupName] = parameters
	result = fNSG.FakeStore[resourceGroupName][networkSecurityGroupName]
	result.Response.Response = &http.Response{
		StatusCode: http.StatusOK,
	}
	err = nil
	return resultChan, errChan
}
||||
|
||||
// Delete removes the security group from FakeStore, answering 202 on success
// and 404 (with a DetailedError) when it does not exist.
func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	fNSG.mutex.Lock()
	defer fNSG.mutex.Unlock()
	respChan := make(chan autorest.Response, 1)
	errChan := make(chan error, 1)
	var resp autorest.Response
	var err error
	// Deferred send delivers whatever resp/err hold at return time.
	defer func() {
		respChan <- resp
		errChan <- err
		close(respChan)
		close(errChan)
	}()
	if rgSGs, ok := fNSG.FakeStore[resourceGroupName]; ok {
		if _, ok := rgSGs[networkSecurityGroupName]; ok {
			delete(rgSGs, networkSecurityGroupName)
			resp.Response = &http.Response{
				StatusCode: http.StatusAccepted,
			}
			err = nil
			return respChan, errChan
		}
	}
	resp.Response = &http.Response{
		StatusCode: http.StatusNotFound,
	}
	err = autorest.DetailedError{
		StatusCode: http.StatusNotFound,
		Message:    "Not such NSG",
	}
	return respChan, errChan
}
||||
|
||||
func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fNSG.FakeStore[resourceGroupName][networkSecurityGroupName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such NSG",
|
||||
}
|
||||
}
|
||||
|
||||
func (fNSG fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
var value []network.SecurityGroup
|
||||
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fNSG.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
result.Response.Response = &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}
|
||||
result.NextLink = nil
|
||||
result.Value = &value
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func getRandomIPPtr() *string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)))
|
||||
}
|
72
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
Normal file
72
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
azs "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
useHTTPS = true
|
||||
)
|
||||
|
||||
// create file share
|
||||
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB int) error {
|
||||
fileClient, err := az.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create a file share and set quota
|
||||
// Note. Per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share,
|
||||
// setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare
|
||||
// receives error "The metadata specified is invalid. It has characters that are not permitted."
|
||||
// As a result,breaking into two API calls: create share and set quota
|
||||
share := fileClient.GetShareReference(name)
|
||||
if err = share.Create(nil); err != nil {
|
||||
return fmt.Errorf("failed to create file share, err: %v", err)
|
||||
}
|
||||
share.Properties.Quota = sizeGB
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
if err := share.Delete(nil); err != nil {
|
||||
glog.Errorf("Error deleting share: %v", err)
|
||||
}
|
||||
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// delete a file share
|
||||
func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
|
||||
fileClient, err := az.getFileSvcClient(accountName, accountKey)
|
||||
if err == nil {
|
||||
share := fileClient.GetShareReference(name)
|
||||
return share.Delete(nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
|
||||
client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating azure client: %v", err)
|
||||
}
|
||||
f := client.GetFileService()
|
||||
return &f, nil
|
||||
}
|
113
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go
generated
vendored
Normal file
113
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const metadataURL = "http://169.254.169.254/metadata/"
|
||||
|
||||
// NetworkMetadata contains metadata about an instance's network
// configuration, as returned by the Azure instance metadata service.
type NetworkMetadata struct {
	// Interface lists the NICs attached to the instance.
	Interface []NetworkInterface `json:"interface"`
}

// NetworkInterface represents an instance's network interface.
type NetworkInterface struct {
	// IPV4 and IPV6 carry the per-address-family IP/subnet data.
	IPV4 NetworkData `json:"ipv4"`
	IPV6 NetworkData `json:"ipv6"`
	// MAC is the interface hardware address.
	MAC string `json:"macAddress"`
}

// NetworkData contains IP information for a network.
type NetworkData struct {
	IPAddress []IPAddress `json:"ipAddress"`
	Subnet    []Subnet    `json:"subnet"`
}

// IPAddress represents IP address information (private plus optional
// public address) for one NIC entry.
type IPAddress struct {
	PrivateIP string `json:"privateIPAddress"`
	PublicIP  string `json:"publicIPAddress"`
}

// Subnet represents subnet information.
type Subnet struct {
	Address string `json:"address"`
	Prefix  string `json:"prefix"`
}

// InstanceMetadata knows how to query the Azure instance metadata server.
type InstanceMetadata struct {
	// baseURL is the endpoint prefix all request paths are appended to.
	baseURL string
}
|
||||
|
||||
// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object.
|
||||
func NewInstanceMetadata() *InstanceMetadata {
|
||||
return &InstanceMetadata{
|
||||
baseURL: metadataURL,
|
||||
}
|
||||
}
|
||||
|
||||
// makeMetadataURL makes a complete metadata URL from the given path.
|
||||
func (i *InstanceMetadata) makeMetadataURL(path string) string {
|
||||
return i.baseURL + path
|
||||
}
|
||||
|
||||
// Object queries the metadata server and populates the passed in object
|
||||
func (i *InstanceMetadata) Object(path string, obj interface{}) error {
|
||||
data, err := i.queryMetadataBytes(path, "json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(data, obj)
|
||||
}
|
||||
|
||||
// Text queries the metadata server and returns the corresponding text
|
||||
func (i *InstanceMetadata) Text(path string) (string, error) {
|
||||
data, err := i.queryMetadataBytes(path, "text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, error) {
|
||||
client := &http.Client{}
|
||||
|
||||
req, err := http.NewRequest("GET", i.makeMetadataURL(path), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Metadata", "True")
|
||||
|
||||
q := req.URL.Query()
|
||||
q.Add("format", format)
|
||||
q.Add("api-version", "2017-04-02")
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
274
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
Normal file
274
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
Normal file
@ -0,0 +1,274 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
// When UseInstanceMetadata is set, addresses come from the local instance
// metadata service (valid only for the VM this process runs on);
// otherwise the IP is resolved through the Azure API with retry/backoff.
func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
	if az.UseInstanceMetadata {
		ipAddress := IPAddress{}
		// First IPv4 address entry of the first NIC.
		err := az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
		if err != nil {
			return nil, err
		}
		addresses := []v1.NodeAddress{
			{Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP},
			{Type: v1.NodeHostName, Address: string(name)},
		}
		// A public IP is optional; report it only when present.
		if len(ipAddress.PublicIP) > 0 {
			addr := v1.NodeAddress{
				Type:    v1.NodeExternalIP,
				Address: ipAddress.PublicIP,
			}
			addresses = append(addresses, addr)
		}
		return addresses, nil
	}
	ip, err := az.GetIPForMachineWithRetry(name)
	if err != nil {
		glog.V(2).Infof("NodeAddresses(%s) abort backoff", name)
		return nil, err
	}

	return []v1.NodeAddress{
		{Type: v1.NodeInternalIP, Address: ip},
		{Type: v1.NodeHostName, Address: string(name)},
	}, nil
}
|
||||
|
||||
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return az.NodeAddresses(name)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
//
// Deprecated: retained only for the legacy cloudprovider interface; it
// simply delegates to InstanceID.
func (az *Cloud) ExternalID(name types.NodeName) (string, error) {
	return az.InstanceID(name)
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
_, err = az.InstanceID(name)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
return (metadataName == nodeName), err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
func (az *Cloud) InstanceID(name types.NodeName) (string, error) {
	if az.UseInstanceMetadata {
		isLocalInstance, err := az.isCurrentInstance(name)
		if err != nil {
			return "", err
		}
		if isLocalInstance {
			externalInstanceID, err := az.metadata.Text("instance/compute/vmId")
			if err == nil {
				return externalInstanceID, nil
			}
			// NOTE(review): a metadata read failure here is deliberately
			// swallowed; we fall through to the ARM API lookup below.
		}
	}

	if az.Config.VMType == vmTypeVMSS {
		id, err := az.getVmssInstanceID(name)
		if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
			// Retry with standard type because master nodes may not belong to any vmss.
			return az.getStandardInstanceID(name)
		}

		return id, err
	}

	return az.getStandardInstanceID(name)
}
|
||||
|
||||
// getVmssInstanceID resolves name to the ARM resource ID of a VM scale
// set instance, retrying with backoff when CloudProviderBackoff is
// enabled. Returns cloudprovider.InstanceNotFound when the VM is absent.
func (az *Cloud) getVmssInstanceID(name types.NodeName) (string, error) {
	var machine compute.VirtualMachineScaleSetVM
	var exists bool
	var err error
	// Rate-limit before hitting the ARM API.
	az.operationPollRateLimiter.Accept()
	machine, exists, err = az.getVmssVirtualMachine(name)
	if err != nil {
		if az.CloudProviderBackoff {
			glog.V(2).Infof("InstanceID(%s) backing off", name)
			machine, exists, err = az.GetScaleSetsVMWithRetry(name)
			if err != nil {
				glog.V(2).Infof("InstanceID(%s) abort backoff", name)
				return "", err
			}
		} else {
			return "", err
		}
	} else if !exists {
		return "", cloudprovider.InstanceNotFound
	}
	// NOTE(review): after a successful retry, exists is not re-checked
	// before dereferencing machine.ID — confirm GetScaleSetsVMWithRetry
	// errors when the VM does not exist.
	return *machine.ID, nil
}
|
||||
|
||||
// getStandardInstanceID resolves name to the ARM resource ID of a
// standalone (non-scale-set) VM, retrying with backoff when
// CloudProviderBackoff is enabled. Returns
// cloudprovider.InstanceNotFound when the VM is absent.
func (az *Cloud) getStandardInstanceID(name types.NodeName) (string, error) {
	var machine compute.VirtualMachine
	var exists bool
	var err error
	// Rate-limit before hitting the ARM API.
	az.operationPollRateLimiter.Accept()
	machine, exists, err = az.getVirtualMachine(name)
	if err != nil {
		if az.CloudProviderBackoff {
			glog.V(2).Infof("InstanceID(%s) backing off", name)
			machine, exists, err = az.GetVirtualMachineWithRetry(name)
			if err != nil {
				glog.V(2).Infof("InstanceID(%s) abort backoff", name)
				return "", err
			}
		} else {
			return "", err
		}
	} else if !exists {
		return "", cloudprovider.InstanceNotFound
	}
	// NOTE(review): after a successful retry, exists is not re-checked
	// before dereferencing machine.ID — confirm GetVirtualMachineWithRetry
	// errors when the VM does not exist.
	return *machine.ID, nil
}
|
||||
|
||||
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) InstanceTypeByProviderID(providerID string) (string, error) {
|
||||
name, err := splitProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return az.InstanceType(name)
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
	if az.UseInstanceMetadata {
		isLocalInstance, err := az.isCurrentInstance(name)
		if err != nil {
			return "", err
		}
		if isLocalInstance {
			machineType, err := az.metadata.Text("instance/compute/vmSize")
			if err == nil {
				return machineType, nil
			}
			// NOTE(review): a metadata read failure here is deliberately
			// swallowed; we fall through to the ARM API lookup below.
		}
	}

	if az.Config.VMType == vmTypeVMSS {
		machineType, err := az.getVmssInstanceType(name)
		if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
			// Retry with standard type because master nodes may not belong to any vmss.
			return az.getStandardInstanceType(name)
		}

		return machineType, err
	}

	return az.getStandardInstanceType(name)
}
|
||||
|
||||
// getVmssInstanceType gets instance with type vmss.
|
||||
func (az *Cloud) getVmssInstanceType(name types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVmssVirtualMachine(name)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.InstanceType(%s), az.getVmssVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
if machine.Sku.Name != nil {
|
||||
return *machine.Sku.Name, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("instance type is not set")
|
||||
}
|
||||
|
||||
// getStandardInstanceType gets instance with standard type.
|
||||
func (az *Cloud) getStandardInstanceType(name types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVirtualMachine(name)
|
||||
if err != nil {
|
||||
glog.Errorf("error: az.InstanceType(%s), az.getVirtualMachine(%s) err=%v", name, name, err)
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return string(machine.HardwareProfile.VMSize), nil
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
//
// This operation is not implemented for Azure; the call always returns an error.
func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return fmt.Errorf("not supported")
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
// A plain string conversion suffices; no cloud API call is made.
func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}
|
||||
|
||||
// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
// This is a simple string cast; the inverse of mapVMNameToNodeName.
func mapNodeNameToVMName(nodeName types.NodeName) string {
	return string(nodeName)
}
|
||||
|
||||
// mapVMNameToNodeName maps an Azure VM Name to a k8s NodeName
// This is a simple string cast; the inverse of mapNodeNameToVMName.
func mapVMNameToNodeName(vmName string) types.NodeName {
	return types.NodeName(vmName)
}
|
1387
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
Normal file
1387
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
Normal file
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
# Azure LoadBalancer
|
||||
|
||||
The way Azure defines a LoadBalancer is different from GCE or AWS. An Azure LB can have multiple frontend IP refs, while GCE and AWS allow only one — if you want more, you need another LB. Because of this, the Public IP is not part of the LB in Azure, and neither is the NSG. However, they cannot be deleted in parallel: a Public IP can only be deleted after the LB's frontend IP ref is removed.
|
||||
|
||||
Different Azure resources, such as LB, Public IP and NSG, are same-tier Azure resources. We need to make sure there is no coupling between their individual ensure loops. In other words, each would eventually be reconciled regardless of the other resources' state, and each should depend only on the service state.
|
||||
|
||||
Despite the ideal philosophy above, we have to face the reality. NSG depends on LB's frontend ip to adjust NSG rules. So when we want to reconcile NSG, the LB should contain the corresponding frontend ip config.
|
||||
|
||||
Also, for Azure we cannot afford to have more than one worker in the service_controller, because different services can operate on the same LB and concurrent execution could result in conflicts or unexpected results. AWS and GCE apparently do not have this problem: they use one LB per service, so there is no such conflict.
|
||||
|
||||
There are two load balancers per availability set internal and external. There is a limit on number of services that can be associated with a single load balancer.
|
||||
By default the primary load balancer is selected. Services can be annotated to allow auto selection of available load balancers. Service annotations can also be used to provide specific availability sets that host the load balancers. Note that with auto selection or specific availability set selection, when the availability set is lost in case of downtime or cluster scale-down, the services are currently not automatically reassigned to an available load balancer.
|
||||
Service Annotation for Auto and specific load balancer mode
|
||||
|
||||
- service.beta.kubernetes.io/azure-load-balancer-mode (__auto__|as1,as2...)
|
||||
|
||||
## Introduce Functions
|
||||
|
||||
- reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error)
|
||||
- Go through lb's properties, update based on wantLb
|
||||
- If any change on the lb, no matter if the lb exists or not
|
||||
- Call az cloud to CreateOrUpdate on this lb, or Delete if nothing left
|
||||
- return lb, err
|
||||
|
||||
- reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error)
|
||||
  - Go through the NSG's properties, update based on wantLb
|
||||
- Use destinationIPAddress as target address if possible
|
||||
- Consolidate NSG rules if possible
|
||||
- If any change on the NSG, (the NSG should always exists)
|
||||
- Call az cloud to CreateOrUpdate on this NSG
|
||||
- return sg, err
|
||||
|
||||
- reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error)
|
||||
- List all the public ip in the resource group
|
||||
  - Make sure we only touch Public IP resources that have tags[service] = "namespace/serviceName"
|
||||
- skip for wantLb && !isInternal && pipName == desiredPipName
|
||||
- delete other public ip resources if any
|
||||
- if !isInternal && wantLb
|
||||
- ensure Public IP with desiredPipName exists
|
||||
|
||||
- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
|
||||
- gets the loadbalancer for the service if it already exists
|
||||
  - If wantLb is TRUE then it selects a new load balancer; the selection helps distribute the services across load balancers
|
||||
- In case the selected load balancer does not exists it returns network.LoadBalancer struct with added metadata (such as name, location) and existsLB set to FALSE
|
||||
- By default - cluster default LB is returned
|
||||
|
||||
## Define interface behaviors
|
||||
|
||||
### GetLoadBalancer
|
||||
|
||||
- Get LoadBalancer status, return status, error
|
||||
- return the load balancer status for this service
|
||||
- it will not create or update or delete any resource
|
||||
|
||||
### EnsureLoadBalancer
|
||||
|
||||
- Reconcile LB for the flipped service
|
||||
  - Call reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */)
|
||||
- Reconcile Public IP
|
||||
- Call reconcilePublicIP(cluster, service, true)
|
||||
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
|
||||
- Call reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
|
||||
- Reconcile NSG rules, it need to be called after reconcileLB
|
||||
- Call reconcileSecurityGroup(clusterName, service, lbStatus, true /* wantLb */)
|
||||
|
||||
### UpdateLoadBalancer
|
||||
|
||||
- Has no difference with EnsureLoadBalancer
|
||||
|
||||
### EnsureLoadBalancerDeleted
|
||||
|
||||
- Reconcile NSG first, before reconcile LB, because SG need LB to be there
|
||||
- Call reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */)
|
||||
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
|
||||
- Call reconcileLoadBalancer(clusterName, service, nodes, false)
|
||||
- Reconcile Public IP, public IP needs related LB reconciled first
|
||||
- Call reconcilePublicIP(cluster, service, false)
|
99
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
Normal file
99
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestFindProbe exercises findProbe, which reports whether curProbe is
// already present in an existing probe list. A probe matches only when
// BOTH its name and its port agree with an existing entry.
func TestFindProbe(t *testing.T) {
	tests := []struct {
		msg           string          // test case description
		existingProbe []network.Probe // probes already configured on the LB
		curProbe      network.Probe   // probe being looked up
		expected      bool            // expected findProbe result
	}{
		{
			msg:      "empty existing probes should return false",
			expected: false,
		},
		{
			msg: "probe names match while ports unmatch should return false",
			existingProbe: []network.Probe{
				{
					Name: to.StringPtr("httpProbe"),
					ProbePropertiesFormat: &network.ProbePropertiesFormat{
						Port: to.Int32Ptr(1),
					},
				},
			},
			curProbe: network.Probe{
				Name: to.StringPtr("httpProbe"),
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(2),
				},
			},
			expected: false,
		},
		{
			msg: "probe ports match while names unmatch should return false",
			existingProbe: []network.Probe{
				{
					Name: to.StringPtr("probe1"),
					ProbePropertiesFormat: &network.ProbePropertiesFormat{
						Port: to.Int32Ptr(1),
					},
				},
			},
			curProbe: network.Probe{
				Name: to.StringPtr("probe2"),
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(1),
				},
			},
			expected: false,
		},
		{
			msg: "both probe ports and names match should return true",
			existingProbe: []network.Probe{
				{
					Name: to.StringPtr("matchName"),
					ProbePropertiesFormat: &network.ProbePropertiesFormat{
						Port: to.Int32Ptr(1),
					},
				},
			},
			curProbe: network.Probe{
				Name: to.StringPtr("matchName"),
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(1),
				},
			},
			expected: true,
		},
	}

	for i, test := range tests {
		findResult := findProbe(test.existingProbe, test.curProbe)
		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
	}
}
|
129
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
Normal file
129
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/disk"
|
||||
storage "github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/golang/glog"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
// ManagedDiskController creates, deletes and inspects Azure managed disks
// through the ARM disk API.
type ManagedDiskController struct {
	// common carries the shared Azure clients, resource group and location.
	common *controllerCommon
}
|
||||
|
||||
// newManagedDiskController wires a ManagedDiskController to the shared
// controller state. The error return is currently always nil; it is kept
// for symmetry with the other controller constructors.
func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
	return &ManagedDiskController{common: common}, nil
}
|
||||
|
||||
// CreateManagedDisk creates an Azure managed disk named diskName with the
// given storage SKU and size (GB) in the controller's resource group,
// tagged with "created-by":"kubernetes-azure-dd" plus the caller-supplied
// tags, then polls until provisioning succeeds. It returns the disk's ARM
// resource ID.
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
	glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)

	newTags := make(map[string]*string)
	azureDDTag := "kubernetes-azure-dd"
	newTags["created-by"] = &azureDDTag

	// insert original tags to newTags
	if tags != nil {
		for k, v := range tags {
			// Azure won't allow / (forward slash) in tags
			newKey := strings.Replace(k, "/", "-", -1)
			newValue := strings.Replace(v, "/", "-", -1)
			newTags[newKey] = &newValue
		}
	}

	diskSizeGB := int32(sizeGB)
	model := disk.Model{
		Location: &c.common.location,
		Tags:     &newTags,
		Properties: &disk.Properties{
			AccountType:  disk.StorageAccountTypes(storageAccountType),
			DiskSizeGB:   &diskSizeGB,
			CreationData: &disk.CreationData{CreateOption: disk.Empty},
		}}
	// CreateOrUpdate is asynchronous in this SDK version: drain the
	// response channel, then read the terminal error.
	cancel := make(chan struct{})
	respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel)
	<-respChan
	err := <-errChan
	if err != nil {
		return "", err
	}

	diskID := ""

	err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
		provisonState, id, err := c.getDisk(diskName)
		diskID = id
		// We are waiting for provisioningState==Succeeded
		// We don't want to hand-off managed disks to k8s while they are
		// still being provisioned, this is to avoid some race conditions
		if err != nil {
			return false, err
		}
		if strings.ToLower(provisonState) == "succeeded" {
			return true, nil
		}
		return false, nil
	})

	if err != nil {
		// NOTE(review): a poll failure is deliberately logged and swallowed:
		// the disk was created; we just could not confirm its final state.
		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
	} else {
		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
	}

	return diskID, nil
}
|
||||
|
||||
// DeleteManagedDisk deletes the managed disk identified by diskURI; the
// disk name is taken from the last path segment of the URI.
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
	diskName := path.Base(diskURI)
	// Delete is asynchronous in this SDK version: drain the response
	// channel, then read the terminal error.
	cancel := make(chan struct{})
	respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel)
	<-respChan
	err := <-errChan
	if err != nil {
		return err
	}
	// We don't need to poll here: k8s will immediately stop referencing
	// the disk, and the disk will eventually be deleted - cleanly - by ARM.

	glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)

	return nil
}
|
||||
|
||||
// getDisk fetches the named disk from the controller's resource group and
// returns its provisioning state and ARM resource ID.
//
// NOTE(review): when the response carries no Properties/ProvisioningState
// this returns ("", "", nil) — empty values with a nil error. The backoff
// loop in CreateManagedDisk relies on that to mean "keep polling"; confirm
// before adding new callers.
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
	result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName)
	if err != nil {
		return "", "", err
	}

	if result.Properties != nil && (*result.Properties).ProvisioningState != nil {
		return *(*result.Properties).ProvisioningState, *result.ID, nil
	}

	return "", "", err
}
|
184
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
Normal file
184
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
Normal file
@ -0,0 +1,184 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
|
||||
func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route, err error) {
|
||||
glog.V(10).Infof("list: START clusterName=%q", clusterName)
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !existsRouteTable {
|
||||
return []*cloudprovider.Route{}, nil
|
||||
}
|
||||
|
||||
var kubeRoutes []*cloudprovider.Route
|
||||
if routeTable.Routes != nil {
|
||||
kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
|
||||
for i, route := range *routeTable.Routes {
|
||||
instance := mapRouteNameToNodeName(*route.Name)
|
||||
cidr := *route.AddressPrefix
|
||||
glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
|
||||
|
||||
kubeRoutes[i] = &cloudprovider.Route{
|
||||
Name: *route.Name,
|
||||
TargetNode: instance,
|
||||
DestinationCIDR: cidr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(10).Info("list: FINISH")
|
||||
return kubeRoutes, nil
|
||||
}
|
||||
|
||||
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
//
// If the route table does not exist yet it is created on demand and then
// re-fetched, so the route below is added to the current version of the table.
func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
	glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

	routeTable, existsRouteTable, err := az.getRouteTable()
	if err != nil {
		glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		return err
	}
	if !existsRouteTable {
		routeTable = network.RouteTable{
			Name:                       to.StringPtr(az.RouteTableName),
			Location:                   to.StringPtr(az.Location),
			RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
		}

		glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
		az.operationPollRateLimiter.Accept()
		glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): start", az.RouteTableName)
		respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
		resp := <-respChan
		// NOTE: this `:=` shadows the outer err for the remainder of this
		// block; every error return happens inside the block, so the
		// shadowing is harmless.
		err := <-errChan
		glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
		if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
			glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
			retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
			if retryErr != nil {
				// Surface the retry failure instead of the original error.
				err = retryErr
				glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
			}
		}
		if err != nil {
			return err
		}

		// Re-read the table we just created so the route is added to it.
		glog.V(10).Infof("RouteTablesClient.Get(%q): start", az.RouteTableName)
		routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
		glog.V(10).Infof("RouteTablesClient.Get(%q): end", az.RouteTableName)
		if err != nil {
			return err
		}
	}

	// The next hop is the target node's private IP: traffic for the CIDR is
	// routed through the node as a virtual appliance.
	targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
	if err != nil {
		return err
	}

	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
	route := network.Route{
		Name: to.StringPtr(routeName),
		RoutePropertiesFormat: &network.RoutePropertiesFormat{
			AddressPrefix:    to.StringPtr(kubeRoute.DestinationCIDR),
			NextHopType:      network.RouteNextHopTypeVirtualAppliance,
			NextHopIPAddress: to.StringPtr(targetIP),
		},
	}

	glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): start", az.RouteTableName)
	respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
	resp := <-respChan
	err = <-errChan
	glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
	if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		retryErr := az.CreateOrUpdateRouteWithRetry(route)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("create abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		}
	}
	if err != nil {
		return err
	}

	glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	return nil
}
|
||||
|
||||
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error {
	glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("RoutesClient.Delete(%q): start", az.RouteTableName)
	respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
	resp := <-respChan
	err := <-errChan
	glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)

	// NOTE(review): resp is passed directly here, whereas CreateRoute passes
	// resp.Response — presumably Delete yields a different response type;
	// confirm against the RoutesClient signature.
	if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) {
		glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		retryErr := az.DeleteRouteWithRetry(routeName)
		if retryErr != nil {
			// Surface the retry failure instead of the original error.
			err = retryErr
			glog.V(2).Infof("delete abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		}
	}
	if err != nil {
		return err
	}

	glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	return nil
}
|
||||
|
||||
// This must be kept in sync with mapRouteNameToNodeName.
|
||||
// These two functions enable stashing the instance name in the route
|
||||
// and then retrieving it later when listing. This is needed because
|
||||
// Azure does not let you put tags/descriptions on the Route itself.
|
||||
func mapNodeNameToRouteName(nodeName types.NodeName) string {
|
||||
return fmt.Sprintf("%s", nodeName)
|
||||
}
|
||||
|
||||
// Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
|
||||
func mapRouteNameToNodeName(routeName string) types.NodeName {
|
||||
return types.NodeName(fmt.Sprintf("%s", routeName))
|
||||
}
|
74
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
Normal file
74
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// CreateFileShare creates a file share, using a matching storage account
|
||||
func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) {
|
||||
var err error
|
||||
accounts := []accountWithLocation{}
|
||||
if len(storageAccount) > 0 {
|
||||
accounts = append(accounts, accountWithLocation{Name: storageAccount})
|
||||
} else {
|
||||
// find a storage account
|
||||
accounts, err = az.getStorageAccounts()
|
||||
if err != nil {
|
||||
// TODO: create a storage account and container
|
||||
return "", "", err
|
||||
}
|
||||
}
|
||||
for _, account := range accounts {
|
||||
glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
|
||||
if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 {
|
||||
// find the access key with this account
|
||||
key, err := az.getStorageAccesskey(account.Name)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("could not get storage key for storage account %s: %v", account.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = az.createFileShare(account.Name, key, name, requestGB)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to create share %s in account %s: %v", name, account.Name, err)
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("created share %s in account %s", name, account.Name)
|
||||
return account.Name, key, err
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = fmt.Errorf("failed to find a matching storage account")
|
||||
}
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// DeleteFileShare deletes a file share using storage account name and key
|
||||
func (az *Cloud) DeleteFileShare(accountName, key, name string) error {
|
||||
err := az.deleteFileShare(accountName, key, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("share %s deleted", name)
|
||||
return nil
|
||||
|
||||
}
|
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
Normal file
85
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// accountWithLocation summarizes a storage account: its name, its SKU name
// (exposed here as StorageType), and its Azure location.
type accountWithLocation struct {
	Name, StorageType, Location string
}
|
||||
|
||||
// getStorageAccounts gets the storage accounts' name, type, location in a resource group
|
||||
func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): start", az.ResourceGroup)
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
|
||||
glog.V(10).Infof("StorageAccountClient.ListByResourceGroup(%v): end", az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Value == nil {
|
||||
return nil, fmt.Errorf("no storage accounts from resource group %s", az.ResourceGroup)
|
||||
}
|
||||
|
||||
accounts := []accountWithLocation{}
|
||||
for _, acct := range *result.Value {
|
||||
if acct.Name != nil {
|
||||
name := *acct.Name
|
||||
loc := ""
|
||||
if acct.Location != nil {
|
||||
loc = *acct.Location
|
||||
}
|
||||
storageType := ""
|
||||
if acct.Sku != nil {
|
||||
storageType = string((*acct.Sku).Name)
|
||||
}
|
||||
accounts = append(accounts, accountWithLocation{Name: name, StorageType: storageType, Location: loc})
|
||||
}
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
az.operationPollRateLimiter.Accept()
|
||||
glog.V(10).Infof("StorageAccountClient.ListKeys(%q): start", account)
|
||||
result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account)
|
||||
glog.V(10).Infof("StorageAccountClient.ListKeys(%q): end", account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if result.Keys == nil {
|
||||
return "", fmt.Errorf("empty keys")
|
||||
}
|
||||
|
||||
for _, k := range *result.Keys {
|
||||
if k.Value != nil && *k.Value != "" {
|
||||
v := *k.Value
|
||||
if ind := strings.LastIndex(v, " "); ind >= 0 {
|
||||
v = v[(ind + 1):]
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no valid keys")
|
||||
}
|
2605
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
Normal file
2605
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
521
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go
generated
vendored
Normal file
521
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go
generated
vendored
Normal file
@ -0,0 +1,521 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
const (
	// Valid priority window used when allocating NSG security-rule priorities.
	loadBalancerMinimumPriority = 500
	loadBalancerMaximumPriority = 4096

	// ARM resource-ID templates; arguments are subscription ID, resource
	// group, then the resource-specific name segments.
	machineIDTemplate           = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
	availabilitySetIDTemplate   = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s"
	frontendIPConfigIDTemplate  = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/%s"
	backendPoolIDTemplate       = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
	loadBalancerRuleIDTemplate  = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/%s"
	loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"
	securityRuleIDTemplate      = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s/securityRules/%s"

	// InternalLoadBalancerNameSuffix is the load balancer name postfix
	InternalLoadBalancerNameSuffix = "-internal"

	// nodeLabelRole specifies the role of a node
	nodeLabelRole = "kubernetes.io/role"
)
|
||||
|
||||
// providerIDRE extracts the VM name from an Azure providerID of the form
// <CloudProviderName>://.../Microsoft.Compute/virtualMachines/<name>.
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
|
||||
|
||||
// getMachineID returns the full ARM identifier of a machine in this
// subscription and resource group.
func (az *Cloud) getMachineID(machineName string) string {
	return fmt.Sprintf(
		machineIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		machineName)
}
|
||||
|
||||
// getAvailabilitySetID returns the full ARM identifier of an availabilitySet.
func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
	return fmt.Sprintf(
		availabilitySetIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		availabilitySetName)
}
|
||||
|
||||
// returns the full identifier of a loadbalancer frontendipconfiguration.
|
||||
func (az *Cloud) getFrontendIPConfigID(lbName, backendPoolName string) string {
|
||||
return fmt.Sprintf(
|
||||
frontendIPConfigIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
backendPoolName)
|
||||
}
|
||||
|
||||
// getBackendPoolID returns the full ARM identifier of a load balancer
// backend address pool.
func (az *Cloud) getBackendPoolID(lbName, backendPoolName string) string {
	return fmt.Sprintf(
		backendPoolIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		lbName,
		backendPoolName)
}
|
||||
|
||||
// getLoadBalancerRuleID returns the full ARM identifier of a load balancing
// rule on the given load balancer.
func (az *Cloud) getLoadBalancerRuleID(lbName, lbRuleName string) string {
	return fmt.Sprintf(
		loadBalancerRuleIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		lbName,
		lbRuleName)
}
|
||||
|
||||
// getLoadBalancerProbeID returns the full ARM identifier of a load balancer
// probe. (The parameter is named lbRuleName but is substituted into the
// probe-name slot of the template; probes and rules evidently share naming.)
func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
	return fmt.Sprintf(
		loadBalancerProbeIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		lbName,
		lbRuleName)
}
|
||||
|
||||
// getSecurityRuleID returns the full ARM identifier of a security rule in
// the cluster's network security group (az.SecurityGroupName).
func (az *Cloud) getSecurityRuleID(securityRuleName string) string {
	return fmt.Sprintf(
		securityRuleIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		az.SecurityGroupName,
		securityRuleName)
}
|
||||
|
||||
// getpublicIPAddressID returns the full ARM identifier of a publicIPAddress.
// (Name kept as-is for compatibility, though it violates Go initialism
// conventions; publicIPAddressIDTemplate is defined elsewhere in this file.)
func (az *Cloud) getpublicIPAddressID(pipName string) string {
	return fmt.Sprintf(
		publicIPAddressIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		pipName)
}
|
||||
|
||||
// getLoadBalancerAvailabilitySetNames selects all possible availability sets for
|
||||
// service load balancer, if the service has no loadbalancer mode annotaion returns the
|
||||
// primary availability set if service annotation for loadbalancer availability set
|
||||
// exists then return the eligible a availability set
|
||||
func (az *Cloud) getLoadBalancerAvailabilitySetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
|
||||
hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
|
||||
if !hasMode {
|
||||
// no mode specified in service annotation default to PrimaryAvailabilitySetName
|
||||
availabilitySetNames = &[]string{az.Config.PrimaryAvailabilitySetName}
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
availabilitySetNames, err = az.getAgentPoolAvailabiliySets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("az.getLoadBalancerAvailabilitySetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*availabilitySetNames) == 0 {
|
||||
glog.Errorf("az.getLoadBalancerAvailabilitySetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
// sort the list to have deterministic selection
|
||||
sort.Strings(*availabilitySetNames)
|
||||
if !isAuto {
|
||||
if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
|
||||
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
|
||||
}
|
||||
// validate availability set exists
|
||||
var found bool
|
||||
for sasx := range serviceAvailabilitySetNames {
|
||||
for asx := range *availabilitySetNames {
|
||||
if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
|
||||
found = true
|
||||
serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("az.getLoadBalancerAvailabilitySetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
|
||||
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
|
||||
}
|
||||
}
|
||||
availabilitySetNames = &serviceAvailabilitySetNames
|
||||
}
|
||||
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
|
||||
// lists the virtual machines for for the resource group and then builds
|
||||
// a list of availability sets that match the nodes available to k8s
|
||||
func (az *Cloud) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
|
||||
vms, err := az.VirtualMachineClientListWithRetry()
|
||||
if err != nil {
|
||||
glog.Errorf("az.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
|
||||
for vmx := range vms {
|
||||
vm := vms[vmx]
|
||||
if vm.AvailabilitySet != nil {
|
||||
vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
|
||||
}
|
||||
}
|
||||
availabilitySetIDs := sets.NewString()
|
||||
agentPoolAvailabilitySets = &[]string{}
|
||||
for nx := range nodes {
|
||||
nodeName := (*nodes[nx]).Name
|
||||
if isMasterNode(nodes[nx]) {
|
||||
continue
|
||||
}
|
||||
asID, ok := vmNameToAvailabilitySetID[nodeName]
|
||||
if !ok {
|
||||
glog.Errorf("az.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
|
||||
return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
|
||||
}
|
||||
if availabilitySetIDs.Has(asID) {
|
||||
// already added in the list
|
||||
continue
|
||||
}
|
||||
asName, err := getLastSegment(asID)
|
||||
if err != nil {
|
||||
glog.Errorf("az.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
|
||||
return nil, err
|
||||
}
|
||||
// AvailabilitySet ID is currently upper cased in a indeterministic way
|
||||
// We want to keep it lower case, before the ID get fixed
|
||||
asName = strings.ToLower(asName)
|
||||
|
||||
*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
|
||||
}
|
||||
|
||||
return agentPoolAvailabilitySets, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) mapLoadBalancerNameToAvailabilitySet(lbName string, clusterName string) (availabilitySetName string) {
|
||||
availabilitySetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
|
||||
if strings.EqualFold(clusterName, lbName) {
|
||||
availabilitySetName = az.Config.PrimaryAvailabilitySetName
|
||||
}
|
||||
|
||||
return availabilitySetName
|
||||
}
|
||||
|
||||
// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress.
|
||||
// Thus Azure do not allow mixed type (public and internal) load balancer.
|
||||
// So we'd have a separate name for internal load balancer.
|
||||
// This would be the name for Azure LoadBalancer resource.
|
||||
func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName string, isInternal bool) string {
|
||||
lbNamePrefix := availabilitySetName
|
||||
if strings.EqualFold(availabilitySetName, az.Config.PrimaryAvailabilitySetName) {
|
||||
lbNamePrefix = clusterName
|
||||
}
|
||||
if isInternal {
|
||||
return fmt.Sprintf("%s%s", lbNamePrefix, InternalLoadBalancerNameSuffix)
|
||||
}
|
||||
return lbNamePrefix
|
||||
}
|
||||
|
||||
// isMasterNode returns returns true is the node has a master role label.
|
||||
// The master role is determined by looking for:
|
||||
// * a kubernetes.io/role="master" label
|
||||
func isMasterNode(node *v1.Node) bool {
|
||||
if val, ok := node.Labels[nodeLabelRole]; ok && val == "master" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getLastSegment returns the deepest child's identifier from a full,
// slash-separated identifier string; it errors when that segment is empty
// (trailing slash or empty input).
func getLastSegment(ID string) (string, error) {
	// Equivalent to splitting on "/" and taking the last element: when there
	// is no slash, LastIndex returns -1 and the whole string is the segment.
	name := ID[strings.LastIndex(ID, "/")+1:]
	if len(name) == 0 {
		return "", fmt.Errorf("resource name was missing from identifier")
	}

	return name, nil
}
|
||||
|
||||
// returns the equivalent LoadBalancerRule, SecurityRule and LoadBalancerProbe
// protocol types for the given Kubernetes protocol type.
// For UDP the probe protocol returned is nil (only TCP yields a probe here).
func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.TransportProtocol, *network.SecurityRuleProtocol, *network.ProbeProtocol, error) {
	var transportProto network.TransportProtocol
	var securityProto network.SecurityRuleProtocol
	var probeProto network.ProbeProtocol

	switch protocol {
	case v1.ProtocolTCP:
		transportProto = network.TransportProtocolTCP
		securityProto = network.SecurityRuleProtocolTCP
		probeProto = network.ProbeProtocolTCP
		return &transportProto, &securityProto, &probeProto, nil
	case v1.ProtocolUDP:
		transportProto = network.TransportProtocolUDP
		securityProto = network.SecurityRuleProtocolUDP
		// Deliberately no probe protocol for UDP.
		return &transportProto, &securityProto, nil, nil
	default:
		// NOTE(review): the error path returns pointers to zero-valued
		// protocols rather than nils; callers must check err before using
		// the returned values.
		return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers")
	}
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM:
// the single NIC when there is exactly one, otherwise the NIC whose Primary
// flag is set.
func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
		return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
	}

	for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
		// NOTE(review): assumes ref.Primary is non-nil whenever the VM has
		// multiple NICs; a nil Primary would panic here — confirm the API
		// always populates it.
		if *ref.Primary {
			return *ref.ID, nil
		}
	}

	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}
|
||||
|
||||
func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
|
||||
if len(*nic.IPConfigurations) == 1 {
|
||||
return &((*nic.IPConfigurations)[0]), nil
|
||||
}
|
||||
|
||||
for _, ref := range *nic.IPConfigurations {
|
||||
if *ref.Primary {
|
||||
return &ref, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to determine the determine primary ipconfig. nicname=%q", *nic.Name)
|
||||
}
|
||||
|
||||
func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
|
||||
return strings.HasSuffix(*lb.Name, InternalLoadBalancerNameSuffix)
|
||||
}
|
||||
|
||||
// getBackendPoolName returns the name used for the load balancer's backend
// address pool; it is simply the cluster name.
func getBackendPoolName(clusterName string) string {
	return clusterName
}
|
||||
|
||||
func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
|
||||
if subnetName == nil {
|
||||
return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port)
|
||||
}
|
||||
return fmt.Sprintf("%s-%s-%s-%d", getRulePrefix(service), *subnetName, port.Protocol, port.Port)
|
||||
}
|
||||
|
||||
func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
|
||||
if useSharedSecurityRule(service) {
|
||||
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
|
||||
return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
|
||||
}
|
||||
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
|
||||
return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix)
|
||||
}
|
||||
|
||||
// This returns a human-readable version of the Service used to tag some resources.
|
||||
// This is only used for human-readable convenience, and not to filter.
|
||||
func getServiceName(service *v1.Service) string {
|
||||
return fmt.Sprintf("%s/%s", service.Namespace, service.Name)
|
||||
}
|
||||
|
||||
// This returns a prefix for loadbalancer/security rules.
// It delegates to the generic cloud-provider load-balancer naming helper, so
// rule ownership checks (serviceOwnsRule) and rule names stay consistent.
func getRulePrefix(service *v1.Service) string {
	return cloudprovider.GetLoadBalancerName(service)
}
|
||||
|
||||
func getPublicIPName(clusterName string, service *v1.Service) string {
|
||||
return fmt.Sprintf("%s-%s", clusterName, cloudprovider.GetLoadBalancerName(service))
|
||||
}
|
||||
|
||||
func serviceOwnsRule(service *v1.Service, rule string) bool {
|
||||
prefix := getRulePrefix(service)
|
||||
return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
|
||||
}
|
||||
|
||||
func serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
|
||||
baseName := cloudprovider.GetLoadBalancerName(service)
|
||||
return strings.HasPrefix(*fip.Name, baseName)
|
||||
}
|
||||
|
||||
func getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
|
||||
baseName := cloudprovider.GetLoadBalancerName(service)
|
||||
if subnetName != nil {
|
||||
return fmt.Sprintf("%s-%s", baseName, *subnetName)
|
||||
}
|
||||
return baseName
|
||||
}
|
||||
|
||||
// This returns the next available rule priority level for a given set of security rules.
// It scans upward from loadBalancerMinimumPriority, restarting the inner rule
// scan (via the labeled continue) whenever the candidate priority is taken.
func getNextAvailablePriority(rules []network.SecurityRule) (int32, error) {
	var smallest int32 = loadBalancerMinimumPriority
	var spread int32 = 1

outer:
	for smallest < loadBalancerMaximumPriority {
		for _, rule := range rules {
			// NOTE(review): assumes rule.Priority is non-nil for every rule;
			// a rule without a priority would panic here — confirm the API
			// always populates it.
			if *rule.Priority == smallest {
				smallest += spread
				continue outer
			}
		}
		// no one else had it
		return smallest, nil
	}

	return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
}
|
||||
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
|
||||
if az.Config.VMType == vmTypeVMSS {
|
||||
ip, err := az.getIPForVmssMachine(nodeName)
|
||||
if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance {
|
||||
return az.getIPForStandardMachine(nodeName)
|
||||
}
|
||||
|
||||
return ip, err
|
||||
}
|
||||
|
||||
return az.getIPForStandardMachine(nodeName)
|
||||
}
|
||||
|
||||
// getIPForStandardMachine looks up the node's virtual machine, resolves its
// primary NIC, and returns the private IP of that NIC's primary IP
// configuration.
func (az *Cloud) getIPForStandardMachine(nodeName types.NodeName) (string, error) {
	az.operationPollRateLimiter.Accept()
	machine, exists, err := az.getVirtualMachine(nodeName)
	// NOTE(review): !exists is checked before err, so a lookup failure that
	// also reports exists=false surfaces as InstanceNotFound instead of the
	// underlying error.
	if !exists {
		return "", cloudprovider.InstanceNotFound
	}
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), az.getVirtualMachine(%s), err=%v", nodeName, nodeName, err)
		return "", err
	}

	// Full ARM ID of the machine's primary network interface.
	nicID, err := getPrimaryInterfaceID(machine)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err)
		return "", err
	}

	// The NIC name is the last path segment of its ARM ID.
	nicName, err := getLastSegment(nicID)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err)
		return "", err
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
	nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
	glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), az.InterfacesClient.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err)
		return "", err
	}

	ipConfig, err := getPrimaryIPConfig(nic)
	if err != nil {
		glog.Errorf("error: az.getIPForMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
		return "", err
	}

	targetIP := *ipConfig.PrivateIPAddress
	return targetIP, nil
}
|
||||
|
||||
// splitProviderID converts a providerID to a NodeName by extracting the VM
// name with the providerIDRE regular expression; it errors when the ID does
// not match the expected Azure providerID format.
func splitProviderID(providerID string) (types.NodeName, error) {
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		return "", errors.New("error splitting providerID")
	}
	return types.NodeName(matches[1]), nil
}
|
||||
|
||||
// polyTable is the Koopman-polynomial lookup table shared by all MakeCRC32
// calls.
var polyTable = crc32.MakeTable(crc32.Koopman)

// MakeCRC32 returns the Koopman CRC-32 checksum of str formatted as a
// base-10 string.
func MakeCRC32(str string) string {
	sum := crc32.Checksum([]byte(str), polyTable)
	return strconv.FormatUint(uint64(sum), 10)
}
|
||||
|
||||
// ExtractVMData extracts the dataDisks, storageProfile and hardwareProfile
// sub-structures from a raw (JSON-decoded) virtual machine map. It returns
// an error when any expected key is missing or has an unexpected type.
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
	storageProfile map[string]interface{},
	hardwareProfile map[string]interface{}, err error) {
	properties, ok := vmData["properties"].(map[string]interface{})
	if !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
	}

	if storageProfile, ok = properties["storageProfile"].(map[string]interface{}); !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
	}

	if hardwareProfile, ok = properties["hardwareProfile"].(map[string]interface{}); !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
	}

	if dataDisks, ok = storageProfile["dataDisks"].([]interface{}); !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
	}

	return dataDisks, storageProfile, hardwareProfile, nil
}
|
||||
|
||||
// ExtractDiskData extracts provisioningState and diskState from a raw
// (JSON-decoded) disk structure. diskState is optional and left empty when
// absent; provisioningState defaults to "" when missing (real disks always
// carry it).
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
	fragment, ok := diskData.(map[string]interface{})
	if !ok {
		return "", "", fmt.Errorf("convert diskData to map error")
	}

	properties, ok := fragment["properties"].(map[string]interface{})
	if !ok {
		return "", "", fmt.Errorf("convert diskData(properties) to map error")
	}

	// if there is a disk, provisioningState property will be there
	provisioningState, _ = properties["provisioningState"].(string)

	if state, present := properties["diskState"]; present {
		diskState = state.(string)
	}
	return provisioningState, diskState, nil
}
|
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_test.go
generated
vendored
Normal file
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_test.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestGetVmssInstanceID verifies that getVmssInstanceID rejects machine names
// shorter than the 6-character base-36 suffix and decodes valid suffixes to a
// decimal instance ID ("00000Z" -> 35).
func TestGetVmssInstanceID(t *testing.T) {
	tests := []struct {
		msg                string
		machineName        string
		expectError        bool
		expectedInstanceID string
	}{{
		msg:         "invalid vmss instance name",
		machineName: "vmvm",
		expectError: true,
	},
		{
			msg:                "valid vmss instance name",
			machineName:        "vm00000Z",
			expectError:        false,
			expectedInstanceID: "35",
		},
	}

	for i, test := range tests {
		instanceID, err := getVmssInstanceID(test.machineName)
		if test.expectError {
			assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		} else {
			assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		}
	}
}
|
102
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go
generated
vendored
Normal file
102
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// getIPForVmssMachine returns the private IP address of a VM scale set
// instance by resolving its primary NIC and that NIC's primary IP
// configuration.
func (az *Cloud) getIPForVmssMachine(nodeName types.NodeName) (string, error) {
	az.operationPollRateLimiter.Accept()
	machine, exists, err := az.getVmssVirtualMachine(nodeName)
	// NOTE(review): !exists is checked before err, so a lookup failure that
	// also reports exists=false surfaces as InstanceNotFound instead of the
	// underlying error.
	if !exists {
		return "", cloudprovider.InstanceNotFound
	}
	if err != nil {
		glog.Errorf("error: az.getIPForVmssMachine(%s), az.getVmssVirtualMachine(%s), err=%v", nodeName, nodeName, err)
		return "", err
	}

	nicID, err := getPrimaryInterfaceIDForVmssMachine(machine)
	if err != nil {
		glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err)
		return "", err
	}

	// The NIC name is the last path segment of its ARM ID.
	nicName, err := getLastSegment(nicID)
	if err != nil {
		glog.Errorf("error: az.getIPForVmssMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err)
		return "", err
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName)
	nic, err := az.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(az.ResourceGroup, az.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "")
	glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName)
	if err != nil {
		glog.Errorf("error: az.getIPForVmssMachine(%s), az.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err)
		return "", err
	}

	ipConfig, err := getPrimaryIPConfig(nic)
	if err != nil {
		glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
		return "", err
	}

	targetIP := *ipConfig.PrivateIPAddress
	return targetIP, nil
}
|
||||
|
||||
// getPrimaryInterfaceIDForVmssMachine returns the full ARM identifier of the
// primary NIC for the given scale set VM. A machine with exactly one NIC
// treats that NIC as primary; otherwise the NIC flagged Primary is returned.
func getPrimaryInterfaceIDForVmssMachine(machine compute.VirtualMachineScaleSetVM) (string, error) {
	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
		return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
	}

	for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
		if *ref.Primary {
			return *ref.ID, nil
		}
	}

	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}
|
||||
|
||||
// machineName is composed of computerNamePrefix and 36-based instanceID.
|
||||
// And instanceID part if in fixed length of 6 characters.
|
||||
// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
|
||||
func getVmssInstanceID(machineName string) (string, error) {
|
||||
nameLength := len(machineName)
|
||||
if nameLength < 6 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
|
||||
if err != nil {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
|
235
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
Normal file
235
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
Normal file
@ -0,0 +1,235 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
var (
	// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
	ErrorNotVmssInstance = errors.New("not a vmss instance")
)
|
||||
|
||||
// checkResourceExistsFromError inspects an error and returns (true, nil) if
// err is nil, (false, nil) if err is an autorest.DetailedError with
// StatusCode=404, and (false, err) for any other status code or error type.
func checkResourceExistsFromError(err error) (bool, error) {
	if err == nil {
		return true, nil
	}
	v, ok := err.(autorest.DetailedError)
	if !ok {
		return false, err
	}
	if v.StatusCode == http.StatusNotFound {
		return false, nil
	}
	return false, v
}
|
||||
|
||||
// ignoreStatusNotFoundFromError returns nil when err is nil or is an
// autorest.DetailedError with StatusCode=404; any other error is returned
// unchanged.
func ignoreStatusNotFoundFromError(err error) error {
	if err == nil {
		return nil
	}
	v, ok := err.(autorest.DetailedError)
	if ok && v.StatusCode == http.StatusNotFound {
		return nil
	}
	return err
}
|
||||
|
||||
// getVirtualMachine fetches the VM named after nodeName from the configured
// resource group. exists=false with a nil error means a clean 404; any other
// API failure is returned as err.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) {
	var realErr error

	vmName := string(nodeName)
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("VirtualMachinesClient.Get(%s): start", vmName)
	vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
	glog.V(10).Infof("VirtualMachinesClient.Get(%s): end", vmName)

	// Map a 404 to exists=false instead of treating it as an error.
	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return vm, false, realErr
	}

	if !exists {
		return vm, false, nil
	}

	return vm, exists, err
}
|
||||
|
||||
// getVmssVirtualMachine fetches the scale set VM whose instance ID is decoded
// from nodeName's base-36 suffix. It returns ErrorNotVmssInstance when the
// name lacks a valid suffix; exists=false with a nil error means a clean 404.
func (az *Cloud) getVmssVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) {
	var realErr error

	vmName := string(nodeName)
	instanceID, err := getVmssInstanceID(vmName)
	if err != nil {
		return vm, false, err
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", vmName)
	vm, err = az.VirtualMachineScaleSetVMsClient.Get(az.ResourceGroup, az.PrimaryScaleSetName, instanceID)
	glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", vmName)

	// Map a 404 to exists=false instead of treating it as an error.
	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return vm, false, realErr
	}

	if !exists {
		return vm, false, nil
	}

	return vm, exists, err
}
|
||||
|
||||
// getRouteTable fetches the cloud's configured route table.
// exists=false with a nil error means a clean 404.
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("RouteTablesClient.Get(%s): start", az.RouteTableName)
	routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
	glog.V(10).Infof("RouteTablesClient.Get(%s): end", az.RouteTableName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return routeTable, false, realErr
	}

	if !exists {
		return routeTable, false, nil
	}

	return routeTable, exists, err
}
|
||||
|
||||
// getSecurityGroup fetches the cloud's configured network security group.
// exists=false with a nil error means a clean 404.
func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("SecurityGroupsClient.Get(%s): start", az.SecurityGroupName)
	sg, err = az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
	glog.V(10).Infof("SecurityGroupsClient.Get(%s): end", az.SecurityGroupName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return sg, false, realErr
	}

	if !exists {
		return sg, false, nil
	}

	return sg, exists, err
}
|
||||
|
||||
// getAzureLoadBalancer fetches the named load balancer from the configured
// resource group. exists=false with a nil error means a clean 404.
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
	var realErr error
	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("LoadBalancerClient.Get(%s): start", name)
	lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "")
	glog.V(10).Infof("LoadBalancerClient.Get(%s): end", name)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return lb, false, realErr
	}

	if !exists {
		return lb, false, nil
	}

	return lb, exists, err
}
|
||||
|
||||
// listLoadBalancers lists all load balancers in the configured resource
// group. exists=false with a nil error means the resource group itself
// returned a 404.
func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResult, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("LoadBalancerClient.List(%s): start", az.ResourceGroup)
	lbListResult, err = az.LoadBalancerClient.List(az.ResourceGroup)
	glog.V(10).Infof("LoadBalancerClient.List(%s): end", az.ResourceGroup)
	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return lbListResult, false, realErr
	}

	if !exists {
		return lbListResult, false, nil
	}

	return lbListResult, exists, err
}
|
||||
|
||||
// getPublicIPAddress fetches the named public IP address from the configured
// resource group. exists=false with a nil error means a clean 404.
func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) {
	var realErr error

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("PublicIPAddressesClient.Get(%s): start", name)
	pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, name, "")
	glog.V(10).Infof("PublicIPAddressesClient.Get(%s): end", name)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return pip, false, realErr
	}

	if !exists {
		return pip, false, nil
	}

	return pip, exists, err
}
|
||||
|
||||
// getSubnet fetches a subnet of the given virtual network, preferring the
// dedicated vnet resource group when one is configured. exists=false with a
// nil error means a clean 404.
func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) {
	var realErr error
	var rg string

	// The vnet may live in a different resource group than the cluster.
	if len(az.VnetResourceGroup) > 0 {
		rg = az.VnetResourceGroup
	} else {
		rg = az.ResourceGroup
	}

	az.operationPollRateLimiter.Accept()
	glog.V(10).Infof("SubnetsClient.Get(%s): start", subnetName)
	subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "")
	glog.V(10).Infof("SubnetsClient.Get(%s): end", subnetName)

	exists, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return subnet, false, realErr
	}

	if !exists {
		return subnet, false, nil
	}

	return subnet, exists, err
}
|
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
Normal file
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
// TestExtractNotFound verifies checkResourceExistsFromError's mapping:
// nil -> exists, 404 -> not-exists with nil error, and any other error
// (HTTP or otherwise) passed through with exists=false.
func TestExtractNotFound(t *testing.T) {
	notFound := autorest.DetailedError{StatusCode: http.StatusNotFound}
	otherHTTP := autorest.DetailedError{StatusCode: http.StatusForbidden}
	otherErr := fmt.Errorf("other error")

	tests := []struct {
		err         error
		expectedErr error
		exists      bool
	}{
		{nil, nil, true},
		{otherErr, otherErr, false},
		{notFound, nil, false},
		{otherHTTP, otherHTTP, false},
	}

	for _, test := range tests {
		exists, err := checkResourceExistsFromError(test.err)
		if test.exists != exists {
			t.Errorf("expected: %v, saw: %v", test.exists, exists)
		}
		if !reflect.DeepEqual(test.expectedErr, err) {
			t.Errorf("expected err: %v, saw: %v", test.expectedErr, err)
		}
	}
}
|
113
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
Normal file
113
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
)
|
||||
|
||||
// instanceInfoURL is the Azure instance metadata endpoint used to discover
// this VM's fault domain.
const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"

// faultMutex guards the lazily populated faultDomain cache below.
var faultMutex = &sync.Mutex{}

// faultDomain caches the fault domain for the life of the process; nil means
// not yet fetched.
var faultDomain *string

// instanceInfo models the subset of the instance metadata response used here.
type instanceInfo struct {
	ID           string `json:"ID"`
	UpdateDomain string `json:"UD"`
	FaultDomain  string `json:"FD"`
}
|
||||
|
||||
// GetZone returns the Zone containing the current failure zone and locality
// region that the program is running in. The fault domain is fetched from the
// instance metadata service once and cached (guarded by faultMutex).
func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
	faultMutex.Lock()
	defer faultMutex.Unlock()
	if faultDomain == nil {
		var err error
		faultDomain, err = fetchFaultDomain()
		if err != nil {
			return cloudprovider.Zone{}, err
		}
	}
	zone := cloudprovider.Zone{
		FailureDomain: *faultDomain,
		Region:        az.Location,
	}
	return zone, nil
}
|
||||
|
||||
// GetZoneByProviderID implements Zones.GetZoneByProviderID by converting the
// providerID to a node name and delegating to GetZoneByNodeName.
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
	nodeName, err := splitProviderID(providerID)
	if err != nil {
		return cloudprovider.Zone{}, err
	}
	return az.GetZoneByNodeName(nodeName)
}
|
||||
|
||||
// GetZoneByNodeName implements Zones.GetZoneByNodeName by reading the VM's
// platform fault domain from its instance view.
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
// NOTE(review): dereferences InstanceView.PlatformFaultDomain without a nil
// check — assumes the InstanceView expansion always populates it; confirm.
func (az *Cloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {

	vm, err := az.VirtualMachinesClient.Get(az.ResourceGroup, string(nodeName), compute.InstanceView)

	if err != nil {
		return cloudprovider.Zone{}, err
	}

	failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))

	zone := cloudprovider.Zone{
		FailureDomain: failureDomain,
		Region:        *(vm.Location),
	}
	return zone, nil
}
|
||||
|
||||
// fetchFaultDomain queries the Azure instance metadata endpoint for this VM's
// fault domain.
// NOTE(review): uses the default HTTP client with no timeout — a hung
// metadata endpoint would block indefinitely.
func fetchFaultDomain() (*string, error) {
	resp, err := http.Get(instanceInfoURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return readFaultDomain(resp.Body)
}
|
||||
|
||||
func readFaultDomain(reader io.Reader) (*string, error) {
|
||||
var instanceInfo instanceInfo
|
||||
body, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(body, &instanceInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &instanceInfo.FaultDomain, nil
|
||||
}
|
60
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD
generated
vendored
Normal file
60
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
# Bazel build definitions for the CloudStack cloud provider package.
# Vendored and tool-managed — the "automanaged" tags indicate the filegroups
# are regenerated automatically; avoid hand-editing.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cloudstack.go",
        "cloudstack_instances.go",
        "cloudstack_loadbalancer.go",
        "metadata.go",
        "metadata_other.go",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "metadata_linux.go",
        ],
        "//conditions:default": [],
    }),
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack",
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//pkg/controller:go_default_library",
        "//vendor/github.com/d2g/dhcp4:go_default_library",
        "//vendor/github.com/d2g/dhcp4client:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/kardianos/osext:go_default_library",
        "//vendor/github.com/xanzy/go-cloudstack/cloudstack:go_default_library",
        "//vendor/gopkg.in/gcfg.v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["cloudstack_test.go"],
    importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack",
    library = ":go_default_library",
    deps = [
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
|
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/OWNERS
generated
vendored
Normal file
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/OWNERS
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
approvers:
|
||||
- ngtuna
|
||||
- sebgoa
|
||||
- svanharmelen
|
264
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go
generated
vendored
Normal file
264
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go
generated
vendored
Normal file
@ -0,0 +1,264 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/kardianos/osext"
|
||||
"github.com/xanzy/go-cloudstack/cloudstack"
|
||||
"gopkg.in/gcfg.v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
// ProviderName is the name of this cloud provider, used as the registration
// key in the cloudprovider registry.
const ProviderName = "cloudstack"
|
||||
|
||||
// CSConfig wraps the config for the CloudStack cloud provider, read from a
// gcfg-formatted file (see readConfig).
type CSConfig struct {
	Global struct {
		APIURL      string `gcfg:"api-url"`
		APIKey      string `gcfg:"api-key"`
		SecretKey   string `gcfg:"secret-key"`
		SSLNoVerify bool   `gcfg:"ssl-no-verify"`
		ProjectID   string `gcfg:"project-id"`
		Zone        string `gcfg:"zone"`
	}
}
|
||||
|
||||
// CSCloud is an implementation of Interface for CloudStack. Either client or
// metadata (or both) may be nil; methods check before use.
type CSCloud struct {
	// client talks to the CloudStack API; nil when no credentials were given.
	client *cloudstack.CloudStackClient
	// metadata serves instance info via the DHCP server; nil outside kubelet.
	metadata  *metadata
	projectID string // If non-"", all resources will be created within this project
	zone      string
}
|
||||
|
||||
// init registers the CloudStack provider factory under ProviderName so that
// the cloudprovider registry can construct it from a config reader.
func init() {
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		cfg, err := readConfig(config)
		if err != nil {
			return nil, err
		}

		return newCSCloud(cfg)
	})
}
|
||||
|
||||
// readConfig parses a gcfg-formatted cloud provider config. A nil reader
// yields an empty (all-defaults) config rather than an error, which lets the
// kubelet run with metadata only.
func readConfig(config io.Reader) (*CSConfig, error) {
	cfg := &CSConfig{}

	if config == nil {
		return cfg, nil
	}

	if err := gcfg.ReadInto(cfg, config); err != nil {
		return nil, fmt.Errorf("could not parse cloud provider config: %v", err)
	}

	return cfg, nil
}
|
||||
|
||||
// newCSCloud creates a new instance of CSCloud.
|
||||
func newCSCloud(cfg *CSConfig) (*CSCloud, error) {
|
||||
cs := &CSCloud{
|
||||
projectID: cfg.Global.ProjectID,
|
||||
zone: cfg.Global.Zone,
|
||||
}
|
||||
|
||||
exe, err := osext.Executable()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cloud not find the service executable: %v", err)
|
||||
}
|
||||
|
||||
// When running the kubelet service it's fine to not specify a config file (or only a
|
||||
// partial config file) as all needed info can be retrieved anonymously using metadata.
|
||||
if filepath.Base(exe) == "kubelet" || filepath.Base(exe) == "kubelet.exe" {
|
||||
// In CloudStack your metadata is always served by the DHCP server.
|
||||
dhcpServer, err := findDHCPServer()
|
||||
if err == nil {
|
||||
glog.V(4).Infof("Found metadata server: %v", dhcpServer)
|
||||
cs.metadata = &metadata{dhcpServer: dhcpServer, zone: cs.zone}
|
||||
} else {
|
||||
glog.Errorf("Error searching metadata server: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Global.APIURL != "" && cfg.Global.APIKey != "" && cfg.Global.SecretKey != "" {
|
||||
cs.client = cloudstack.NewAsyncClient(cfg.Global.APIURL, cfg.Global.APIKey, cfg.Global.SecretKey, !cfg.Global.SSLNoVerify)
|
||||
}
|
||||
|
||||
if cs.client == nil {
|
||||
if cs.metadata != nil {
|
||||
glog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!")
|
||||
} else {
|
||||
return nil, errors.New("no cloud provider config given")
|
||||
}
|
||||
}
|
||||
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
// Initialize passes a Kubernetes clientBuilder interface to the cloud
// provider; CloudStack needs no client, so this is a no-op.
func (cs *CSCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
|
||||
|
||||
// LoadBalancer returns an implementation of LoadBalancer for CloudStack.
// Unavailable (false) without an API client, since load balancer management
// requires API access.
func (cs *CSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	if cs.client == nil {
		return nil, false
	}

	return cs, true
}
|
||||
|
||||
// Instances returns an implementation of Instances for CloudStack.
// The metadata-backed implementation is preferred when available; otherwise
// the API client is used, and the feature is disabled without either.
func (cs *CSCloud) Instances() (cloudprovider.Instances, bool) {
	if cs.metadata != nil {
		return cs.metadata, true
	}

	if cs.client == nil {
		return nil, false
	}

	return cs, true
}
|
||||
|
||||
// Zones returns an implementation of Zones for CloudStack.
|
||||
func (cs *CSCloud) Zones() (cloudprovider.Zones, bool) {
|
||||
if cs.metadata != nil {
|
||||
return cs.metadata, true
|
||||
}
|
||||
|
||||
if cs.client == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return cs, true
|
||||
}
|
||||
|
||||
// Clusters returns an implementation of Clusters for CloudStack.
|
||||
func (cs *CSCloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
if cs.client == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Routes returns an implementation of Routes for CloudStack.
|
||||
func (cs *CSCloud) Routes() (cloudprovider.Routes, bool) {
|
||||
if cs.client == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
func (cs *CSCloud) ProviderName() string {
	// ProviderName is a package-level constant declared elsewhere in this package.
	return ProviderName
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods. CloudStack does not modify the
// settings, so the inputs are returned unchanged.
func (cs *CSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID.
func (cs *CSCloud) HasClusterID() bool {
	// Unconditionally true: this provider does not depend on a cluster ID.
	return true
}
|
||||
|
||||
// GetZone returns the Zone containing the region that the program is running in.
// When the zone is not already cached on cs, it is resolved by looking up the
// virtual machine matching the local hostname; the result is then cached.
func (cs *CSCloud) GetZone() (cloudprovider.Zone, error) {
	zone := cloudprovider.Zone{}

	if cs.zone == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return zone, fmt.Errorf("failed to get hostname for retrieving the zone: %v", err)
		}

		instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(hostname)
		if err != nil {
			// A zero count indicates the lookup succeeded but matched no VM.
			if count == 0 {
				return zone, fmt.Errorf("could not find instance for retrieving the zone: %v", err)
			}
			return zone, fmt.Errorf("error getting instance for retrieving the zone: %v", err)
		}

		// Cache the zone so later calls skip the API round trip.
		cs.zone = instance.Zonename
	}

	glog.V(2).Infof("Current zone is %v", cs.zone)
	// The zone name is used for both failure domain and region here.
	zone.FailureDomain = cs.zone
	zone.Region = cs.zone

	return zone, nil
}
|
||||
|
||||
// GetZoneByProviderID returns the Zone, found by using the provider ID.
|
||||
func (cs *CSCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
|
||||
providerID,
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
)
|
||||
if err != nil {
|
||||
if count == 0 {
|
||||
return zone, fmt.Errorf("could not find node by ID: %v", providerID)
|
||||
}
|
||||
return zone, fmt.Errorf("error retrieving zone: %v", err)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Current zone is %v", cs.zone)
|
||||
zone.FailureDomain = instance.Zonename
|
||||
zone.Region = instance.Zonename
|
||||
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// GetZoneByNodeName returns the Zone, found by using the node name.
|
||||
func (cs *CSCloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
|
||||
string(nodeName),
|
||||
cloudstack.WithProject(cs.projectID),
|
||||
)
|
||||
if err != nil {
|
||||
if count == 0 {
|
||||
return zone, fmt.Errorf("could not find node: %v", nodeName)
|
||||
}
|
||||
return zone, fmt.Errorf("error retrieving zone: %v", err)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Current zone is %v", cs.zone)
|
||||
zone.FailureDomain = instance.Zonename
|
||||
zone.Region = instance.Zonename
|
||||
|
||||
return zone, nil
|
||||
}
|
159
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go
generated
vendored
Normal file
159
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go
generated
vendored
Normal file
@ -0,0 +1,159 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/xanzy/go-cloudstack/cloudstack"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
// The instance is looked up by node name, scoped to the configured project.
func (cs *CSCloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
	instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
		string(name),
		cloudstack.WithProject(cs.projectID),
	)
	if err != nil {
		// A zero count means the lookup succeeded but matched no VM.
		if count == 0 {
			return nil, cloudprovider.InstanceNotFound
		}
		return nil, fmt.Errorf("error retrieving node addresses: %v", err)
	}

	return cs.nodeAddresses(instance)
}
|
||||
|
||||
// NodeAddressesByProviderID returns the addresses of the specified instance.
// The instance is looked up by provider ID, scoped to the configured project.
func (cs *CSCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
		providerID,
		cloudstack.WithProject(cs.projectID),
	)
	if err != nil {
		// A zero count means the lookup succeeded but matched no VM.
		if count == 0 {
			return nil, cloudprovider.InstanceNotFound
		}
		return nil, fmt.Errorf("error retrieving node addresses: %v", err)
	}

	return cs.nodeAddresses(instance)
}
|
||||
|
||||
func (cs *CSCloud) nodeAddresses(instance *cloudstack.VirtualMachine) ([]v1.NodeAddress, error) {
|
||||
if len(instance.Nic) == 0 {
|
||||
return nil, errors.New("instance does not have an internal IP")
|
||||
}
|
||||
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: instance.Nic[0].Ipaddress},
|
||||
}
|
||||
|
||||
if instance.Publicip != "" {
|
||||
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: instance.Publicip})
|
||||
} else {
|
||||
// Since there is no sane way to determine the external IP if the host isn't
|
||||
// using static NAT, we will just fire a log message and omit the external IP.
|
||||
glog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id)
|
||||
}
|
||||
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
// It simply delegates to InstanceID.
func (cs *CSCloud) ExternalID(name types.NodeName) (string, error) {
	return cs.InstanceID(name)
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
// The instance is looked up by node name, scoped to the configured project.
func (cs *CSCloud) InstanceID(name types.NodeName) (string, error) {
	instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
		string(name),
		cloudstack.WithProject(cs.projectID),
	)
	if err != nil {
		// A zero count means the lookup succeeded but matched no VM.
		if count == 0 {
			return "", cloudprovider.InstanceNotFound
		}
		return "", fmt.Errorf("error retrieving instance ID: %v", err)
	}

	return instance.Id, nil
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
// The CloudStack service offering name is used as the instance type.
func (cs *CSCloud) InstanceType(name types.NodeName) (string, error) {
	instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByName(
		string(name),
		cloudstack.WithProject(cs.projectID),
	)
	if err != nil {
		// A zero count means the lookup succeeded but matched no VM.
		if count == 0 {
			return "", cloudprovider.InstanceNotFound
		}
		return "", fmt.Errorf("error retrieving instance type: %v", err)
	}

	return instance.Serviceofferingname, nil
}
|
||||
|
||||
// InstanceTypeByProviderID returns the type of the specified instance.
// The CloudStack service offering name is used as the instance type.
func (cs *CSCloud) InstanceTypeByProviderID(providerID string) (string, error) {
	instance, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
		providerID,
		cloudstack.WithProject(cs.projectID),
	)
	if err != nil {
		// A zero count means the lookup succeeded but matched no VM.
		if count == 0 {
			return "", cloudprovider.InstanceNotFound
		}
		return "", fmt.Errorf("error retrieving instance type: %v", err)
	}

	return instance.Serviceofferingname, nil
}
|
||||
|
||||
// AddSSHKeyToAllInstances is currently not implemented.
func (cs *CSCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return errors.New("AddSSHKeyToAllInstances not implemented")
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
// The hostname is used verbatim as the node name.
func (cs *CSCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}
|
||||
|
||||
// InstanceExistsByProviderID returns if the instance still exists.
// A lookup error with a zero count is treated as "does not exist" (false, nil)
// rather than an error.
func (cs *CSCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
	_, count, err := cs.client.VirtualMachine.GetVirtualMachineByID(
		providerID,
		cloudstack.WithProject(cs.projectID),
	)
	if err != nil {
		if count == 0 {
			return false, nil
		}
		return false, fmt.Errorf("error retrieving instance: %v", err)
	}

	return true, nil
}
|
542
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go
generated
vendored
Normal file
542
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go
generated
vendored
Normal file
@ -0,0 +1,542 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/xanzy/go-cloudstack/cloudstack"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// loadBalancer bundles the CloudStack client with the state needed to manage
// one Kubernetes service's load balancer: its name, IP, network and the set
// of existing load balancer rules.
type loadBalancer struct {
	*cloudstack.CloudStackClient

	// name is derived from the service (see getLoadBalancer).
	name string
	// algorithm is the balancing algorithm ("roundrobin" or "source").
	algorithm string
	// hostIDs are the IDs of the VMs backing the rules.
	hostIDs []string
	// ipAddr and ipAddrID identify the public IP of the load balancer.
	ipAddr   string
	ipAddrID string
	// networkID is the network all hosts belong to.
	networkID string
	// projectID optionally scopes API calls to a CloudStack project.
	projectID string
	// rules holds the existing load balancer rules, keyed by rule name.
	rules map[string]*cloudstack.LoadBalancerRule
}
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is.
// Existence is determined by whether any load balancer rules are found for the service.
func (cs *CSCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
	glog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name)

	// Get the load balancer details and existing rules.
	lb, err := cs.getLoadBalancer(service)
	if err != nil {
		return nil, false, err
	}

	// If we don't have any rules, the load balancer does not exist.
	if len(lb.rules) == 0 {
		return nil, false, nil
	}

	glog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr)

	status := &v1.LoadBalancerStatus{}
	status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: lb.ipAddr})

	return status, true, nil
}
|
||||
|
||||
// EnsureLoadBalancer creates a new load balancer, or updates the existing one. Returns the status of the balancer.
// The flow is: resolve the algorithm from session affinity, verify the hosts
// share a network, make sure an IP is associated, reconcile the per-port rules
// (rules still left in lb.rules at the end are obsolete and get deleted).
func (cs *CSCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) {
	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes)

	if len(service.Spec.Ports) == 0 {
		return nil, fmt.Errorf("requested load balancer with no ports")
	}

	// Get the load balancer details and existing rules.
	lb, err := cs.getLoadBalancer(service)
	if err != nil {
		return nil, err
	}

	// Set the load balancer algorithm.
	switch service.Spec.SessionAffinity {
	case v1.ServiceAffinityNone:
		lb.algorithm = "roundrobin"
	case v1.ServiceAffinityClientIP:
		lb.algorithm = "source"
	default:
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", service.Spec.SessionAffinity)
	}

	// Verify that all the hosts belong to the same network, and retrieve their ID's.
	lb.hostIDs, lb.networkID, err = cs.verifyHosts(nodes)
	if err != nil {
		return nil, err
	}

	if !lb.hasLoadBalancerIP() {
		// Create or retrieve the load balancer IP.
		if err := lb.getLoadBalancerIP(service.Spec.LoadBalancerIP); err != nil {
			return nil, err
		}

		// If the IP was associated by us (not user-specified), arrange for it
		// to be released again should the remainder of this function fail.
		// The deferred closure reads the named return value err at exit time.
		if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP {
			defer func(lb *loadBalancer) {
				if err != nil {
					if err := lb.releaseLoadBalancerIP(); err != nil {
						glog.Errorf(err.Error())
					}
				}
			}(lb)
		}
	}

	glog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr)

	for _, port := range service.Spec.Ports {
		// All ports have their own load balancer rule, so add the port to lbName to keep the names unique.
		lbRuleName := fmt.Sprintf("%s-%d", lb.name, port.Port)

		// If the load balancer rule exists and is up-to-date, we move on to the next rule.
		exists, needsUpdate, err := lb.checkLoadBalancerRule(lbRuleName, port)
		if err != nil {
			return nil, err
		}
		if exists && !needsUpdate {
			glog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName)
			// Delete the rule from the map, to prevent it being deleted.
			delete(lb.rules, lbRuleName)
			continue
		}

		if needsUpdate {
			glog.V(4).Infof("Updating load balancer rule: %v", lbRuleName)
			if err := lb.updateLoadBalancerRule(lbRuleName); err != nil {
				return nil, err
			}
			// Delete the rule from the map, to prevent it being deleted.
			delete(lb.rules, lbRuleName)
			continue
		}

		glog.V(4).Infof("Creating load balancer rule: %v", lbRuleName)
		lbRule, err := lb.createLoadBalancerRule(lbRuleName, port)
		if err != nil {
			return nil, err
		}

		glog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName)
		if err = lb.assignHostsToRule(lbRule, lb.hostIDs); err != nil {
			return nil, err
		}

	}

	// Cleanup any rules that are now still in the rules map, as they are no longer needed.
	for _, lbRule := range lb.rules {
		glog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name)
		if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
			return nil, err
		}
	}

	status = &v1.LoadBalancerStatus{}
	status.Ingress = []v1.LoadBalancerIngress{{IP: lb.ipAddr}}

	return status, nil
}
|
||||
|
||||
// UpdateLoadBalancer updates hosts under the specified load balancer.
|
||||
func (cs *CSCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
lb, err := cs.getLoadBalancer(service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify that all the hosts belong to the same network, and retrieve their ID's.
|
||||
lb.hostIDs, _, err = cs.verifyHosts(nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, lbRule := range lb.rules {
|
||||
p := lb.LoadBalancer.NewListLoadBalancerRuleInstancesParams(lbRule.Id)
|
||||
|
||||
// Retrieve all VMs currently associated to this load balancer rule.
|
||||
l, err := lb.LoadBalancer.ListLoadBalancerRuleInstances(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving associated instances: %v", err)
|
||||
}
|
||||
|
||||
assign, remove := symmetricDifference(lb.hostIDs, l.LoadBalancerRuleInstances)
|
||||
|
||||
if len(assign) > 0 {
|
||||
glog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name)
|
||||
if err := lb.assignHostsToRule(lbRule, assign); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(remove) > 0 {
|
||||
glog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", assign, lbRule.Name)
|
||||
if err := lb.removeHostsFromRule(lbRule, remove); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, returning
// nil if the load balancer specified either didn't exist or was successfully deleted.
// All rules are removed first; the public IP is only released when it was not
// the user-specified spec.LoadBalancerIP.
func (cs *CSCloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
	glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)

	// Get the load balancer details and existing rules.
	lb, err := cs.getLoadBalancer(service)
	if err != nil {
		return err
	}

	for _, lbRule := range lb.rules {
		glog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name)
		if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
			return err
		}
	}

	// Only release IPs this provider associated itself; a user-supplied
	// LoadBalancerIP stays owned by the user.
	if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP {
		glog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr)
		if err := lb.releaseLoadBalancerIP(); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// getLoadBalancer retrieves the IP address and ID and all the existing rules it can find.
// It builds a loadBalancer value for the service and fills lb.rules with every
// CloudStack rule whose name matches the service's load balancer name.
func (cs *CSCloud) getLoadBalancer(service *v1.Service) (*loadBalancer, error) {
	lb := &loadBalancer{
		CloudStackClient: cs.client,
		name:             cloudprovider.GetLoadBalancerName(service),
		projectID:        cs.projectID,
		rules:            make(map[string]*cloudstack.LoadBalancerRule),
	}

	p := cs.client.LoadBalancer.NewListLoadBalancerRulesParams()
	p.SetKeyword(lb.name)
	p.SetListall(true)

	if cs.projectID != "" {
		p.SetProjectid(cs.projectID)
	}

	l, err := cs.client.LoadBalancer.ListLoadBalancerRules(p)
	if err != nil {
		return nil, fmt.Errorf("error retrieving load balancer rules: %v", err)
	}

	for _, lbRule := range l.LoadBalancerRules {
		lb.rules[lbRule.Name] = lbRule

		// All rules of one load balancer are expected to share a public IP;
		// warn if they do not (lb.ipAddr keeps the last rule's IP either way).
		if lb.ipAddr != "" && lb.ipAddr != lbRule.Publicip {
			glog.Warningf("Load balancer for service %v/%v has rules associated with different IP's: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip)
		}

		lb.ipAddr = lbRule.Publicip
		lb.ipAddrID = lbRule.Publicipid
	}

	glog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules))

	return lb, nil
}
|
||||
|
||||
// verifyHosts verifies if all hosts belong to the same network, and returns the host ID's and network ID.
|
||||
func (cs *CSCloud) verifyHosts(nodes []*v1.Node) ([]string, string, error) {
|
||||
hostNames := map[string]bool{}
|
||||
for _, node := range nodes {
|
||||
hostNames[node.Name] = true
|
||||
}
|
||||
|
||||
p := cs.client.VirtualMachine.NewListVirtualMachinesParams()
|
||||
p.SetListall(true)
|
||||
|
||||
if cs.projectID != "" {
|
||||
p.SetProjectid(cs.projectID)
|
||||
}
|
||||
|
||||
l, err := cs.client.VirtualMachine.ListVirtualMachines(p)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error retrieving list of hosts: %v", err)
|
||||
}
|
||||
|
||||
var hostIDs []string
|
||||
var networkID string
|
||||
|
||||
// Check if the virtual machine is in the hosts slice, then add the corresponding ID.
|
||||
for _, vm := range l.VirtualMachines {
|
||||
if hostNames[vm.Name] {
|
||||
if networkID != "" && networkID != vm.Nic[0].Networkid {
|
||||
return nil, "", fmt.Errorf("found hosts that belong to different networks")
|
||||
}
|
||||
|
||||
networkID = vm.Nic[0].Networkid
|
||||
hostIDs = append(hostIDs, vm.Id)
|
||||
}
|
||||
}
|
||||
|
||||
return hostIDs, networkID, nil
|
||||
}
|
||||
|
||||
// hasLoadBalancerIP returns true if we have a load balancer address and ID.
func (lb *loadBalancer) hasLoadBalancerIP() bool {
	return lb.ipAddr != "" && lb.ipAddrID != ""
}
|
||||
|
||||
// getLoadBalancerIP retieves an existing IP or associates a new IP.
|
||||
func (lb *loadBalancer) getLoadBalancerIP(loadBalancerIP string) error {
|
||||
if loadBalancerIP != "" {
|
||||
return lb.getPublicIPAddress(loadBalancerIP)
|
||||
}
|
||||
|
||||
return lb.associatePublicIPAddress()
|
||||
}
|
||||
|
||||
// getPublicIPAddress retrieves the ID of the given IP, and sets the address and its ID
// on the loadBalancer. Exactly one match must be found, otherwise an error is returned.
func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error {
	glog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP)

	p := lb.Address.NewListPublicIpAddressesParams()
	p.SetIpaddress(loadBalancerIP)
	p.SetListall(true)

	if lb.projectID != "" {
		p.SetProjectid(lb.projectID)
	}

	l, err := lb.Address.ListPublicIpAddresses(p)
	if err != nil {
		return fmt.Errorf("error retrieving IP address: %v", err)
	}

	// Require an unambiguous, single match.
	if l.Count != 1 {
		return fmt.Errorf("could not find IP address %v", loadBalancerIP)
	}

	lb.ipAddr = l.PublicIpAddresses[0].Ipaddress
	lb.ipAddrID = l.PublicIpAddresses[0].Id

	return nil
}
|
||||
|
||||
// associatePublicIPAddress associates a new IP and sets the address and its ID
// on the loadBalancer.
func (lb *loadBalancer) associatePublicIPAddress() error {
	glog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name)
	// If a network belongs to a VPC, the IP address needs to be associated with
	// the VPC instead of with the network.
	network, count, err := lb.Network.GetNetworkByID(lb.networkID, cloudstack.WithProject(lb.projectID))
	if err != nil {
		// A zero count indicates the lookup succeeded but matched no network.
		if count == 0 {
			return fmt.Errorf("could not find network %v", lb.networkID)
		}
		return fmt.Errorf("error retrieving network: %v", err)
	}

	p := lb.Address.NewAssociateIpAddressParams()

	if network.Vpcid != "" {
		p.SetVpcid(network.Vpcid)
	} else {
		p.SetNetworkid(lb.networkID)
	}

	if lb.projectID != "" {
		p.SetProjectid(lb.projectID)
	}

	// Associate a new IP address
	r, err := lb.Address.AssociateIpAddress(p)
	if err != nil {
		return fmt.Errorf("error associating new IP address: %v", err)
	}

	lb.ipAddr = r.Ipaddress
	lb.ipAddrID = r.Id

	return nil
}
|
||||
|
||||
// releaseLoadBalancerIP releases (disassociates) the IP currently held by the
// load balancer, identified by lb.ipAddrID.
func (lb *loadBalancer) releaseLoadBalancerIP() error {
	p := lb.Address.NewDisassociateIpAddressParams(lb.ipAddrID)

	if _, err := lb.Address.DisassociateIpAddress(p); err != nil {
		return fmt.Errorf("error releasing load balancer IP %v: %v", lb.ipAddr, err)
	}

	return nil
}
|
||||
|
||||
// checkLoadBalancerRule checks if the rule already exists and if it does, if it can be updated. If
// it does exist but cannot be updated, it will delete the existing rule so it can be created again.
// Returns (exists, needsUpdate, error): (false, false) when absent or deleted,
// (true, true) when only the algorithm differs, (true, false) when up-to-date.
func (lb *loadBalancer) checkLoadBalancerRule(lbRuleName string, port v1.ServicePort) (bool, bool, error) {
	lbRule, ok := lb.rules[lbRuleName]
	if !ok {
		return false, false, nil
	}

	// Check if any of the values we cannot update (those that require a new load balancer rule) are changed.
	if lbRule.Publicip == lb.ipAddr && lbRule.Privateport == strconv.Itoa(int(port.NodePort)) && lbRule.Publicport == strconv.Itoa(int(port.Port)) {
		// Only the algorithm is updatable in place.
		return true, lbRule.Algorithm != lb.algorithm, nil
	}

	// Delete the load balancer rule so we can create a new one using the new values.
	if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
		return false, false, err
	}

	return false, false, nil
}
|
||||
|
||||
// updateLoadBalancerRule updates a load balancer rule.
// Only the algorithm is changed; other properties require rule re-creation
// (see checkLoadBalancerRule).
func (lb *loadBalancer) updateLoadBalancerRule(lbRuleName string) error {
	lbRule := lb.rules[lbRuleName]

	p := lb.LoadBalancer.NewUpdateLoadBalancerRuleParams(lbRule.Id)
	p.SetAlgorithm(lb.algorithm)

	_, err := lb.LoadBalancer.UpdateLoadBalancerRule(p)
	return err
}
|
||||
|
||||
// createLoadBalancerRule creates a new load balancer rule and returns it.
// The rule maps the service port (public) to the node port (private) on the
// load balancer's network and public IP. Only TCP and UDP are supported.
func (lb *loadBalancer) createLoadBalancerRule(lbRuleName string, port v1.ServicePort) (*cloudstack.LoadBalancerRule, error) {
	p := lb.LoadBalancer.NewCreateLoadBalancerRuleParams(
		lb.algorithm,
		lbRuleName,
		int(port.NodePort),
		int(port.Port),
	)

	p.SetNetworkid(lb.networkID)
	p.SetPublicipid(lb.ipAddrID)

	switch port.Protocol {
	case v1.ProtocolTCP:
		p.SetProtocol("TCP")
	case v1.ProtocolUDP:
		p.SetProtocol("UDP")
	default:
		return nil, fmt.Errorf("unsupported load balancer protocol: %v", port.Protocol)
	}

	// Do not create corresponding firewall rule.
	p.SetOpenfirewall(false)

	// Create a new load balancer rule.
	r, err := lb.LoadBalancer.CreateLoadBalancerRule(p)
	if err != nil {
		return nil, fmt.Errorf("error creating load balancer rule %v: %v", lbRuleName, err)
	}

	// Copy the response into a LoadBalancerRule value so callers get the same
	// type as the rules retrieved via ListLoadBalancerRules.
	lbRule := &cloudstack.LoadBalancerRule{
		Id:          r.Id,
		Algorithm:   r.Algorithm,
		Cidrlist:    r.Cidrlist,
		Name:        r.Name,
		Networkid:   r.Networkid,
		Privateport: r.Privateport,
		Publicport:  r.Publicport,
		Publicip:    r.Publicip,
		Publicipid:  r.Publicipid,
	}

	return lbRule, nil
}
|
||||
|
||||
// deleteLoadBalancerRule deletes a load balancer rule and removes it from the
// in-memory rules map.
func (lb *loadBalancer) deleteLoadBalancerRule(lbRule *cloudstack.LoadBalancerRule) error {
	p := lb.LoadBalancer.NewDeleteLoadBalancerRuleParams(lbRule.Id)

	if _, err := lb.LoadBalancer.DeleteLoadBalancerRule(p); err != nil {
		return fmt.Errorf("error deleting load balancer rule %v: %v", lbRule.Name, err)
	}

	// Delete the rule from the map as it no longer exists
	delete(lb.rules, lbRule.Name)

	return nil
}
|
||||
|
||||
// assignHostsToRule assigns hosts (by virtual machine ID) to a load balancer rule.
func (lb *loadBalancer) assignHostsToRule(lbRule *cloudstack.LoadBalancerRule, hostIDs []string) error {
	p := lb.LoadBalancer.NewAssignToLoadBalancerRuleParams(lbRule.Id)
	p.SetVirtualmachineids(hostIDs)

	if _, err := lb.LoadBalancer.AssignToLoadBalancerRule(p); err != nil {
		return fmt.Errorf("error assigning hosts to load balancer rule %v: %v", lbRule.Name, err)
	}

	return nil
}
|
||||
|
||||
// removeHostsFromRule removes hosts (by virtual machine ID) from a load balancer rule.
func (lb *loadBalancer) removeHostsFromRule(lbRule *cloudstack.LoadBalancerRule, hostIDs []string) error {
	p := lb.LoadBalancer.NewRemoveFromLoadBalancerRuleParams(lbRule.Id)
	p.SetVirtualmachineids(hostIDs)

	if _, err := lb.LoadBalancer.RemoveFromLoadBalancerRule(p); err != nil {
		return fmt.Errorf("error removing hosts from load balancer rule %v: %v", lbRule.Name, err)
	}

	return nil
}
|
||||
|
||||
// symmetricDifference returns the symmetric difference between the old (existing) and new (wanted) host ID's.
|
||||
func symmetricDifference(hostIDs []string, lbInstances []*cloudstack.VirtualMachine) ([]string, []string) {
|
||||
new := make(map[string]bool)
|
||||
for _, hostID := range hostIDs {
|
||||
new[hostID] = true
|
||||
}
|
||||
|
||||
var remove []string
|
||||
for _, instance := range lbInstances {
|
||||
if new[instance.Id] {
|
||||
delete(new, instance.Id)
|
||||
continue
|
||||
}
|
||||
|
||||
remove = append(remove, instance.Id)
|
||||
}
|
||||
|
||||
var assign []string
|
||||
for hostID := range new {
|
||||
assign = append(assign, hostID)
|
||||
}
|
||||
|
||||
return assign, remove
|
||||
}
|
117
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_test.go
generated
vendored
Normal file
117
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_test.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const testClusterName = "testCluster"
|
||||
|
||||
// TestReadConfig verifies that readConfig accepts a nil reader and correctly
// parses every field of a well-formed [Global] config section.
func TestReadConfig(t *testing.T) {
	_, err := readConfig(nil)
	if err != nil {
		t.Fatalf("Should not return an error when no config is provided: %v", err)
	}

	cfg, err := readConfig(strings.NewReader(`
[Global]
api-url             = https://cloudstack.url
api-key             = a-valid-api-key
secret-key          = a-valid-secret-key
ssl-no-verify       = true
project-id          = a-valid-project-id
`))
	if err != nil {
		t.Fatalf("Should succeed when a valid config is provided: %v", err)
	}

	if cfg.Global.APIURL != "https://cloudstack.url" {
		t.Errorf("incorrect api-url: %s", cfg.Global.APIURL)
	}
	if cfg.Global.APIKey != "a-valid-api-key" {
		t.Errorf("incorrect api-key: %s", cfg.Global.APIKey)
	}
	if cfg.Global.SecretKey != "a-valid-secret-key" {
		t.Errorf("incorrect secret-key: %s", cfg.Global.SecretKey)
	}
	if !cfg.Global.SSLNoVerify {
		t.Errorf("incorrect ssl-no-verify: %t", cfg.Global.SSLNoVerify)
	}
}
|
||||
|
||||
// configFromEnv builds a CSConfig from the CS_* environment variables.
// This allows acceptance testing against an existing CloudStack environment.
// The boolean result reports whether the minimum required fields (API URL,
// API key and secret key) are all present.
func configFromEnv() (*CSConfig, bool) {
	cfg := &CSConfig{}

	cfg.Global.APIURL = os.Getenv("CS_API_URL")
	cfg.Global.APIKey = os.Getenv("CS_API_KEY")
	cfg.Global.SecretKey = os.Getenv("CS_SECRET_KEY")
	cfg.Global.ProjectID = os.Getenv("CS_PROJECT_ID")

	// It is safe to ignore the error here. If the input cannot be parsed SSLNoVerify
	// will still be a bool with its zero value (false) which is the expected default.
	cfg.Global.SSLNoVerify, _ = strconv.ParseBool(os.Getenv("CS_SSL_NO_VERIFY"))

	// Check if we have the minimum required info to be able to connect to CloudStack.
	ok := cfg.Global.APIURL != "" && cfg.Global.APIKey != "" && cfg.Global.SecretKey != ""

	return cfg, ok
}
|
||||
|
||||
// TestNewCSCloud is an acceptance test: it constructs and authenticates a
// CloudStack cloud provider from the CS_* environment variables, and is
// skipped when they are not set.
func TestNewCSCloud(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	_, err := newCSCloud(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate CloudStack: %v", err)
	}
}
|
||||
|
||||
// TestLoadBalancer is an acceptance test: against a live CloudStack (CS_*
// environment variables), it checks that the provider exposes a LoadBalancer
// implementation and that looking up a non-existent load balancer returns
// exists=false without error.
func TestLoadBalancer(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	cs, err := newCSCloud(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate CloudStack: %v", err)
	}

	lb, ok := cs.LoadBalancer()
	if !ok {
		t.Fatalf("LoadBalancer() returned false")
	}

	// A service named "noexist" should not resolve to any load balancer.
	_, exists, err := lb.GetLoadBalancer(testClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "noexist"}})
	if err != nil {
		t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %s", err)
	}
	if exists {
		t.Fatalf("GetLoadBalancer(\"noexist\") returned exists")
	}
}
|
211
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go
generated
vendored
Normal file
211
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/d2g/dhcp4"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// metadata serves instance information obtained from the CloudStack
// metadata service, which is hosted on the instance's DHCP server
// (the CloudStack virtual router).
type metadata struct {
	dhcpServer string // address of the metadata endpoint (DHCP server IP)
	zone       string // cached zone name, lazily resolved by GetZone
}

// metadataType is the path suffix selecting which metadata value to fetch.
type metadataType string

// Known metadata paths under /latest/meta-data/ on the virtual router.
const (
	metadataTypeExternalIP   metadataType = "public-ipv4"
	metadataTypeInternalIP   metadataType = "local-ipv4"
	metadataTypeInstanceID   metadataType = "instance-id"
	metadataTypeInstanceType metadataType = "service-offering"
	metadataTypeZone         metadataType = "availability-zone"
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
// The name argument is ignored: the metadata service can only describe the
// instance it is queried from, so both IPs are read from local metadata.
func (m *metadata) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
	externalIP, err := m.get(metadataTypeExternalIP)
	if err != nil {
		return nil, fmt.Errorf("could not get external IP: %v", err)
	}

	internalIP, err := m.get(metadataTypeInternalIP)
	if err != nil {
		return nil, fmt.Errorf("could not get internal IP: %v", err)
	}

	return []v1.NodeAddress{
		{Type: v1.NodeExternalIP, Address: externalIP},
		{Type: v1.NodeInternalIP, Address: internalIP},
	}, nil
}
|
||||
|
||||
// NodeAddressesByProviderID returns the addresses of the specified instance.
// Not supported by the metadata-based implementation (it can only describe
// the local instance), so it always returns an error.
func (m *metadata) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	return nil, errors.New("NodeAddressesByProviderID not implemented")
}

// ExternalID returns the cloud provider ID of the specified instance (deprecated).
// It delegates to InstanceID, so both return the same "/zone/instanceID" value.
func (m *metadata) ExternalID(name types.NodeName) (string, error) {
	return m.InstanceID(name)
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance,
// formatted as "/<zone>/<instanceID>". The name argument is ignored; both
// values come from the local metadata service.
func (m *metadata) InstanceID(name types.NodeName) (string, error) {
	instanceID, err := m.get(metadataTypeInstanceID)
	if err != nil {
		return "", fmt.Errorf("could not get instance ID: %v", err)
	}

	zone, err := m.get(metadataTypeZone)
	if err != nil {
		return "", fmt.Errorf("could not get zone: %v", err)
	}

	return "/" + zone + "/" + instanceID, nil
}
|
||||
|
||||
// InstanceType returns the type of the specified instance, i.e. the
// CloudStack service offering read from local metadata. The name argument
// is ignored.
func (m *metadata) InstanceType(name types.NodeName) (string, error) {
	instanceType, err := m.get(metadataTypeInstanceType)
	if err != nil {
		return "", fmt.Errorf("could not get instance type: %v", err)
	}

	return instanceType, nil
}

// InstanceTypeByProviderID returns the type of the specified instance.
// Not supported by the metadata-based implementation; always errors.
func (m *metadata) InstanceTypeByProviderID(providerID string) (string, error) {
	return "", errors.New("InstanceTypeByProviderID not implemented")
}
|
||||
|
||||
// AddSSHKeyToAllInstances is currently not implemented; it always errors.
func (m *metadata) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return errors.New("AddSSHKeyToAllInstances not implemented")
}

// CurrentNodeName returns the name of the node we are currently running on.
// The hostname is used verbatim as the node name.
func (m *metadata) CurrentNodeName(hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}

// InstanceExistsByProviderID returns if the instance still exists.
// Not supported by the metadata-based implementation; always errors.
func (m *metadata) InstanceExistsByProviderID(providerID string) (bool, error) {
	return false, errors.New("InstanceExistsByProviderID not implemented")
}
|
||||
|
||||
// GetZone returns the Zone containing the region that the program is running in.
|
||||
func (m *metadata) GetZone() (cloudprovider.Zone, error) {
|
||||
zone := cloudprovider.Zone{}
|
||||
|
||||
if m.zone == "" {
|
||||
zoneName, err := m.get(metadataTypeZone)
|
||||
if err != nil {
|
||||
return zone, fmt.Errorf("could not get zone: %v", err)
|
||||
}
|
||||
|
||||
m.zone = zoneName
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Current zone is %v", zone)
|
||||
zone.FailureDomain = m.zone
|
||||
zone.Region = m.zone
|
||||
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// GetZoneByProviderID returns the Zone, found by using the provider ID.
// Not supported by the metadata-based implementation; always errors.
func (m *metadata) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
	return cloudprovider.Zone{}, errors.New("GetZoneByProviderID not implemented")
}

// GetZoneByNodeName returns the Zone, found by using the node name.
// Not supported by the metadata-based implementation; always errors.
func (m *metadata) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
	return cloudprovider.Zone{}, errors.New("GetZoneByNodeName not implemented")
}
|
||||
|
||||
func (m *metadata) get(mdType metadataType) (string, error) {
|
||||
url := fmt.Sprintf("http://%s/latest/meta-data/%s", m.dhcpServer, mdType)
|
||||
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading metadata: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("unexpected HTTP status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading response body: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func findDHCPServer() (string, error) {
|
||||
nics, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get interfaces: %v", err)
|
||||
}
|
||||
|
||||
for _, nic := range nics {
|
||||
if nic.Flags&net.FlagUp == 1 && nic.Flags&net.FlagLoopback == 0 && nic.Flags&net.FlagPointToPoint == 0 {
|
||||
addrs, err := nic.Addrs()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading IP addresses from interface %v: %v", nic.Name, err)
|
||||
}
|
||||
|
||||
if addrs != nil {
|
||||
client, err := newDHCPClient(&nic)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error creating new DHCP client: %v", err)
|
||||
}
|
||||
|
||||
discoverPacket, err := client.SendDiscoverPacket()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error sending DHCP discover package: %v", err)
|
||||
}
|
||||
|
||||
offerPacket, err := client.GetOffer(&discoverPacket)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error recieving DHCP offer package: %v", err)
|
||||
}
|
||||
|
||||
offerPacketOptions := offerPacket.ParseOptions()
|
||||
|
||||
if ipaddr, ok := offerPacketOptions[dhcp4.OptionServerIdentifier]; ok {
|
||||
return net.IP(ipaddr).String(), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New("no server found")
|
||||
}
|
40
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata_linux.go
generated
vendored
Normal file
40
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata_linux.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/d2g/dhcp4client"
|
||||
)
|
||||
|
||||
// newDHCPClient creates a DHCP client bound to the given interface using a
// raw packet socket (Linux-only; see metadata_other.go for other platforms).
// The client uses a 2-second timeout and unicast (non-broadcast) replies.
func newDHCPClient(nic *net.Interface) (*dhcp4client.Client, error) {
	pktsock, err := dhcp4client.NewPacketSock(nic.Index)
	if err != nil {
		return nil, err
	}

	return dhcp4client.New(
		dhcp4client.HardwareAddr(nic.HardwareAddr),
		dhcp4client.Timeout(2*time.Second),
		dhcp4client.Broadcast(false),
		dhcp4client.Connection(pktsock),
	)
}
|
40
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata_other.go
generated
vendored
Normal file
40
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata_other.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/d2g/dhcp4client"
|
||||
)
|
||||
|
||||
// newDHCPClient creates a DHCP client for non-Linux platforms using a
// regular UDP (inet) socket instead of a raw packet socket. The client uses
// a 2-second timeout and unicast (non-broadcast) replies.
func newDHCPClient(nic *net.Interface) (*dhcp4client.Client, error) {
	inetsock, err := dhcp4client.NewInetSock()
	if err != nil {
		return nil, err
	}

	return dhcp4client.New(
		dhcp4client.HardwareAddr(nic.HardwareAddr),
		dhcp4client.Timeout(2*time.Second),
		dhcp4client.Broadcast(false),
		dhcp4client.Connection(inetsock),
	)
}
|
34
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/BUILD
generated
vendored
Normal file
34
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/BUILD
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"fake.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/fake",
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package fake is a test-double implementation of cloudprovider
|
||||
// Interface, LoadBalancer and Instances. It is useful for testing.
|
||||
package fake // import "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
|
331
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/fake.go
generated
vendored
Normal file
331
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/fake.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
const defaultProviderName = "fake"
|
||||
|
||||
// FakeBalancer is a fake storage of balancer information.
type FakeBalancer struct {
	Name           string           // load balancer name
	Region         string           // region taken from the fake's Zone
	LoadBalancerIP string           // requested IP from the service spec
	Ports          []v1.ServicePort // ports from the service spec
	Hosts          []*v1.Node       // nodes passed to EnsureLoadBalancer
}

// FakeUpdateBalancerCall records one invocation of UpdateLoadBalancer,
// capturing the service and node set it was called with.
type FakeUpdateBalancerCall struct {
	Service *v1.Service
	Hosts   []*v1.Node
}
|
||||
|
||||
// FakeCloud is a test-double implementation of Interface, LoadBalancer, Instances, and Routes. It is useful for testing.
// Tests configure the exported fields to control return values, then inspect
// Calls / UpdateCalls / Balancers / RouteMap to verify behavior.
type FakeCloud struct {
	Exists bool  // returned by GetLoadBalancer as "exists"
	Err    error // default error returned by most methods

	ExistsByProviderID bool  // returned by InstanceExistsByProviderID
	ErrByProviderID    error // error returned by InstanceExistsByProviderID

	Calls         []string // ordered record of method names invoked
	Addresses     []v1.NodeAddress
	ExtID         map[types.NodeName]string // external/instance IDs per node
	InstanceTypes map[types.NodeName]string
	Machines      []types.NodeName // machine names matched by List
	NodeResources *v1.NodeResources
	ClusterList   []string
	MasterName    string
	ExternalIP    net.IP
	Balancers     map[string]FakeBalancer  // lazily created by EnsureLoadBalancer
	UpdateCalls   []FakeUpdateBalancerCall // record of UpdateLoadBalancer calls
	RouteMap      map[string]*FakeRoute    // routes keyed by name
	Lock          sync.Mutex               // guards RouteMap in the route methods
	Provider      string                   // provider name; empty means defaultProviderName
	addCallLock   sync.Mutex               // guards Calls
	cloudprovider.Zone
	VolumeLabelMap map[string]map[string]string // labels per PV name for GetLabelsForVolume
}

// FakeRoute pairs a cloudprovider.Route with the cluster it belongs to.
type FakeRoute struct {
	ClusterName string
	Route       cloudprovider.Route
}
|
||||
|
||||
// addCall appends desc to the internal record of method calls, guarded by
// addCallLock so concurrently running fake methods can record safely.
func (f *FakeCloud) addCall(desc string) {
	f.addCallLock.Lock()
	defer f.addCallLock.Unlock()
	f.Calls = append(f.Calls, desc)
}

// ClearCalls clears internal record of method calls to this FakeCloud.
// NOTE(review): this does not take addCallLock; it appears intended to be
// called between test phases, not concurrently with fake method calls.
func (f *FakeCloud) ClearCalls() {
	f.Calls = []string{}
}

// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
// The fake implementation is a no-op.
func (f *FakeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
|
||||
|
||||
// ListClusters returns the configured ClusterList along with the configured Err.
func (f *FakeCloud) ListClusters() ([]string, error) {
	return f.ClusterList, f.Err
}

// Master returns the configured MasterName for any cluster name, along with
// the configured Err.
func (f *FakeCloud) Master(name string) (string, error) {
	return f.MasterName, f.Err
}

// Clusters returns a fake implementation of Clusters.
// Actually it just returns f itself.
func (f *FakeCloud) Clusters() (cloudprovider.Clusters, bool) {
	return f, true
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
// It falls back to defaultProviderName ("fake") when Provider is unset.
func (f *FakeCloud) ProviderName() string {
	if f.Provider == "" {
		return defaultProviderName
	}
	return f.Provider
}

// ScrubDNS filters DNS settings for pods.
// The fake passes both lists through unchanged.
func (f *FakeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}

// HasClusterID returns true if the cluster has a clusterID.
// The fake always reports true.
func (f *FakeCloud) HasClusterID() bool {
	return true
}
|
||||
|
||||
// LoadBalancer returns a fake implementation of LoadBalancer.
// Actually it just returns f itself.
func (f *FakeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return f, true
}

// Instances returns a fake implementation of Instances.
//
// Actually it just returns f itself.
func (f *FakeCloud) Instances() (cloudprovider.Instances, bool) {
	return f, true
}

// Zones returns a fake implementation of Zones; f itself.
func (f *FakeCloud) Zones() (cloudprovider.Zones, bool) {
	return f, true
}

// Routes returns a fake implementation of Routes; f itself.
func (f *FakeCloud) Routes() (cloudprovider.Routes, bool) {
	return f, true
}
|
||||
|
||||
// GetLoadBalancer is a stub implementation of LoadBalancer.GetLoadBalancer.
// Regardless of the service, it reports the configured ExternalIP as the
// ingress IP and returns the configured Exists/Err values.
func (f *FakeCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
	status := &v1.LoadBalancerStatus{}
	status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}}

	return status, f.Exists, f.Err
}
|
||||
|
||||
// EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer.
// It adds an entry "create" into the internal method call record.
// The balancer is stored in f.Balancers (lazily created) under the name
// derived from the service, and the returned status uses the configured
// ExternalIP as ingress.
func (f *FakeCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	f.addCall("create")
	if f.Balancers == nil {
		f.Balancers = make(map[string]FakeBalancer)
	}

	name := cloudprovider.GetLoadBalancerName(service)
	spec := service.Spec

	// Region comes from the fake's embedded Zone via GetZone.
	zone, err := f.GetZone()
	if err != nil {
		return nil, err
	}
	region := zone.Region

	f.Balancers[name] = FakeBalancer{name, region, spec.LoadBalancerIP, spec.Ports, nodes}

	status := &v1.LoadBalancerStatus{}
	status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}}

	return status, f.Err
}
|
||||
|
||||
// UpdateLoadBalancer is a test-spy implementation of LoadBalancer.UpdateLoadBalancer.
// It adds an entry "update" into the internal method call record and captures
// the service/nodes in UpdateCalls for later inspection.
func (f *FakeCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
	f.addCall("update")
	f.UpdateCalls = append(f.UpdateCalls, FakeUpdateBalancerCall{service, nodes})
	return f.Err
}

// EnsureLoadBalancerDeleted is a test-spy implementation of LoadBalancer.EnsureLoadBalancerDeleted.
// It adds an entry "delete" into the internal method call record.
// Note: it does not remove anything from f.Balancers.
func (f *FakeCloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
	f.addCall("delete")
	return f.Err
}
|
||||
|
||||
// AddSSHKeyToAllInstances is not supported by the fake; it always returns
// cloudprovider.NotImplemented.
func (f *FakeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}

// CurrentNodeName is the implementation of Instances.CurrentNodeName.
// The hostname is used verbatim as the node name.
func (f *FakeCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}
|
||||
|
||||
// NodeAddresses is a test-spy implementation of Instances.NodeAddresses.
// It adds an entry "node-addresses" into the internal method call record
// and returns the configured Addresses/Err regardless of the instance.
func (f *FakeCloud) NodeAddresses(instance types.NodeName) ([]v1.NodeAddress, error) {
	f.addCall("node-addresses")
	return f.Addresses, f.Err
}

// NodeAddressesByProviderID is a test-spy implementation of Instances.NodeAddressesByProviderID.
// It adds an entry "node-addresses-by-provider-id" into the internal method call record.
func (f *FakeCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	f.addCall("node-addresses-by-provider-id")
	return f.Addresses, f.Err
}

// ExternalID is a test-spy implementation of Instances.ExternalID.
// It adds an entry "external-id" into the internal method call record.
// It returns an external id to the mapped instance name, if not found, it will return "ext-{instance}"
// NOTE(review): the lookup actually returns the map's zero value ("") for a
// missing key, not "ext-{instance}" as the original comment claims.
func (f *FakeCloud) ExternalID(nodeName types.NodeName) (string, error) {
	f.addCall("external-id")
	return f.ExtID[nodeName], f.Err
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified Name.
// Unlike ExternalID, it always returns a nil error (f.Err is not consulted).
func (f *FakeCloud) InstanceID(nodeName types.NodeName) (string, error) {
	f.addCall("instance-id")
	return f.ExtID[nodeName], nil
}

// InstanceType returns the type of the specified instance from the
// configured InstanceTypes map ("" if absent).
func (f *FakeCloud) InstanceType(instance types.NodeName) (string, error) {
	f.addCall("instance-type")
	return f.InstanceTypes[instance], nil
}

// InstanceTypeByProviderID returns the type of the specified instance,
// looking the provider ID up directly as a node name in InstanceTypes.
func (f *FakeCloud) InstanceTypeByProviderID(providerID string) (string, error) {
	f.addCall("instance-type-by-provider-id")
	return f.InstanceTypes[types.NodeName(providerID)], nil
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
// The fake returns the configured ExistsByProviderID/ErrByProviderID pair.
func (f *FakeCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
	f.addCall("instance-exists-by-provider-id")
	return f.ExistsByProviderID, f.ErrByProviderID
}
|
||||
|
||||
// List is a test-spy implementation of Instances.List.
// It adds an entry "list" into the internal method call record and returns
// the configured Machines whose names match the given regexp filter,
// together with the configured Err. Match errors are ignored (treated as
// non-matches).
func (f *FakeCloud) List(filter string) ([]types.NodeName, error) {
	f.addCall("list")
	result := []types.NodeName{}
	for _, machine := range f.Machines {
		if match, _ := regexp.MatchString(filter, string(machine)); match {
			result = append(result, machine)
		}
	}
	return result, f.Err
}
|
||||
|
||||
// GetZone returns the fake's embedded Zone together with the configured Err.
func (f *FakeCloud) GetZone() (cloudprovider.Zone, error) {
	f.addCall("get-zone")
	return f.Zone, f.Err
}

// GetZoneByProviderID implements Zones.GetZoneByProviderID
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
// The fake ignores the provider ID and returns the embedded Zone.
func (f *FakeCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
	f.addCall("get-zone-by-provider-id")
	return f.Zone, f.Err
}

// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
// The fake ignores the node name and returns the embedded Zone.
func (f *FakeCloud) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
	f.addCall("get-zone-by-node-name")
	return f.Zone, f.Err
}
|
||||
|
||||
// ListRoutes returns the routes recorded for the given cluster, together
// with the configured Err. Access to RouteMap is serialized via f.Lock.
func (f *FakeCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.addCall("list-routes")
	var routes []*cloudprovider.Route
	for _, fakeRoute := range f.RouteMap {
		if clusterName == fakeRoute.ClusterName {
			// Return a pointer to a copy so callers cannot mutate the
			// stored route through the returned slice.
			routeCopy := fakeRoute.Route
			routes = append(routes, &routeCopy)
		}
	}
	return routes, f.Err
}
|
||||
|
||||
func (f *FakeCloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.addCall("create-route")
|
||||
name := clusterName + "-" + nameHint
|
||||
if _, exists := f.RouteMap[name]; exists {
|
||||
f.Err = fmt.Errorf("route %q already exists", name)
|
||||
return f.Err
|
||||
}
|
||||
fakeRoute := FakeRoute{}
|
||||
fakeRoute.Route = *route
|
||||
fakeRoute.Route.Name = name
|
||||
fakeRoute.ClusterName = clusterName
|
||||
f.RouteMap[name] = &fakeRoute
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRoute removes the route with the given route's Name from RouteMap.
// It errors (and stores the error in f.Err) when no such route exists.
// Access to RouteMap is serialized via f.Lock.
func (f *FakeCloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.addCall("delete-route")
	name := route.Name
	if _, exists := f.RouteMap[name]; !exists {
		f.Err = fmt.Errorf("no route found with name %q", name)
		return f.Err
	}
	delete(f.RouteMap, name)
	return nil
}
|
||||
|
||||
func (c *FakeCloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error) {
|
||||
if val, ok := c.VolumeLabelMap[pv.Name]; ok {
|
||||
return val, nil
|
||||
}
|
||||
return nil, fmt.Errorf("label not found for volume")
|
||||
}
|
130
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
Normal file
130
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"gce.go",
|
||||
"gce_address_manager.go",
|
||||
"gce_addresses.go",
|
||||
"gce_addresses_fakes.go",
|
||||
"gce_alpha.go",
|
||||
"gce_annotations.go",
|
||||
"gce_backendservice.go",
|
||||
"gce_cert.go",
|
||||
"gce_clusterid.go",
|
||||
"gce_clusters.go",
|
||||
"gce_disks.go",
|
||||
"gce_firewall.go",
|
||||
"gce_forwardingrule.go",
|
||||
"gce_forwardingrule_fakes.go",
|
||||
"gce_healthchecks.go",
|
||||
"gce_instancegroup.go",
|
||||
"gce_instances.go",
|
||||
"gce_interfaces.go",
|
||||
"gce_loadbalancer.go",
|
||||
"gce_loadbalancer_external.go",
|
||||
"gce_loadbalancer_internal.go",
|
||||
"gce_loadbalancer_naming.go",
|
||||
"gce_networkendpointgroup.go",
|
||||
"gce_op.go",
|
||||
"gce_routes.go",
|
||||
"gce_targetpool.go",
|
||||
"gce_targetproxy.go",
|
||||
"gce_urlmap.go",
|
||||
"gce_util.go",
|
||||
"gce_zones.go",
|
||||
"metrics.go",
|
||||
"token_source.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce",
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/util/net/sets:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/cloud.google.com/go/compute/metadata:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/container/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
"//vendor/gopkg.in/gcfg.v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"gce_address_manager_test.go",
|
||||
"gce_annotations_test.go",
|
||||
"gce_disks_test.go",
|
||||
"gce_healthchecks_test.go",
|
||||
"gce_loadbalancer_external_test.go",
|
||||
"gce_test.go",
|
||||
"gce_util_test.go",
|
||||
"metrics_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS
generated
vendored
Normal file
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
approvers:
|
||||
- saad-ali
|
||||
- jingxu97
|
||||
- bowei
|
||||
- freehan
|
||||
- nicksardo
|
||||
- mrhohn
|
||||
- dnardo
|
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package gce is an implementation of Interface, LoadBalancer
|
||||
// and Instances for Google Compute Engine.
|
||||
package gce // import "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
880
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
Normal file
880
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
Normal file
@ -0,0 +1,880 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
gcfg "gopkg.in/gcfg.v1"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
container "google.golang.org/api/container/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
ProviderName = "gce"
|
||||
|
||||
k8sNodeRouteTag = "k8s-node-route"
|
||||
|
||||
// AffinityTypeNone - no session affinity.
|
||||
gceAffinityTypeNone = "NONE"
|
||||
// AffinityTypeClientIP - affinity based on Client IP.
|
||||
gceAffinityTypeClientIP = "CLIENT_IP"
|
||||
// AffinityTypeClientIPProto - affinity based on Client IP and port.
|
||||
gceAffinityTypeClientIPProto = "CLIENT_IP_PROTO"
|
||||
|
||||
operationPollInterval = 3 * time.Second
|
||||
// Creating Route in very large clusters, may take more than half an hour.
|
||||
operationPollTimeoutDuration = time.Hour
|
||||
|
||||
// Each page can have 500 results, but we cap how many pages
|
||||
// are iterated through to prevent infinite loops if the API
|
||||
// were to continuously return a nextPageToken.
|
||||
maxPages = 25
|
||||
|
||||
maxTargetPoolCreateInstances = 200
|
||||
|
||||
// HTTP Load Balancer parameters
|
||||
// Configure 2 second period for external health checks.
|
||||
gceHcCheckIntervalSeconds = int64(2)
|
||||
gceHcTimeoutSeconds = int64(1)
|
||||
// Start sending requests as soon as a pod is found on the node.
|
||||
gceHcHealthyThreshold = int64(1)
|
||||
// Defaults to 5 * 2 = 10 seconds before the LB will steer traffic away
|
||||
gceHcUnhealthyThreshold = int64(5)
|
||||
|
||||
gceComputeAPIEndpoint = "https://www.googleapis.com/compute/v1/"
|
||||
gceComputeAPIEndpointAlpha = "https://www.googleapis.com/compute/alpha/"
|
||||
)
|
||||
|
||||
// gceObject is an abstraction of all GCE API object in go client
|
||||
type gceObject interface {
|
||||
MarshalJSON() ([]byte, error)
|
||||
}
|
||||
|
||||
// GCECloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine.
|
||||
type GCECloud struct {
|
||||
// ClusterID contains functionality for getting (and initializing) the ingress-uid. Call GCECloud.Initialize()
|
||||
// for the cloudprovider to start watching the configmap.
|
||||
ClusterID ClusterID
|
||||
|
||||
service *compute.Service
|
||||
serviceBeta *computebeta.Service
|
||||
serviceAlpha *computealpha.Service
|
||||
containerService *container.Service
|
||||
client clientset.Interface
|
||||
clientBuilder controller.ControllerClientBuilder
|
||||
eventBroadcaster record.EventBroadcaster
|
||||
eventRecorder record.EventRecorder
|
||||
projectID string
|
||||
region string
|
||||
localZone string // The zone in which we are running
|
||||
// managedZones will be set to the 1 zone if running a single zone cluster
|
||||
// it will be set to ALL zones in region for any multi-zone cluster
|
||||
// Use GetAllCurrentZones to get only zones that contain nodes
|
||||
managedZones []string
|
||||
networkURL string
|
||||
isLegacyNetwork bool
|
||||
subnetworkURL string
|
||||
secondaryRangeName string
|
||||
networkProjectID string
|
||||
onXPN bool
|
||||
nodeTags []string // List of tags to use on firewall rules for load balancers
|
||||
lastComputedNodeTags []string // List of node tags calculated in GetHostTags()
|
||||
lastKnownNodeNames sets.String // List of hostnames used to calculate lastComputedHostTags in GetHostTags(names)
|
||||
computeNodeTagLock sync.Mutex // Lock for computing and setting node tags
|
||||
nodeInstancePrefix string // If non-"", an advisory prefix for all nodes in the cluster
|
||||
useMetadataServer bool
|
||||
operationPollRateLimiter flowcontrol.RateLimiter
|
||||
manager diskServiceManager
|
||||
// Lock for access to nodeZones
|
||||
nodeZonesLock sync.Mutex
|
||||
// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
|
||||
// it is updated by the nodeInformer
|
||||
nodeZones map[string]sets.String
|
||||
nodeInformerSynced cache.InformerSynced
|
||||
// sharedResourceLock is used to serialize GCE operations that may mutate shared state to
|
||||
// prevent inconsistencies. For example, load balancers manipulation methods will take the
|
||||
// lock to prevent shared resources from being prematurely deleted while the operation is
|
||||
// in progress.
|
||||
sharedResourceLock sync.Mutex
|
||||
// AlphaFeatureGate gates gce alpha features in GCECloud instance.
|
||||
// Related wrapper functions that interacts with gce alpha api should examine whether
|
||||
// the corresponding api is enabled.
|
||||
// If not enabled, it should return error.
|
||||
AlphaFeatureGate *AlphaFeatureGate
|
||||
}
|
||||
|
||||
// TODO: replace gcfg with json
|
||||
type ConfigGlobal struct {
|
||||
TokenURL string `gcfg:"token-url"`
|
||||
TokenBody string `gcfg:"token-body"`
|
||||
// ProjectID and NetworkProjectID can either be the numeric or string-based
|
||||
// unique identifier that starts with [a-z].
|
||||
ProjectID string `gcfg:"project-id"`
|
||||
// NetworkProjectID refers to the project which owns the network being used.
|
||||
NetworkProjectID string `gcfg:"network-project-id"`
|
||||
NetworkName string `gcfg:"network-name"`
|
||||
SubnetworkName string `gcfg:"subnetwork-name"`
|
||||
// SecondaryRangeName is the name of the secondary range to allocate IP
|
||||
// aliases. The secondary range must be present on the subnetwork the
|
||||
// cluster is attached to.
|
||||
SecondaryRangeName string `gcfg:"secondary-range-name"`
|
||||
NodeTags []string `gcfg:"node-tags"`
|
||||
NodeInstancePrefix string `gcfg:"node-instance-prefix"`
|
||||
Multizone bool `gcfg:"multizone"`
|
||||
// ApiEndpoint is the GCE compute API endpoint to use. If this is blank,
|
||||
// then the default endpoint is used.
|
||||
ApiEndpoint string `gcfg:"api-endpoint"`
|
||||
// LocalZone specifies the GCE zone that gce cloud client instance is
|
||||
// located in (i.e. where the controller will be running). If this is
|
||||
// blank, then the local zone will be discovered via the metadata server.
|
||||
LocalZone string `gcfg:"local-zone"`
|
||||
// Default to none.
|
||||
// For example: MyFeatureFlag
|
||||
AlphaFeatures []string `gcfg:"alpha-features"`
|
||||
}
|
||||
|
||||
// ConfigFile is the struct used to parse the /etc/gce.conf configuration file.
|
||||
type ConfigFile struct {
|
||||
Global ConfigGlobal `gcfg:"global"`
|
||||
}
|
||||
|
||||
// CloudConfig includes all the necessary configuration for creating GCECloud
|
||||
type CloudConfig struct {
|
||||
ApiEndpoint string
|
||||
ProjectID string
|
||||
NetworkProjectID string
|
||||
Region string
|
||||
Zone string
|
||||
ManagedZones []string
|
||||
NetworkName string
|
||||
NetworkURL string
|
||||
SubnetworkName string
|
||||
SubnetworkURL string
|
||||
SecondaryRangeName string
|
||||
NodeTags []string
|
||||
NodeInstancePrefix string
|
||||
TokenSource oauth2.TokenSource
|
||||
UseMetadataServer bool
|
||||
AlphaFeatureGate *AlphaFeatureGate
|
||||
}
|
||||
|
||||
// init registers the GCE cloud provider under ProviderName ("gce") so that
// cloudprovider.GetCloudProvider can construct it from an optional config
// stream. Runs as a package-init side effect.
func init() {
    cloudprovider.RegisterCloudProvider(
        ProviderName,
        func(config io.Reader) (cloudprovider.Interface, error) {
            // config may be nil; newGCECloud falls back to metadata-server discovery.
            return newGCECloud(config)
        })
}
|
||||
|
||||
// Raw access to the underlying GCE service, probably should only be used for e2e tests.
// Returns the v1 compute client; alpha/beta clients are not exposed here.
func (g *GCECloud) GetComputeService() *compute.Service {
    return g.service
}
|
||||
|
||||
// newGCECloud creates a new instance of GCECloud.
// If config is non-nil it is parsed as a gcfg ConfigFile; a nil config is
// valid and causes all settings to be discovered from the GCE metadata
// server (see generateCloudConfig).
func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) {
    var cloudConfig *CloudConfig
    var configFile *ConfigFile

    if config != nil {
        configFile, err = readConfig(config)
        if err != nil {
            return nil, err
        }
        glog.Infof("Using GCE provider config %+v", configFile)
    }

    // configFile may still be nil here; generateCloudConfig handles that case.
    cloudConfig, err = generateCloudConfig(configFile)
    if err != nil {
        return nil, err
    }
    return CreateGCECloud(cloudConfig)

}
|
||||
|
||||
// readConfig parses a gcfg-formatted GCE cloud config from reader.
// Non-fatal gcfg warnings are tolerated (FatalOnly); only fatal parse
// errors are returned (and also logged here).
func readConfig(reader io.Reader) (*ConfigFile, error) {
    cfg := &ConfigFile{}
    if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil {
        glog.Errorf("Couldn't read config: %v", err)
        return nil, err
    }
    return cfg, nil
}
|
||||
|
||||
// generateCloudConfig builds a CloudConfig from an optional parsed ConfigFile.
// Precedence: explicit config-file values override metadata-server discovery;
// when configFile is nil everything (project, zone, network name) comes from
// the metadata server. The statement order below is significant — defaults are
// set first and selectively overwritten.
func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err error) {
    cloudConfig = &CloudConfig{}
    // By default, fetch token from GCE metadata server
    cloudConfig.TokenSource = google.ComputeTokenSource("")
    cloudConfig.UseMetadataServer = true

    featureMap := make(map[string]bool)
    cloudConfig.AlphaFeatureGate = &AlphaFeatureGate{featureMap}
    if configFile != nil {
        if configFile.Global.ApiEndpoint != "" {
            cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint
        }

        if configFile.Global.TokenURL != "" {
            // if tokenURL is nil, set tokenSource to nil. This will force the OAuth client to fall
            // back to use DefaultTokenSource. This allows running gceCloud remotely.
            if configFile.Global.TokenURL == "nil" {
                cloudConfig.TokenSource = nil
            } else {
                cloudConfig.TokenSource = NewAltTokenSource(configFile.Global.TokenURL, configFile.Global.TokenBody)
            }
        }

        cloudConfig.NodeTags = configFile.Global.NodeTags
        cloudConfig.NodeInstancePrefix = configFile.Global.NodeInstancePrefix

        // NOTE(review): `err` here is a new, shadowed variable — a gate
        // construction error is only logged, never returned; the (possibly
        // nil) gate is installed regardless. Confirm this is intentional.
        alphaFeatureGate, err := NewAlphaFeatureGate(configFile.Global.AlphaFeatures)
        if err != nil {
            glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
        }
        cloudConfig.AlphaFeatureGate = alphaFeatureGate
    } else {
        // initialize AlphaFeatureGate when no AlphaFeatures are configured.
        alphaFeatureGate, err := NewAlphaFeatureGate([]string{})
        if err != nil {
            glog.Errorf("Encountered error for initializing alpha feature gate: %v", err)
        }
        cloudConfig.AlphaFeatureGate = alphaFeatureGate
    }

    // retrieve projectID and zone from the metadata server when either is
    // missing from the config file.
    if configFile == nil || configFile.Global.ProjectID == "" || configFile.Global.LocalZone == "" {
        cloudConfig.ProjectID, cloudConfig.Zone, err = getProjectAndZone()
        if err != nil {
            return nil, err
        }
    }

    // Config-file values win over metadata-derived ones.
    if configFile != nil {
        if configFile.Global.ProjectID != "" {
            cloudConfig.ProjectID = configFile.Global.ProjectID
        }
        if configFile.Global.LocalZone != "" {
            cloudConfig.Zone = configFile.Global.LocalZone
        }
        if configFile.Global.NetworkProjectID != "" {
            cloudConfig.NetworkProjectID = configFile.Global.NetworkProjectID
        }
    }

    // retrieve region from the (now final) zone.
    cloudConfig.Region, err = GetGCERegion(cloudConfig.Zone)
    if err != nil {
        return nil, err
    }

    // generate managedZones
    cloudConfig.ManagedZones = []string{cloudConfig.Zone}
    if configFile != nil && configFile.Global.Multizone {
        cloudConfig.ManagedZones = nil // Use all zones in region
    }

    // Determine if network parameter is URL or Name: a "/" marks a
    // (full or partial) resource URL, otherwise it is a bare name.
    if configFile != nil && configFile.Global.NetworkName != "" {
        if strings.Contains(configFile.Global.NetworkName, "/") {
            cloudConfig.NetworkURL = configFile.Global.NetworkName
        } else {
            cloudConfig.NetworkName = configFile.Global.NetworkName
        }
    } else {
        cloudConfig.NetworkName, err = getNetworkNameViaMetadata()
        if err != nil {
            return nil, err
        }
    }

    // Determine if subnetwork parameter is URL or Name
    // If cluster is on a GCP network of mode=custom, then `SubnetName` must be specified in config file.
    if configFile != nil && configFile.Global.SubnetworkName != "" {
        if strings.Contains(configFile.Global.SubnetworkName, "/") {
            cloudConfig.SubnetworkURL = configFile.Global.SubnetworkName
        } else {
            cloudConfig.SubnetworkName = configFile.Global.SubnetworkName
        }
    }

    if configFile != nil {
        cloudConfig.SecondaryRangeName = configFile.Global.SecondaryRangeName
    }

    // err is nil on every path that reaches here.
    return cloudConfig, err
}
|
||||
|
||||
// CreateGCECloud creates a GCECloud object using the specified parameters.
// If no networkUrl is specified, loads networkName via rest call.
// If no tokenSource is specified, uses oauth2.DefaultTokenSource.
// If managedZones is nil / empty all zones in the region will be managed.
//
// Note: this mutates config in place (NetworkProjectID default, ManagedZones
// discovery). Most network-discovery failures are logged as warnings rather
// than returned, because kubelet calls this without full credentials/config.
func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
    // Remove any pre-release version and build metadata from the semver, leaving only the MAJOR.MINOR.PATCH portion.
    // See http://semver.org/.
    version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v")

    // Create a user-agent header append string to supply to the Google API clients, to identify Kubernetes as the origin of the GCP API calls.
    userAgent := fmt.Sprintf("Kubernetes/%s (%s %s)", version, runtime.GOOS, runtime.GOARCH)

    // Use ProjectID for NetworkProjectID, if it wasn't explicitly set.
    if config.NetworkProjectID == "" {
        config.NetworkProjectID = config.ProjectID
    }

    // Each compute API surface (v1 / beta / alpha) gets its own oauth client.
    client, err := newOauthClient(config.TokenSource)
    if err != nil {
        return nil, err
    }
    service, err := compute.New(client)
    if err != nil {
        return nil, err
    }
    service.UserAgent = userAgent

    client, err = newOauthClient(config.TokenSource)
    if err != nil {
        return nil, err
    }
    serviceBeta, err := computebeta.New(client)
    if err != nil {
        return nil, err
    }
    serviceBeta.UserAgent = userAgent

    client, err = newOauthClient(config.TokenSource)
    if err != nil {
        return nil, err
    }
    serviceAlpha, err := computealpha.New(client)
    if err != nil {
        return nil, err
    }
    serviceAlpha.UserAgent = userAgent

    // Expect override api endpoint to always be v1 api and follows the same pattern as prod.
    // Generate alpha and beta api endpoints based on override v1 api endpoint.
    // For example,
    // staging API endpoint: https://www.googleapis.com/compute/staging_v1/
    if config.ApiEndpoint != "" {
        service.BasePath = fmt.Sprintf("%sprojects/", config.ApiEndpoint)
        serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "beta", -1))
        serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "alpha", -1))
    }

    // NOTE(review): containerService reuses the last-created oauth client
    // (the alpha one) — presumably harmless; confirm this is intentional.
    containerService, err := container.New(client)
    if err != nil {
        return nil, err
    }
    containerService.UserAgent = userAgent

    // ProjectID and.NetworkProjectID may be project number or name.
    projID, netProjID := tryConvertToProjectNames(config.ProjectID, config.NetworkProjectID, service)
    // Differing projects means the cluster is on a cross-project (XPN) network.
    onXPN := projID != netProjID

    var networkURL string
    var subnetURL string
    var isLegacyNetwork bool

    if config.NetworkURL != "" {
        networkURL = config.NetworkURL
    } else if config.NetworkName != "" {
        networkURL = gceNetworkURL(config.ApiEndpoint, netProjID, config.NetworkName)
    } else {
        // Other consumers may use the cloudprovider without utilizing the wrapped GCE API functions
        // or functions requiring network/subnetwork URLs (e.g. Kubelet).
        glog.Warningf("No network name or URL specified.")
    }

    if config.SubnetworkURL != "" {
        subnetURL = config.SubnetworkURL
    } else if config.SubnetworkName != "" {
        subnetURL = gceSubnetworkURL(config.ApiEndpoint, netProjID, config.Region, config.SubnetworkName)
    } else {
        // Determine the type of network and attempt to discover the correct subnet for AUTO mode.
        // Gracefully fail because kubelet calls CreateGCECloud without any config, and minions
        // lack the proper credentials for API calls.
        if networkName := lastComponent(networkURL); networkName != "" {
            var n *compute.Network
            if n, err = getNetwork(service, netProjID, networkName); err != nil {
                glog.Warningf("Could not retrieve network %q; err: %v", networkName, err)
            } else {
                switch typeOfNetwork(n) {
                case netTypeLegacy:
                    glog.Infof("Network %q is type legacy - no subnetwork", networkName)
                    isLegacyNetwork = true
                case netTypeCustom:
                    glog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName)
                case netTypeAuto:
                    subnetURL, err = determineSubnetURL(service, netProjID, networkName, config.Region)
                    if err != nil {
                        glog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err)
                    } else {
                        glog.Infof("Auto selecting subnetwork %q", subnetURL)
                    }
                }
            }
        }
    }

    // Empty ManagedZones means "manage every zone in the region".
    if len(config.ManagedZones) == 0 {
        config.ManagedZones, err = getZonesForRegion(service, config.ProjectID, config.Region)
        if err != nil {
            return nil, err
        }
    }
    if len(config.ManagedZones) > 1 {
        glog.Infof("managing multiple zones: %v", config.ManagedZones)
    }

    operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(10, 100) // 10 qps, 100 bucket size.

    gce := &GCECloud{
        service:                  service,
        serviceAlpha:             serviceAlpha,
        serviceBeta:              serviceBeta,
        containerService:         containerService,
        projectID:                projID,
        networkProjectID:         netProjID,
        onXPN:                    onXPN,
        region:                   config.Region,
        localZone:                config.Zone,
        managedZones:             config.ManagedZones,
        networkURL:               networkURL,
        isLegacyNetwork:          isLegacyNetwork,
        subnetworkURL:            subnetURL,
        secondaryRangeName:       config.SecondaryRangeName,
        nodeTags:                 config.NodeTags,
        nodeInstancePrefix:       config.NodeInstancePrefix,
        useMetadataServer:        config.UseMetadataServer,
        operationPollRateLimiter: operationPollRateLimiter,
        AlphaFeatureGate:         config.AlphaFeatureGate,
        nodeZones:                map[string]sets.String{},
    }

    gce.manager = &gceServiceManager{gce}

    return gce, nil
}
|
||||
|
||||
// determineSubnetURL queries for all subnetworks in a region for a given network and returns
// the URL of the subnetwork which exists in the auto-subnet range.
// It errors when zero or more than one candidate subnetwork is found, since
// auto-mode networks are expected to have exactly one per region.
func determineSubnetURL(service *compute.Service, networkProjectID, networkName, region string) (string, error) {
    subnets, err := listSubnetworksOfNetwork(service, networkProjectID, networkName, region)
    if err != nil {
        return "", err
    }

    // Narrow to subnets whose range lies inside the auto-mode CIDR.
    autoSubnets, err := subnetsInCIDR(subnets, autoSubnetIPRange)
    if err != nil {
        return "", err
    }

    if len(autoSubnets) == 0 {
        return "", fmt.Errorf("no subnet exists in auto CIDR")
    }

    if len(autoSubnets) > 1 {
        return "", fmt.Errorf("multiple subnetworks in the same region exist in auto CIDR")
    }

    return autoSubnets[0].SelfLink, nil
}
|
||||
|
||||
// tryConvertToProjectNames best-effort converts numeric project identifiers
// to string project names via the compute API. On lookup failure the numeric
// form is kept (warning logged) — callers must tolerate either form.
func tryConvertToProjectNames(configProject, configNetworkProject string, service *compute.Service) (projID, netProjID string) {
    projID = configProject
    if isProjectNumber(projID) {
        projName, err := getProjectID(service, projID)
        if err != nil {
            glog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err)
        } else {
            projID = projName
        }
    }

    // Default the network project to the (possibly converted) main project
    // so identical inputs resolve identically without a second API call.
    netProjID = projID
    if configNetworkProject != configProject {
        netProjID = configNetworkProject
    }
    if isProjectNumber(netProjID) {
        netProjName, err := getProjectID(service, netProjID)
        if err != nil {
            glog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err)
        } else {
            netProjID = netProjName
        }
    }

    return projID, netProjID
}
|
||||
|
||||
// Initialize takes in a clientBuilder and spawns a goroutine for watching the clusterid configmap.
// This must be called before utilizing the funcs of gce.ClusterID
func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
    gce.clientBuilder = clientBuilder
    gce.client = clientBuilder.ClientOrDie("cloud-provider")

    if gce.OnXPN() {
        // On cross-project (XPN) clusters, wire up an event broadcaster and
        // recorder — presumably so XPN-specific conditions can be reported
        // as Kubernetes events; confirm against the recorder's users.
        gce.eventBroadcaster = record.NewBroadcaster()
        gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.CoreV1().RESTClient()).Events("")})
        gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"})
    }

    // The watcher runs for the life of the process; no shutdown hook here.
    go gce.watchClusterID()
}
|
||||
|
||||
// LoadBalancer returns an implementation of LoadBalancer for Google Compute Engine.
// GCECloud itself implements the interface, so the receiver is returned.
func (gce *GCECloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
    return gce, true
}
|
||||
|
||||
// Instances returns an implementation of Instances for Google Compute Engine.
// Always supported (second return is true).
func (gce *GCECloud) Instances() (cloudprovider.Instances, bool) {
    return gce, true
}
|
||||
|
||||
// Zones returns an implementation of Zones for Google Compute Engine.
// Always supported (second return is true).
func (gce *GCECloud) Zones() (cloudprovider.Zones, bool) {
    return gce, true
}
|
||||
|
||||
// Clusters returns an implementation of Clusters for Google Compute Engine.
func (gce *GCECloud) Clusters() (cloudprovider.Clusters, bool) {
    return gce, true
}
|
||||
|
||||
// Routes returns an implementation of Routes for Google Compute Engine.
// Always supported (second return is true).
func (gce *GCECloud) Routes() (cloudprovider.Routes, bool) {
    return gce, true
}
|
||||
|
||||
// ProviderName returns the cloud provider ID (the constant "gce").
func (gce *GCECloud) ProviderName() string {
    return ProviderName
}
|
||||
|
||||
// ProjectID returns the ProjectID corresponding to the project this cloud is in.
// This is the string project name when tryConvertToProjectNames succeeded.
func (g *GCECloud) ProjectID() string {
    return g.projectID
}
|
||||
|
||||
// NetworkProjectID returns the ProjectID corresponding to the project this cluster's network is in.
// Equal to ProjectID() unless the cluster runs on a cross-project (XPN) network.
func (g *GCECloud) NetworkProjectID() string {
    return g.networkProjectID
}
|
||||
|
||||
// Region returns the GCE region this cloud instance operates in.
func (gce *GCECloud) Region() string {
    return gce.region
}
|
||||
|
||||
// OnXPN returns true if the cluster is running on a cross project network (XPN),
// i.e. the network project differs from the instance project.
func (gce *GCECloud) OnXPN() bool {
    return gce.onXPN
}
|
||||
|
||||
// NetworkURL returns the network url (may be empty when no network name or
// URL was configured or discoverable at construction time).
func (gce *GCECloud) NetworkURL() string {
    return gce.networkURL
}
|
||||
|
||||
// SubnetworkURL returns the subnetwork url (may be empty, e.g. for legacy
// networks which have no subnetworks).
func (gce *GCECloud) SubnetworkURL() string {
    return gce.subnetworkURL
}
|
||||
|
||||
// IsLegacyNetwork reports whether the cluster network was detected as a
// legacy (non-subnetwork) GCE network during construction.
func (gce *GCECloud) IsLegacyNetwork() bool {
    return gce.isLegacyNetwork
}
|
||||
|
||||
// SetInformers registers a Node informer whose add/update/delete handlers
// maintain gce.nodeZones (zone -> node names) via updateNodeZones, and
// records the informer's HasSynced func for later readiness checks.
func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactory) {
    glog.Infof("Setting up informers for GCECloud")
    nodeInformer := informerFactory.Core().V1().Nodes().Informer()
    nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            node := obj.(*v1.Node)
            gce.updateNodeZones(nil, node)
        },
        UpdateFunc: func(prev, obj interface{}) {
            prevNode := prev.(*v1.Node)
            newNode := obj.(*v1.Node)
            // Only the failure-domain (zone) label matters to the index;
            // skip updates that did not change it.
            if newNode.Labels[kubeletapis.LabelZoneFailureDomain] ==
                prevNode.Labels[kubeletapis.LabelZoneFailureDomain] {
                return
            }
            gce.updateNodeZones(prevNode, newNode)
        },
        DeleteFunc: func(obj interface{}) {
            node, isNode := obj.(*v1.Node)
            // We can get DeletedFinalStateUnknown instead of *v1.Node here
            // and we need to handle that correctly.
            if !isNode {
                deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
                if !ok {
                    glog.Errorf("Received unexpected object: %v", obj)
                    return
                }
                node, ok = deletedState.Obj.(*v1.Node)
                if !ok {
                    glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
                    return
                }
            }
            gce.updateNodeZones(node, nil)
        },
    })
    gce.nodeInformerSynced = nodeInformer.HasSynced
}
|
||||
|
||||
func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) {
|
||||
gce.nodeZonesLock.Lock()
|
||||
defer gce.nodeZonesLock.Unlock()
|
||||
if prevNode != nil {
|
||||
prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
|
||||
if ok {
|
||||
gce.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
|
||||
if gce.nodeZones[prevZone].Len() == 0 {
|
||||
gce.nodeZones[prevZone] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if newNode != nil {
|
||||
newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
|
||||
if ok {
|
||||
if gce.nodeZones[newZone] == nil {
|
||||
gce.nodeZones[newZone] = sets.NewString()
|
||||
}
|
||||
gce.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Known-useless DNS search path.
// The dots are escaped so only literal "<digits>.google.internal." entries
// match; the previous pattern's unescaped dots matched any character (e.g.
// "1Xgoogle-internalX" would have been filtered).
var uselessDNSSearchRE = regexp.MustCompile(`^[0-9]+\.google\.internal\.$`)
|
||||
|
||||
// ScrubDNS filters DNS settings for pods. Nameservers are returned
// unchanged; search paths matching uselessDNSSearchRE are dropped.
func (gce *GCECloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
    // GCE has too many search paths by default. Filter the ones we know are useless.
    for _, s := range searches {
        if !uselessDNSSearchRE.MatchString(s) {
            srchOut = append(srchOut, s)
        }
    }
    return nameservers, srchOut
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID.
// Always true on GCE: the ClusterID watcher (see Initialize) is expected to
// provide one.
func (gce *GCECloud) HasClusterID() bool {
    return true
}
|
||||
|
||||
// Project IDs cannot have a digit for the first characeter. If the id contains a digit,
// then it must be a project number.
// isProjectNumber reports whether idOrNumber parses as an unsigned decimal
// integer, which distinguishes numeric project numbers from string IDs.
func isProjectNumber(idOrNumber string) bool {
    if _, err := strconv.ParseUint(idOrNumber, 10, 64); err != nil {
        return false
    }
    return true
}
|
||||
|
||||
// GCECloud implements cloudprovider.Interface.
|
||||
var _ cloudprovider.Interface = (*GCECloud)(nil)
|
||||
|
||||
func gceNetworkURL(apiEndpoint, project, network string) string {
|
||||
if apiEndpoint == "" {
|
||||
apiEndpoint = gceComputeAPIEndpoint
|
||||
}
|
||||
return apiEndpoint + strings.Join([]string{"projects", project, "global", "networks", network}, "/")
|
||||
}
|
||||
|
||||
func gceSubnetworkURL(apiEndpoint, project, region, subnetwork string) string {
|
||||
if apiEndpoint == "" {
|
||||
apiEndpoint = gceComputeAPIEndpoint
|
||||
}
|
||||
return apiEndpoint + strings.Join([]string{"projects", project, "regions", region, "subnetworks", subnetwork}, "/")
|
||||
}
|
||||
|
||||
// getProjectIDInURL parses full resource URLS and shorter URLS
// https://www.googleapis.com/compute/v1/projects/myproject/global/networks/mycustom
// projects/myproject/global/networks/mycustom
// All return "myproject"
func getProjectIDInURL(urlStr string) (string, error) {
    fields := strings.Split(urlStr, "/")
    // Return the path segment immediately following the first "projects".
    for i := 0; i+1 < len(fields); i++ {
        if fields[i] == "projects" {
            return fields[i+1], nil
        }
    }
    return "", fmt.Errorf("could not find project field in url: %v", urlStr)
}
|
||||
|
||||
// getRegionInURL parses full resource URLS and shorter URLS
// https://www.googleapis.com/compute/v1/projects/myproject/regions/us-central1/subnetworks/a
// projects/myproject/regions/us-central1/subnetworks/a
// All return "us-central1"
func getRegionInURL(urlStr string) string {
    fields := strings.Split(urlStr, "/")
    // Return the path segment immediately following the first "regions";
    // empty string when no such segment exists.
    for i := 0; i+1 < len(fields); i++ {
        if fields[i] == "regions" {
            return fields[i+1]
        }
    }
    return ""
}
|
||||
|
||||
// getNetworkNameViaMetadata asks the GCE metadata server for the network of
// the instance's first interface and returns its bare name (the last path
// component of "projects/<num>/networks/<name>").
func getNetworkNameViaMetadata() (string, error) {
    result, err := metadata.Get("instance/network-interfaces/0/network")
    if err != nil {
        return "", err
    }
    // Expect exactly 4 components: projects/<number>/networks/<name>.
    parts := strings.Split(result, "/")
    if len(parts) != 4 {
        return "", fmt.Errorf("unexpected response: %s", result)
    }
    return parts[3], nil
}
|
||||
|
||||
// getNetwork returns a GCP network by name via a blocking compute API call.
func getNetwork(svc *compute.Service, networkProjectID, networkID string) (*compute.Network, error) {
    return svc.Networks.Get(networkProjectID, networkID).Do()
}
|
||||
|
||||
// listSubnetworksOfNetwork returns a list of subnetworks for a particular region of a network.
// All pages of results are accumulated via the Pages callback.
func listSubnetworksOfNetwork(svc *compute.Service, networkProjectID, networkID, region string) ([]*compute.Subnetwork, error) {
	var subnets []*compute.Subnetwork
	// Server-side filter: keep subnetworks whose network URL ends in "/<networkID>".
	err := svc.Subnetworks.List(networkProjectID, region).Filter(fmt.Sprintf("network eq .*/%v$", networkID)).Pages(context.Background(), func(res *compute.SubnetworkList) error {
		subnets = append(subnets, res.Items...)
		return nil
	})
	return subnets, err
}
|
||||
|
||||
// getProjectID returns the project's string ID given a project number or string
func getProjectID(svc *compute.Service, projectNumberOrID string) (string, error) {
	proj, err := svc.Projects.Get(projectNumberOrID).Do()
	if err != nil {
		return "", err
	}

	// NOTE(review): returns proj.Name, which callers treat as the project's
	// string ID — confirm Name (rather than a dedicated ID field) is the
	// intended value for this API version.
	return proj.Name, nil
}
|
||||
|
||||
// getZonesForRegion returns the names of all zones whose Region URL's last
// component equals the given region name.
func getZonesForRegion(svc *compute.Service, projectID, region string) ([]string, error) {
	// TODO: use PageToken to list all not just the first 500
	listCall := svc.Zones.List(projectID)

	// Filtering by region doesn't seem to work
	// (tested in https://cloud.google.com/compute/docs/reference/latest/zones/list)
	// listCall = listCall.Filter("region eq " + region)

	res, err := listCall.Do()
	if err != nil {
		return nil, fmt.Errorf("unexpected response listing zones: %v", err)
	}
	zones := []string{}
	for _, zone := range res.Items {
		// Compare only the last URL component, since server-side region
		// filtering is not used (see note above).
		regionName := lastComponent(zone.Region)
		if regionName == region {
			zones = append(zones, zone.Name)
		}
	}
	return zones, nil
}
|
||||
|
||||
func findSubnetForRegion(subnetURLs []string, region string) string {
|
||||
for _, url := range subnetURLs {
|
||||
if thisRegion := getRegionInURL(url); thisRegion == region {
|
||||
return url
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) {
|
||||
if tokenSource == nil {
|
||||
var err error
|
||||
tokenSource, err = google.DefaultTokenSource(
|
||||
oauth2.NoContext,
|
||||
compute.CloudPlatformScope,
|
||||
compute.ComputeScope)
|
||||
glog.Infof("Using DefaultTokenSource %#v", tokenSource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
glog.Infof("Using existing Token Source %#v", tokenSource)
|
||||
}
|
||||
|
||||
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
if _, err := tokenSource.Token(); err != nil {
|
||||
glog.Errorf("error fetching initial token: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return oauth2.NewClient(oauth2.NoContext, tokenSource), nil
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) getProjectsAPIEndpoint() string {
|
||||
projectsApiEndpoint := gceComputeAPIEndpoint + "projects/"
|
||||
if manager.gce.service != nil {
|
||||
projectsApiEndpoint = manager.gce.service.BasePath
|
||||
}
|
||||
|
||||
return projectsApiEndpoint
|
||||
}
|
||||
|
||||
func (manager *gceServiceManager) getProjectsAPIEndpointAlpha() string {
|
||||
projectsApiEndpoint := gceComputeAPIEndpointAlpha + "projects/"
|
||||
if manager.gce.service != nil {
|
||||
projectsApiEndpoint = manager.gce.serviceAlpha.BasePath
|
||||
}
|
||||
|
||||
return projectsApiEndpoint
|
||||
}
|
198
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go
generated
vendored
Normal file
198
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// addressManager holds (reserves) a static regional IP address for a load
// balancer and releases it again when the address is controller-owned.
type addressManager struct {
	// logPrefix is prepended to every log line emitted by this manager.
	logPrefix string
	// svc is the GCE address API used to reserve/get/delete addresses.
	svc CloudAddressService
	// name is the controller-owned address name (the load balancer name).
	name string
	// serviceName is embedded into the address Description for traceability.
	serviceName string
	// targetIP is the IP to hold; empty means "allocate a new one".
	targetIP string
	addressType lbScheme
	region string
	subnetURL string
	// tryRelease is cleared when the held address turns out to be user-owned,
	// making ReleaseAddress a no-op.
	tryRelease bool
}
|
||||
|
||||
// newAddressManager constructs an addressManager that will hold/release the
// address named `name` in `region`, reserving `targetIP` when non-empty.
// tryRelease starts true and is cleared later if the address is user-owned.
func newAddressManager(svc CloudAddressService, serviceName, region, subnetURL, name, targetIP string, addressType lbScheme) *addressManager {
	return &addressManager{
		svc:         svc,
		logPrefix:   fmt.Sprintf("AddressManager(%q)", name),
		region:      region,
		serviceName: serviceName,
		name:        name,
		targetIP:    targetIP,
		addressType: addressType,
		tryRelease:  true,
		subnetURL:   subnetURL,
	}
}
|
||||
|
||||
// HoldAddress will ensure that the IP is reserved with an address - either owned by the controller
// or by a user. If the address is not the addressManager.name, then it's assumed to be a user's address.
// The string returned is the reserved IP address.
func (am *addressManager) HoldAddress() (string, error) {
	// HoldAddress starts with retrieving the address that we use for this load balancer (by name).
	// Retrieving an address by IP will indicate if the IP is reserved and if reserved by the user
	// or the controller, but won't tell us the current state of the controller's IP. The address
	// could be reserving another address; therefore, it would need to be deleted. In the normal
	// case of using a controller address, retrieving the address by name results in the fewest API
	// calls since it indicates whether a Delete is necessary before Reserve.
	glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType)
	// Get the address in case it was orphaned earlier
	addr, err := am.svc.GetBetaRegionAddress(am.name, am.region)
	if err != nil && !isNotFound(err) {
		return "", err
	}

	if addr != nil {
		// If address exists, check if the address had the expected attributes.
		validationError := am.validateAddress(addr)
		if validationError == nil {
			glog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType)
			return addr.Address, nil
		}

		// Wrong IP or wrong type: the orphaned address must be deleted so the
		// correct one can be reserved by ensureAddressReservation below.
		glog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError)
		err := am.svc.DeleteRegionAddress(addr.Name, am.region)
		if err != nil {
			if isNotFound(err) {
				// Already gone (e.g. deleted concurrently) — safe to proceed.
				glog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name)
			} else {
				return "", err
			}
		} else {
			glog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name)
		}
	}

	return am.ensureAddressReservation()
}
|
||||
|
||||
// ReleaseAddress will release the address if it's owned by the controller.
|
||||
func (am *addressManager) ReleaseAddress() error {
|
||||
if !am.tryRelease {
|
||||
glog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP)
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name)
|
||||
// Controller only ever tries to unreserve the address named with the load balancer's name.
|
||||
err := am.svc.DeleteRegionAddress(am.name, am.region)
|
||||
if err != nil {
|
||||
if isNotFound(err) {
|
||||
glog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name)
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *addressManager) ensureAddressReservation() (string, error) {
|
||||
// Try reserving the IP with controller-owned address name
|
||||
// If am.targetIP is an empty string, a new IP will be created.
|
||||
newAddr := &computebeta.Address{
|
||||
Name: am.name,
|
||||
Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, am.serviceName),
|
||||
Address: am.targetIP,
|
||||
AddressType: string(am.addressType),
|
||||
Subnetwork: am.subnetURL,
|
||||
}
|
||||
|
||||
reserveErr := am.svc.ReserveBetaRegionAddress(newAddr, am.region)
|
||||
if reserveErr == nil {
|
||||
if newAddr.Address != "" {
|
||||
glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name)
|
||||
return newAddr.Address, nil
|
||||
}
|
||||
|
||||
addr, err := am.svc.GetRegionAddress(newAddr.Name, am.region)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address)
|
||||
return addr.Address, nil
|
||||
} else if !isHTTPErrorCode(reserveErr, http.StatusConflict) && !isHTTPErrorCode(reserveErr, http.StatusBadRequest) {
|
||||
// If the IP is already reserved:
|
||||
// by an internal address: a StatusConflict is returned
|
||||
// by an external address: a BadRequest is returned
|
||||
return "", reserveErr
|
||||
}
|
||||
|
||||
// If the target IP was empty, we cannot try to find which IP caused a conflict.
|
||||
// If the name was already used, then the next sync will attempt deletion of that address.
|
||||
if am.targetIP == "" {
|
||||
return "", fmt.Errorf("failed to reserve address %q with no specific IP, err: %v", am.name, reserveErr)
|
||||
}
|
||||
|
||||
// Reserving the address failed due to a conflict or bad request. The address manager just checked that no address
|
||||
// exists with the name, so it may belong to the user.
|
||||
addr, err := am.svc.GetBetaRegionAddressByIP(am.region, am.targetIP)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get address by IP %q after reservation attempt, err: %q, reservation err: %q", am.targetIP, err, reserveErr)
|
||||
}
|
||||
|
||||
// Check that the address attributes are as required.
|
||||
if err := am.validateAddress(addr); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if am.isManagedAddress(addr) {
|
||||
// The address with this name is checked at the beginning of 'HoldAddress()', but for some reason
|
||||
// it was re-created by this point. May be possible that two controllers are running.
|
||||
glog.Warning("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
|
||||
} else {
|
||||
// If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it.
|
||||
glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description)
|
||||
am.tryRelease = false
|
||||
}
|
||||
|
||||
return addr.Address, nil
|
||||
}
|
||||
|
||||
func (am *addressManager) validateAddress(addr *computebeta.Address) error {
|
||||
if am.targetIP != "" && am.targetIP != addr.Address {
|
||||
return fmt.Errorf("address %q does not have the expected IP %q, actual: %q", addr.Name, am.targetIP, addr.Address)
|
||||
}
|
||||
if addr.AddressType != string(am.addressType) {
|
||||
return fmt.Errorf("address %q does not have the expected address type %q, actual: %q", addr.Name, am.addressType, addr.AddressType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isManagedAddress reports whether addr is controller-owned, i.e. it carries
// the manager's (load balancer's) name.
func (am *addressManager) isManagedAddress(addr *computebeta.Address) bool {
	return addr.Name == am.name
}
|
||||
|
||||
// ensureAddressDeleted deletes the named regional address, treating
// "not found" as success so the call is idempotent.
func ensureAddressDeleted(svc CloudAddressService, name, region string) error {
	return ignoreNotFound(svc.DeleteRegionAddress(name, region))
}
|
137
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager_test.go
generated
vendored
Normal file
137
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager_test.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
)
|
||||
|
||||
// Shared fixtures for the address manager tests.
const testSvcName = "my-service"
const testRegion = "us-central1"
const testSubnet = "/projects/x/testRegions/us-central1/testSubnetworks/customsub"
const testLBName = "a111111111111111"
|
||||
|
||||
// TestAddressManagerNoRequestedIP tests the typical case of passing in no requested IP
func TestAddressManagerNoRequestedIP(t *testing.T) {
	svc := NewFakeCloudAddressService()
	targetIP := "" // empty: the manager must allocate a fresh IP

	mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
	testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
	testReleaseAddress(t, mgr, svc, testLBName, testRegion)
}
|
||||
|
||||
// TestAddressManagerBasic tests the typical case of reserving and unreserving an address.
func TestAddressManagerBasic(t *testing.T) {
	svc := NewFakeCloudAddressService()
	targetIP := "1.1.1.1"

	mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
	testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
	testReleaseAddress(t, mgr, svc, testLBName, testRegion)
}
|
||||
|
||||
// TestAddressManagerOrphaned tests the case where the address exists with the IP being equal
// to the requested address (forwarding rule or loadbalancer IP).
func TestAddressManagerOrphaned(t *testing.T) {
	svc := NewFakeCloudAddressService()
	targetIP := "1.1.1.1"

	// Pre-reserve a matching address under the LB name to simulate the orphan.
	addr := &computebeta.Address{Name: testLBName, Address: targetIP, AddressType: string(schemeInternal)}
	err := svc.ReserveBetaRegionAddress(addr, testRegion)
	require.NoError(t, err)

	mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
	testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
	testReleaseAddress(t, mgr, svc, testLBName, testRegion)
}
|
||||
|
||||
// TestAddressManagerOutdatedOrphan tests the case where an address exists but points to
// an IP other than the forwarding rule or loadbalancer IP.
func TestAddressManagerOutdatedOrphan(t *testing.T) {
	svc := NewFakeCloudAddressService()
	previousAddress := "1.1.0.0"
	targetIP := "1.1.1.1"

	// Pre-reserve an address under the LB name with the wrong IP and wrong
	// scheme; HoldAddress must replace it.
	addr := &computebeta.Address{Name: testLBName, Address: previousAddress, AddressType: string(schemeExternal)}
	err := svc.ReserveBetaRegionAddress(addr, testRegion)
	require.NoError(t, err)

	mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
	testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(schemeInternal))
	testReleaseAddress(t, mgr, svc, testLBName, testRegion)
}
|
||||
|
||||
// TestAddressManagerExternallyOwned tests the case where the address exists but isn't
// owned by the controller.
func TestAddressManagerExternallyOwned(t *testing.T) {
	svc := NewFakeCloudAddressService()
	targetIP := "1.1.1.1"

	// The IP is held by a user-owned address (different name, same scheme).
	addr := &computebeta.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeInternal)}
	err := svc.ReserveBetaRegionAddress(addr, testRegion)
	require.NoError(t, err)

	mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
	ipToUse, err := mgr.HoldAddress()
	require.NoError(t, err)
	assert.NotEmpty(t, ipToUse)

	// No controller-owned address must have been created.
	_, err = svc.GetRegionAddress(testLBName, testRegion)
	assert.True(t, isNotFound(err))

	testReleaseAddress(t, mgr, svc, testLBName, testRegion)
}
|
||||
|
||||
// TestAddressManagerBadExternallyOwned tests the case where the address exists but isn't
// owned by the controller. However, this address has the wrong type.
func TestAddressManagerBadExternallyOwned(t *testing.T) {
	svc := NewFakeCloudAddressService()
	targetIP := "1.1.1.1"

	// The IP is held by a user-owned EXTERNAL address while the manager
	// wants INTERNAL, so HoldAddress must fail validation.
	addr := &computebeta.Address{Name: "my-important-address", Address: targetIP, AddressType: string(schemeExternal)}
	err := svc.ReserveBetaRegionAddress(addr, testRegion)
	require.NoError(t, err)

	mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, schemeInternal)
	_, err = mgr.HoldAddress()
	assert.NotNil(t, err)
}
|
||||
|
||||
// testHoldAddress asserts that HoldAddress succeeds and that the resulting
// reserved address has the expected name, scheme, and (when requested) IP.
func testHoldAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, name, region, targetIP, scheme string) {
	ipToUse, err := mgr.HoldAddress()
	require.NoError(t, err)
	assert.NotEmpty(t, ipToUse)

	addr, err := svc.GetBetaRegionAddress(name, region)
	require.NoError(t, err)
	if targetIP != "" {
		assert.EqualValues(t, targetIP, addr.Address)
	}
	assert.EqualValues(t, scheme, addr.AddressType)
}
|
||||
|
||||
// testReleaseAddress asserts that ReleaseAddress succeeds and that the named
// address no longer exists afterwards.
func testReleaseAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, name, region string) {
	err := mgr.ReleaseAddress()
	require.NoError(t, err)
	_, err = svc.GetBetaRegionAddress(name, region)
	assert.True(t, isNotFound(err))
}
|
190
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go
generated
vendored
Normal file
190
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// newAddressMetricContext starts a metric observation for an address API
// call at the v1 compute API version.
func newAddressMetricContext(request, region string) *metricContext {
	return newAddressMetricContextWithVersion(request, region, computeV1Version)
}
|
||||
|
||||
// newAddressMetricContextWithVersion starts a metric observation for an
// address API call at the given compute API version.
func newAddressMetricContextWithVersion(request, region, version string) *metricContext {
	return newGenericMetricContext("address", request, region, unusedMetricLabel, version)
}
|
||||
|
||||
// ReserveGlobalAddress creates a global address.
// Caller is allocated a random IP if they do not specify an ipAddress. If an
// ipAddress is specified, it must belong to the current project, eg: an
// ephemeral IP associated with a global forwarding rule.
func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error {
	mc := newAddressMetricContext("reserve", "")
	op, err := gce.service.GlobalAddresses.Insert(gce.projectID, addr).Do()
	if err != nil {
		// Record the failed call; on success metrics are observed when the
		// operation completes below.
		return mc.Observe(err)
	}
	return gce.waitForGlobalOp(op, mc)
}
|
||||
|
||||
// DeleteGlobalAddress deletes a global address by name.
// The call blocks until the underlying GCE operation completes.
func (gce *GCECloud) DeleteGlobalAddress(name string) error {
	mc := newAddressMetricContext("delete", "")
	op, err := gce.service.GlobalAddresses.Delete(gce.projectID, name).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForGlobalOp(op, mc)
}
|
||||
|
||||
// GetGlobalAddress returns the global address by name.
func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) {
	mc := newAddressMetricContext("get", "")
	v, err := gce.service.GlobalAddresses.Get(gce.projectID, name).Do()
	// Observe records the call metrics and passes err through unchanged.
	return v, mc.Observe(err)
}
|
||||
|
||||
// ReserveRegionAddress creates a region address
// and blocks until the underlying GCE operation completes.
func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error {
	mc := newAddressMetricContext("reserve", region)
	op, err := gce.service.Addresses.Insert(gce.projectID, region, addr).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForRegionOp(op, region, mc)
}
|
||||
|
||||
// ReserveAlphaRegionAddress creates an Alpha, regional address.
// The call blocks until the underlying GCE operation completes.
func (gce *GCECloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
	mc := newAddressMetricContextWithVersion("reserve", region, computeAlphaVersion)
	op, err := gce.serviceAlpha.Addresses.Insert(gce.projectID, region, addr).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForRegionOp(op, region, mc)
}
|
||||
|
||||
// ReserveBetaRegionAddress creates a beta region address
// and blocks until the underlying GCE operation completes.
func (gce *GCECloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
	mc := newAddressMetricContextWithVersion("reserve", region, computeBetaVersion)
	op, err := gce.serviceBeta.Addresses.Insert(gce.projectID, region, addr).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForRegionOp(op, region, mc)
}
|
||||
|
||||
// DeleteRegionAddress deletes a region address by name.
// The call blocks until the underlying GCE operation completes.
func (gce *GCECloud) DeleteRegionAddress(name, region string) error {
	mc := newAddressMetricContext("delete", region)
	op, err := gce.service.Addresses.Delete(gce.projectID, region, name).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForRegionOp(op, region, mc)
}
|
||||
|
||||
// GetRegionAddress returns the region address by name
func (gce *GCECloud) GetRegionAddress(name, region string) (*compute.Address, error) {
	mc := newAddressMetricContext("get", region)
	v, err := gce.service.Addresses.Get(gce.projectID, region, name).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// GetAlphaRegionAddress returns the Alpha, regional address by name.
func (gce *GCECloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
	mc := newAddressMetricContextWithVersion("get", region, computeAlphaVersion)
	v, err := gce.serviceAlpha.Addresses.Get(gce.projectID, region, name).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// GetBetaRegionAddress returns the beta region address by name
func (gce *GCECloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
	mc := newAddressMetricContextWithVersion("get", region, computeBetaVersion)
	v, err := gce.serviceBeta.Addresses.Get(gce.projectID, region, name).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// GetRegionAddressByIP returns the regional address matching the given IP address.
// Returns a googleapi NotFound error when no address has exactly that IP.
func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) {
	mc := newAddressMetricContext("list", region)
	addrs, err := gce.service.Addresses.List(gce.projectID, region).Filter("address eq " + ipAddress).Do()
	// Record the metrics for the call.
	mc.Observe(err)
	if err != nil {
		return nil, err
	}

	if len(addrs.Items) > 1 {
		// We don't expect more than one match.
		addrsToPrint := []compute.Address{}
		for _, addr := range addrs.Items {
			addrsToPrint = append(addrsToPrint, *addr)
		}
		glog.Errorf("More than one addresses matching the IP %q: %+v", ipAddress, addrsToPrint)
	}
	// Scan for an exact Address match rather than trusting the server-side
	// filter result alone.
	for _, addr := range addrs.Items {
		if addr.Address == ipAddress {
			return addr, nil
		}
	}
	return nil, makeGoogleAPINotFoundError(fmt.Sprintf("Address with IP %q was not found in region %q", ipAddress, region))
}
|
||||
|
||||
// GetBetaRegionAddressByIP returns the beta regional address matching the given IP address.
// Returns a googleapi NotFound error when no address has exactly that IP.
func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) {
	mc := newAddressMetricContext("list", region)
	addrs, err := gce.serviceBeta.Addresses.List(gce.projectID, region).Filter("address eq " + ipAddress).Do()
	// Record the metrics for the call.
	mc.Observe(err)
	if err != nil {
		return nil, err
	}

	if len(addrs.Items) > 1 {
		// We don't expect more than one match.
		addrsToPrint := []computebeta.Address{}
		for _, addr := range addrs.Items {
			addrsToPrint = append(addrsToPrint, *addr)
		}
		glog.Errorf("More than one addresses matching the IP %q: %+v", ipAddress, addrsToPrint)
	}
	// Scan for an exact Address match rather than trusting the server-side
	// filter result alone.
	for _, addr := range addrs.Items {
		if addr.Address == ipAddress {
			return addr, nil
		}
	}
	return nil, makeGoogleAPINotFoundError(fmt.Sprintf("Address with IP %q was not found in region %q", ipAddress, region))
}
|
||||
|
||||
// TODO(#51665): retire this function once Network Tiers becomes Beta in GCP.
// getNetworkTierFromAddress returns the network tier of the named regional
// address, or the default tier when the NetworkTiers alpha feature is off.
func (gce *GCECloud) getNetworkTierFromAddress(name, region string) (string, error) {
	if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
		return NetworkTierDefault.ToGCEValue(), nil
	}
	addr, err := gce.GetAlphaRegionAddress(name, region)
	if err != nil {
		// Error mapping is delegated to handleAlphaNetworkTierGetError.
		return handleAlphaNetworkTierGetError(err)
	}
	return addr.NetworkTier, nil
}
|
237
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go
generated
vendored
Normal file
237
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go
generated
vendored
Normal file
@ -0,0 +1,237 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// FakeCloudAddressService is an in-memory fake of the GCE address API,
// used by the address manager tests.

type FakeCloudAddressService struct {
	// count feeds the generated "1.2.3.<count>" IPs for reservations that
	// do not specify an address.
	count int
	// reservedAddrs tracks usage of IP addresses
	// Key is the IP address as a string
	reservedAddrs map[string]bool
	// addrsByRegionAndName
	// Outer key is for region string; inner key is for address name.
	addrsByRegionAndName map[string]map[string]*computealpha.Address
}

// FakeCloudAddressService Implements CloudAddressService
var _ CloudAddressService = &FakeCloudAddressService{}
|
||||
|
||||
// NewFakeCloudAddressService returns an empty fake, ready for use in tests.
func NewFakeCloudAddressService() *FakeCloudAddressService {
	return &FakeCloudAddressService{
		reservedAddrs:        make(map[string]bool),
		addrsByRegionAndName: make(map[string]map[string]*computealpha.Address),
	}
}
|
||||
|
||||
// SetRegionalAddresses sets the addresses of the region. This is used for
// setting the test environment. Any addresses previously recorded for the
// region are discarded.
func (cas *FakeCloudAddressService) SetRegionalAddresses(region string, addrs []*computealpha.Address) {
	// Reset addresses in the region.
	cas.addrsByRegionAndName[region] = make(map[string]*computealpha.Address)

	for _, addr := range addrs {
		cas.reservedAddrs[addr.Address] = true
		cas.addrsByRegionAndName[region][addr.Name] = addr
	}
}
|
||||
|
||||
// ReserveAlphaRegionAddress records addr as reserved in the fake. An empty
// Address gets a generated "1.2.3.<n>" IP and an empty AddressType defaults
// to external. Error codes mirror GCE: BadRequest when the IP is taken by an
// external address, Conflict for an internal one, and Conflict when the
// address name is already in use in the region.
func (cas *FakeCloudAddressService) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error {
	if addr.Address == "" {
		addr.Address = fmt.Sprintf("1.2.3.%d", cas.count)
		cas.count++
	}

	if addr.AddressType == "" {
		addr.AddressType = string(schemeExternal)
	}

	if cas.reservedAddrs[addr.Address] {
		msg := "IP in use"
		// When the IP is already in use, this call returns an error code based
		// on the type (internal vs external) of the address. This is to be
		// consistent with actual GCE API.
		switch lbScheme(addr.AddressType) {
		case schemeExternal:
			return makeGoogleAPIError(http.StatusBadRequest, msg)
		default:
			return makeGoogleAPIError(http.StatusConflict, msg)
		}
	}

	if _, exists := cas.addrsByRegionAndName[region]; !exists {
		cas.addrsByRegionAndName[region] = make(map[string]*computealpha.Address)
	}

	if _, exists := cas.addrsByRegionAndName[region][addr.Name]; exists {
		return makeGoogleAPIError(http.StatusConflict, "name in use")
	}

	cas.addrsByRegionAndName[region][addr.Name] = addr
	cas.reservedAddrs[addr.Address] = true
	return nil
}
|
||||
|
||||
func (cas *FakeCloudAddressService) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error {
|
||||
alphaAddr := convertToAlphaAddress(addr)
|
||||
return cas.ReserveAlphaRegionAddress(alphaAddr, region)
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) ReserveRegionAddress(addr *compute.Address, region string) error {
|
||||
alphaAddr := convertToAlphaAddress(addr)
|
||||
return cas.ReserveAlphaRegionAddress(alphaAddr, region)
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) {
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
if addr, exists := cas.addrsByRegionAndName[region][name]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
} else {
|
||||
return addr, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddress(name, region)
|
||||
if addr != nil {
|
||||
return convertToBetaAddress(addr), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetRegionAddress(name, region string) (*compute.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddress(name, region)
|
||||
if addr != nil {
|
||||
return convertToV1Address(addr), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) DeleteRegionAddress(name, region string) error {
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
return makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
addr, exists := cas.addrsByRegionAndName[region][name]
|
||||
if !exists {
|
||||
return makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
delete(cas.reservedAddrs, addr.Address)
|
||||
delete(cas.addrsByRegionAndName[region], name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetAlphaRegionAddressByIP(region, ipAddress string) (*computealpha.Address, error) {
|
||||
if _, exists := cas.addrsByRegionAndName[region]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
for _, addr := range cas.addrsByRegionAndName[region] {
|
||||
if addr.Address == ipAddress {
|
||||
return addr, nil
|
||||
}
|
||||
}
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetBetaRegionAddressByIP(name, region string) (*computebeta.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddressByIP(name, region)
|
||||
if addr != nil {
|
||||
return convertToBetaAddress(addr), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) GetRegionAddressByIP(name, region string) (*compute.Address, error) {
|
||||
addr, err := cas.GetAlphaRegionAddressByIP(name, region)
|
||||
if addr != nil {
|
||||
return convertToV1Address(addr), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) getNetworkTierFromAddress(name, region string) (string, error) {
|
||||
addr, err := cas.GetAlphaRegionAddress(name, region)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return addr.NetworkTier, nil
|
||||
}
|
||||
|
||||
func convertToV1Address(object gceObject) *compute.Address {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var addr compute.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 address: %v", object, err))
|
||||
}
|
||||
return &addr
|
||||
}
|
||||
|
||||
func convertToAlphaAddress(object gceObject) *computealpha.Address {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var addr computealpha.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to alpha address: %v", object, err))
|
||||
}
|
||||
// Set the default values for the Alpha fields.
|
||||
addr.NetworkTier = NetworkTierDefault.ToGCEValue()
|
||||
return &addr
|
||||
}
|
||||
|
||||
func convertToBetaAddress(object gceObject) *computebeta.Address {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var addr computebeta.Address
|
||||
if err := json.Unmarshal(enc, &addr); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to beta address: %v", object, err))
|
||||
}
|
||||
return &addr
|
||||
}
|
||||
|
||||
func (cas *FakeCloudAddressService) String() string {
|
||||
var b bytes.Buffer
|
||||
for region, regAddresses := range cas.addrsByRegionAndName {
|
||||
b.WriteString(fmt.Sprintf("%v:\n", region))
|
||||
for name, addr := range regAddresses {
|
||||
b.WriteString(fmt.Sprintf(" %v: %v\n", name, addr.Address))
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
70
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
Normal file
70
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
)
|
||||
|
||||
const (
	// AlphaFeatureNetworkTiers gates network-tier selection.
	//
	// alpha: v1.8 (for Services)
	//
	// Allows Services backed by a GCP load balancer to choose what network
	// tier to use. Currently supports "Standard" and "Premium" (default).
	AlphaFeatureNetworkTiers = "NetworkTiers"

	// AlphaFeatureGCEDisk gates use of the alpha Disk API
	// (presumably for alpha disk features — confirm against gce_disks.go).
	AlphaFeatureGCEDisk = "DiskAlphaAPI"

	// AlphaFeatureNetworkEndpointGroup gates the NetworkEndpointGroup API.
	AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
)
|
||||
|
||||
// knownAlphaFeatures is the set of all alpha features this provider
// understands; NewAlphaFeatureGate rejects any name not listed here.
var knownAlphaFeatures = map[string]bool{
	AlphaFeatureNetworkTiers:         true,
	AlphaFeatureGCEDisk:              true,
	AlphaFeatureNetworkEndpointGroup: true,
}
|
||||
|
||||
// AlphaFeatureGate records which alpha features were enabled when the gate
// was constructed (see NewAlphaFeatureGate).
type AlphaFeatureGate struct {
	// features maps feature name -> enabled.
	features map[string]bool
}
|
||||
|
||||
func (af *AlphaFeatureGate) Enabled(key string) bool {
|
||||
return af.features[key]
|
||||
}
|
||||
|
||||
func NewAlphaFeatureGate(features []string) (*AlphaFeatureGate, error) {
|
||||
errList := []error{}
|
||||
featureMap := make(map[string]bool)
|
||||
for _, name := range features {
|
||||
if _, ok := knownAlphaFeatures[name]; !ok {
|
||||
errList = append(errList, fmt.Errorf("alpha feature %q is not supported.", name))
|
||||
} else {
|
||||
featureMap[name] = true
|
||||
}
|
||||
}
|
||||
return &AlphaFeatureGate{featureMap}, utilerrors.NewAggregate(errList)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) alphaFeatureEnabled(feature string) error {
|
||||
if !gce.AlphaFeatureGate.Enabled(feature) {
|
||||
return fmt.Errorf("alpha feature %q is not enabled.", feature)
|
||||
}
|
||||
return nil
|
||||
}
|
134
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go
generated
vendored
Normal file
134
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// LoadBalancerType is the value of the ServiceAnnotationLoadBalancerType
// annotation (e.g. "Internal").
type LoadBalancerType string

// NetworkTier is the value of the NetworkTierAnnotationKey annotation
// ("Standard" or "Premium").
type NetworkTier string
|
||||
|
||||
const (
	// ServiceAnnotationLoadBalancerType is annotated on a service with type LoadBalancer
	// dictates what specific kind of GCP LB should be assembled.
	// Currently, only "internal" is supported.
	ServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type"

	// LBTypeInternal requests an internal GCP load balancer.
	LBTypeInternal LoadBalancerType = "Internal"
	// Deprecating the lowercase spelling of Internal.
	deprecatedTypeInternalLowerCase LoadBalancerType = "internal"

	// ServiceAnnotationILBBackendShare is annotated on a service with "true" when users
	// want to share GCP Backend Services for a set of internal load balancers.
	// ALPHA feature - this may be removed in a future release.
	ServiceAnnotationILBBackendShare = "alpha.cloud.google.com/load-balancer-backend-share"
	// This annotation did not correctly specify "alpha", so both annotations will be checked.
	deprecatedServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share"

	// NetworkTierAnnotationKey is annotated on a Service object to indicate which
	// network tier a GCP LB should use. The valid values are "Standard" and
	// "Premium" (default).
	NetworkTierAnnotationKey      = "cloud.google.com/network-tier"
	NetworkTierAnnotationStandard = "Standard"
	NetworkTierAnnotationPremium  = "Premium"

	// Typed counterparts of the annotation values; Premium is the default.
	NetworkTierStandard NetworkTier = NetworkTierAnnotationStandard
	NetworkTierPremium  NetworkTier = NetworkTierAnnotationPremium
	NetworkTierDefault  NetworkTier = NetworkTierPremium
)
|
||||
|
||||
// GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled.
|
||||
func GetLoadBalancerAnnotationType(service *v1.Service) (LoadBalancerType, bool) {
|
||||
v := LoadBalancerType("")
|
||||
if service.Spec.Type != v1.ServiceTypeLoadBalancer {
|
||||
return v, false
|
||||
}
|
||||
|
||||
l, ok := service.Annotations[ServiceAnnotationLoadBalancerType]
|
||||
v = LoadBalancerType(l)
|
||||
if !ok {
|
||||
return v, false
|
||||
}
|
||||
|
||||
switch v {
|
||||
case LBTypeInternal, deprecatedTypeInternalLowerCase:
|
||||
return LBTypeInternal, true
|
||||
default:
|
||||
return v, false
|
||||
}
|
||||
}
|
||||
|
||||
// GetLoadBalancerAnnotationBackendShare returns whether this service's backend service should be
|
||||
// shared with other load balancers. Health checks and the healthcheck firewall will be shared regardless.
|
||||
func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool {
|
||||
if l, exists := service.Annotations[ServiceAnnotationILBBackendShare]; exists && l == "true" {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for deprecated annotation key
|
||||
if l, exists := service.Annotations[deprecatedServiceAnnotationILBBackendShare]; exists && l == "true" {
|
||||
glog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// GetServiceNetworkTier returns the network tier of GCP load balancer
|
||||
// which should be assembled, and an error if the specified tier is not
|
||||
// supported.
|
||||
func GetServiceNetworkTier(service *v1.Service) (NetworkTier, error) {
|
||||
l, ok := service.Annotations[NetworkTierAnnotationKey]
|
||||
if !ok {
|
||||
return NetworkTierDefault, nil
|
||||
}
|
||||
|
||||
v := NetworkTier(l)
|
||||
switch v {
|
||||
case NetworkTierStandard:
|
||||
fallthrough
|
||||
case NetworkTierPremium:
|
||||
return v, nil
|
||||
default:
|
||||
return NetworkTierDefault, fmt.Errorf("unsupported network tier: %q", v)
|
||||
}
|
||||
}
|
||||
|
||||
// ToGCEValue converts NetworkTier to a string that we can populate the
// NetworkTier field of GCE objects with. GCE stores the value upper-cased
// (e.g. "PREMIUM"), so this is the uppercase form of the annotation value.
func (n NetworkTier) ToGCEValue() string {
	return strings.ToUpper(string(n))
}
|
||||
|
||||
// NetworkTierGCEValueToType converts the value of the NetworkTier field of a
|
||||
// GCE object to the NetworkTier type.
|
||||
func NetworkTierGCEValueToType(s string) NetworkTier {
|
||||
switch s {
|
||||
case "STANDARD":
|
||||
return NetworkTierStandard
|
||||
case "PREMIUM":
|
||||
return NetworkTierPremium
|
||||
default:
|
||||
return NetworkTier(s)
|
||||
}
|
||||
}
|
70
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations_test.go
generated
vendored
Normal file
70
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations_test.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestServiceNetworkTierAnnotationKey exercises GetServiceNetworkTier across
// the annotation's valid values, its absence, and an invalid value.
func TestServiceNetworkTierAnnotationKey(t *testing.T) {
	// createTestService builds a minimal LoadBalancer-agnostic Service
	// fixture; each case attaches its own annotations below.
	createTestService := func() *v1.Service {
		return &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "randome-uid",
				Name:      "test-svc",
				Namespace: "test-ns",
			},
		}
	}

	// Table-driven cases keyed by a human-readable description.
	for testName, testCase := range map[string]struct {
		annotations  map[string]string
		expectedTier NetworkTier
		expectErr    bool
	}{
		"Use the default when the annotation does not exist": {
			annotations:  nil,
			expectedTier: NetworkTierDefault,
		},
		"Standard tier": {
			annotations:  map[string]string{NetworkTierAnnotationKey: "Standard"},
			expectedTier: NetworkTierStandard,
		},
		"Premium tier": {
			annotations:  map[string]string{NetworkTierAnnotationKey: "Premium"},
			expectedTier: NetworkTierPremium,
		},
		// Invalid values must fall back to the default AND surface an error.
		"Report an error on invalid network tier value": {
			annotations:  map[string]string{NetworkTierAnnotationKey: "Unknown-tier"},
			expectedTier: NetworkTierPremium,
			expectErr:    true,
		},
	} {
		t.Run(testName, func(t *testing.T) {
			svc := createTestService()
			svc.Annotations = testCase.annotations
			actualTier, err := GetServiceNetworkTier(svc)
			assert.Equal(t, testCase.expectedTier, actualTier)
			assert.Equal(t, testCase.expectErr, err != nil)
		})
	}
}
|
183
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
Normal file
183
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// newBackendServiceMetricContext records a backendservice API call against
// the v1 compute API version for metrics purposes.
func newBackendServiceMetricContext(request, region string) *metricContext {
	return newBackendServiceMetricContextWithVersion(request, region, computeV1Version)
}

// newBackendServiceMetricContextWithVersion is like
// newBackendServiceMetricContext but lets the caller pick the compute API
// version label (v1/beta/alpha).
func newBackendServiceMetricContextWithVersion(request, region, version string) *metricContext {
	return newGenericMetricContext("backendservice", request, region, unusedMetricLabel, version)
}
|
||||
|
||||
// GetGlobalBackendService retrieves a backend by name.
func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendService, error) {
	mc := newBackendServiceMetricContext("get", "")
	v, err := gce.service.BackendServices.Get(gce.projectID, name).Do()
	// mc.Observe records the call outcome for metrics and returns err unchanged.
	return v, mc.Observe(err)
}

// GetAlphaGlobalBackendService retrieves alpha backend by name.
func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) {
	mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion)
	v, err := gce.serviceAlpha.BackendServices.Get(gce.projectID, name).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// UpdateGlobalBackendService applies the given BackendService as an update to an existing service.
func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error {
	mc := newBackendServiceMetricContext("update", "")
	op, err := gce.service.BackendServices.Update(gce.projectID, bg.Name, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	// Block until the asynchronous GCE operation completes.
	return gce.waitForGlobalOp(op, mc)
}

// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an update to an existing service.
func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
	mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion)
	op, err := gce.serviceAlpha.BackendServices.Update(gce.projectID, bg.Name, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
}
|
||||
|
||||
// DeleteGlobalBackendService deletes the given BackendService by name.
// Deleting a service that no longer exists is treated as success, making
// the call idempotent.
func (gce *GCECloud) DeleteGlobalBackendService(name string) error {
	mc := newBackendServiceMetricContext("delete", "")
	op, err := gce.service.BackendServices.Delete(gce.projectID, name).Do()
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil
		}
		return mc.Observe(err)
	}

	// Block until the asynchronous GCE operation completes.
	return gce.waitForGlobalOp(op, mc)
}
|
||||
|
||||
// CreateGlobalBackendService creates the given BackendService.
func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) error {
	mc := newBackendServiceMetricContext("create", "")
	op, err := gce.service.BackendServices.Insert(gce.projectID, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	// Block until the asynchronous GCE operation completes.
	return gce.waitForGlobalOp(op, mc)
}

// CreateAlphaGlobalBackendService creates the given alpha BackendService.
func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
	mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion)
	op, err := gce.serviceAlpha.BackendServices.Insert(gce.projectID, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
}
|
||||
|
||||
// ListGlobalBackendServices lists all backend services in the project.
func (gce *GCECloud) ListGlobalBackendServices() (*compute.BackendServiceList, error) {
	mc := newBackendServiceMetricContext("list", "")
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.BackendServices.List(gce.projectID).Do()
	return v, mc.Observe(err)
}

// GetGlobalBackendServiceHealth returns the health of the BackendService identified by the given
// name, in the given instanceGroup. The instanceGroupLink is the fully
// qualified self link of an instance group.
func (gce *GCECloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
	mc := newBackendServiceMetricContext("get_health", "")
	groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink}
	v, err := gce.service.BackendServices.GetHealth(gce.projectID, name, groupRef).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// GetRegionBackendService retrieves a backend by name.
func (gce *GCECloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) {
	mc := newBackendServiceMetricContext("get", region)
	v, err := gce.service.RegionBackendServices.Get(gce.projectID, region, name).Do()
	return v, mc.Observe(err)
}

// UpdateRegionBackendService applies the given BackendService as an update to an existing service.
func (gce *GCECloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error {
	mc := newBackendServiceMetricContext("update", region)
	op, err := gce.service.RegionBackendServices.Update(gce.projectID, region, bg.Name, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	// Block until the asynchronous regional GCE operation completes.
	return gce.waitForRegionOp(op, region, mc)
}
|
||||
|
||||
// DeleteRegionBackendService deletes the given BackendService by name.
// Deleting a service that no longer exists is treated as success, making
// the call idempotent.
func (gce *GCECloud) DeleteRegionBackendService(name, region string) error {
	mc := newBackendServiceMetricContext("delete", region)
	op, err := gce.service.RegionBackendServices.Delete(gce.projectID, region, name).Do()
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil
		}
		return mc.Observe(err)
	}

	// Block until the asynchronous regional GCE operation completes.
	return gce.waitForRegionOp(op, region, mc)
}
|
||||
|
||||
// CreateRegionBackendService creates the given BackendService.
func (gce *GCECloud) CreateRegionBackendService(bg *compute.BackendService, region string) error {
	mc := newBackendServiceMetricContext("create", region)
	op, err := gce.service.RegionBackendServices.Insert(gce.projectID, region, bg).Do()
	if err != nil {
		return mc.Observe(err)
	}

	// Block until the asynchronous regional GCE operation completes.
	return gce.waitForRegionOp(op, region, mc)
}

// ListRegionBackendServices lists all backend services in the project.
func (gce *GCECloud) ListRegionBackendServices(region string) (*compute.BackendServiceList, error) {
	mc := newBackendServiceMetricContext("list", region)
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.RegionBackendServices.List(gce.projectID, region).Do()
	return v, mc.Observe(err)
}

// GetRegionalBackendServiceHealth returns the health of the BackendService identified by the given
// name, in the given instanceGroup. The instanceGroupLink is the fully
// qualified self link of an instance group.
func (gce *GCECloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
	mc := newBackendServiceMetricContext("get_health", region)
	groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink}
	v, err := gce.service.RegionBackendServices.GetHealth(gce.projectID, region, name, groupRef).Do()
	return v, mc.Observe(err)
}
|
74
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go
generated
vendored
Normal file
74
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// newCertMetricContext records an SSL-certificate API call against the v1
// compute API version for metrics purposes.
func newCertMetricContext(request string) *metricContext {
	return newGenericMetricContext("cert", request, unusedMetricLabel, unusedMetricLabel, computeV1Version)
}

// GetSslCertificate returns the SslCertificate by name.
func (gce *GCECloud) GetSslCertificate(name string) (*compute.SslCertificate, error) {
	mc := newCertMetricContext("get")
	v, err := gce.service.SslCertificates.Get(gce.projectID, name).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// CreateSslCertificate creates and returns a SslCertificate. It waits for
// the insert operation to finish and then re-reads the certificate so the
// caller gets the server-populated object.
func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) {
	mc := newCertMetricContext("create")
	op, err := gce.service.SslCertificates.Insert(gce.projectID, sslCerts).Do()

	if err != nil {
		return nil, mc.Observe(err)
	}

	if err = gce.waitForGlobalOp(op, mc); err != nil {
		return nil, mc.Observe(err)
	}

	return gce.GetSslCertificate(sslCerts.Name)
}
|
||||
|
||||
// DeleteSslCertificate deletes the SslCertificate by name. Deleting a
// certificate that no longer exists is treated as success, making the call
// idempotent.
func (gce *GCECloud) DeleteSslCertificate(name string) error {
	mc := newCertMetricContext("delete")
	op, err := gce.service.SslCertificates.Delete(gce.projectID, name).Do()

	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil
		}

		return mc.Observe(err)
	}

	// Block until the asynchronous GCE operation completes.
	return gce.waitForGlobalOp(op, mc)
}

// ListSslCertificates lists all SslCertificates in the project.
func (gce *GCECloud) ListSslCertificates() (*compute.SslCertificateList, error) {
	mc := newCertMetricContext("list")
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.SslCertificates.List(gce.projectID).Do()
	return v, mc.Observe(err)
}
|
258
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go
generated
vendored
Normal file
258
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go
generated
vendored
Normal file
@ -0,0 +1,258 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
const (
	// Key used to persist UIDs to configmaps.
	UIDConfigMapName = "ingress-uid"
	// Namespace which contains the above config map
	UIDNamespace = metav1.NamespaceSystem
	// Data keys for the specific ids
	UIDCluster  = "uid"
	UIDProvider = "provider-uid"
	// UIDLengthBytes is the number of random bytes in a generated UID
	// (hex-encoded by makeUID).
	UIDLengthBytes = 8
	// Frequency of the updateFunc event handler being called
	// This does not actually query the apiserver for current state - the local cache value is used.
	updateFuncFrequency = 10 * time.Minute
)
|
||||
|
||||
// ClusterID caches the cluster/provider UIDs read from the ingress-uid
// ConfigMap, and can lazily create that ConfigMap when it is missing
// (see getOrInitialize).
type ClusterID struct {
	// idLock guards providerID and clusterID.
	idLock sync.RWMutex
	client clientset.Interface
	// cfgMapKey is "<namespace>/<name>" of the ConfigMap being watched.
	cfgMapKey string
	// store is the informer's local cache of the ConfigMap.
	store cache.Store
	// providerID and clusterID are nil until the ConfigMap is observed.
	providerID *string
	clusterID  *string
}
|
||||
|
||||
// watchClusterID continually watches for changes to the cluster id config
// map (kube-system/ingress-uid) and mirrors its contents into
// gce.ClusterID. controller.Run(nil) does not return, so this is expected
// to be started on its own goroutine by the caller.
func (gce *GCECloud) watchClusterID() {
	gce.ClusterID = ClusterID{
		cfgMapKey: fmt.Sprintf("%v/%v", UIDNamespace, UIDConfigMapName),
		client:    gce.client,
	}

	mapEventHandler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			m, ok := obj.(*v1.ConfigMap)
			if !ok || m == nil {
				glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok)
				return
			}
			// Ignore every ConfigMap other than kube-system/ingress-uid.
			if m.Namespace != UIDNamespace ||
				m.Name != UIDConfigMapName {
				return
			}

			glog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data)
			gce.ClusterID.update(m)
		},
		UpdateFunc: func(old, cur interface{}) {
			m, ok := cur.(*v1.ConfigMap)
			if !ok || m == nil {
				glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok)
				return
			}

			if m.Namespace != UIDNamespace ||
				m.Name != UIDConfigMapName {
				return
			}

			// Periodic resyncs re-deliver unchanged objects; skip them.
			if reflect.DeepEqual(old, cur) {
				return
			}

			glog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data)
			gce.ClusterID.update(m)
		},
	}

	// Watch only the single ingress-uid ConfigMap in kube-system.
	listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything())
	var controller cache.Controller
	gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler)

	controller.Run(nil)
}
|
||||
|
||||
// GetID returns the id which is unique to this cluster:
// if federated, return the provider id (unique to the cluster);
// if not federated, return the cluster id.
// It lazily creates the backing ConfigMap on first use.
func (ci *ClusterID) GetID() (string, error) {
	if err := ci.getOrInitialize(); err != nil {
		return "", err
	}

	ci.idLock.RLock()
	defer ci.idLock.RUnlock()
	if ci.clusterID == nil {
		return "", errors.New("Could not retrieve cluster id")
	}

	// If provider ID is set, (Federation is enabled) use this field
	if ci.providerID != nil {
		return *ci.providerID, nil
	}

	// providerID is not set, use the cluster id
	return *ci.clusterID, nil
}
|
||||
|
||||
// GetFederationId returns the id which could represent the entire Federation
// or just the cluster if not federated. The bool result is false when the
// cluster is not federated (providerID unset or equal to clusterID), in
// which case the id is empty.
func (ci *ClusterID) GetFederationId() (string, bool, error) {
	if err := ci.getOrInitialize(); err != nil {
		return "", false, err
	}

	ci.idLock.RLock()
	defer ci.idLock.RUnlock()
	if ci.clusterID == nil {
		return "", false, errors.New("Could not retrieve cluster id")
	}

	// If provider ID is not set, return false
	if ci.providerID == nil || *ci.clusterID == *ci.providerID {
		return "", false, nil
	}

	return *ci.clusterID, true, nil
}
|
||||
|
||||
// getOrInitialize either grabs the configmaps current value or defines the value
|
||||
// and sets the configmap. This is for the case of the user calling GetClusterID()
|
||||
// before the watch has begun.
|
||||
func (ci *ClusterID) getOrInitialize() error {
|
||||
if ci.store == nil {
|
||||
return errors.New("GCECloud.ClusterID is not ready. Call Initialize() before using.")
|
||||
}
|
||||
|
||||
if ci.clusterID != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
exists, err := ci.getConfigMap()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The configmap does not exist - let's try creating one.
|
||||
newId, err := makeUID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Creating clusteriD: %v", newId)
|
||||
cfg := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: UIDConfigMapName,
|
||||
Namespace: UIDNamespace,
|
||||
},
|
||||
}
|
||||
cfg.Data = map[string]string{
|
||||
UIDCluster: newId,
|
||||
UIDProvider: newId,
|
||||
}
|
||||
|
||||
if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil {
|
||||
glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err)
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Created a config map containing clusteriD: %v", newId)
|
||||
ci.update(cfg)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ci *ClusterID) getConfigMap() (bool, error) {
|
||||
item, exists, err := ci.store.GetByKey(ci.cfgMapKey)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !exists {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
m, ok := item.(*v1.ConfigMap)
|
||||
if !ok || m == nil {
|
||||
err = fmt.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", item, ok)
|
||||
glog.Error(err)
|
||||
return false, err
|
||||
}
|
||||
ci.update(m)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (ci *ClusterID) update(m *v1.ConfigMap) {
|
||||
ci.idLock.Lock()
|
||||
defer ci.idLock.Unlock()
|
||||
if clusterID, exists := m.Data[UIDCluster]; exists {
|
||||
ci.clusterID = &clusterID
|
||||
}
|
||||
if provId, exists := m.Data[UIDProvider]; exists {
|
||||
ci.providerID = &provId
|
||||
}
|
||||
}
|
||||
|
||||
func makeUID() (string, error) {
|
||||
b := make([]byte, UIDLengthBytes)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
func newSingleObjectListerWatcher(lw cache.ListerWatcher, objectName string) *singleObjListerWatcher {
|
||||
return &singleObjListerWatcher{lw: lw, objectName: objectName}
|
||||
}
|
||||
|
||||
type singleObjListerWatcher struct {
|
||||
lw cache.ListerWatcher
|
||||
objectName string
|
||||
}
|
||||
|
||||
func (sow *singleObjListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = "metadata.name=" + sow.objectName
|
||||
return sow.lw.List(options)
|
||||
}
|
||||
|
||||
func (sow *singleObjListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = "metadata.name=" + sow.objectName
|
||||
return sow.lw.Watch(options)
|
||||
}
|
55
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go
generated
vendored
Normal file
55
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
func newClustersMetricContext(request, zone string) *metricContext {
|
||||
return newGenericMetricContext("clusters", request, unusedMetricLabel, zone, computeV1Version)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) ListClusters() ([]string, error) {
|
||||
allClusters := []string{}
|
||||
|
||||
for _, zone := range gce.managedZones {
|
||||
clusters, err := gce.listClustersInZone(zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: Scoping? Do we need to qualify the cluster name?
|
||||
allClusters = append(allClusters, clusters...)
|
||||
}
|
||||
|
||||
return allClusters, nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) Master(clusterName string) (string, error) {
|
||||
return "k8s-" + clusterName + "-master.internal", nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) {
|
||||
mc := newClustersMetricContext("list_zone", zone)
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
list, err := gce.containerService.Projects.Zones.Clusters.List(gce.projectID, zone).Do()
|
||||
if err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
|
||||
result := []string{}
|
||||
for _, cluster := range list.Clusters {
|
||||
result = append(result, cluster.Name)
|
||||
}
|
||||
return result, mc.Observe(nil)
|
||||
}
|
1124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
Normal file
1124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1009
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks_test.go
generated
vendored
Normal file
1009
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
64
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go
generated
vendored
Normal file
64
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func newFirewallMetricContext(request string) *metricContext {
|
||||
return newGenericMetricContext("firewall", request, unusedMetricLabel, unusedMetricLabel, computeV1Version)
|
||||
}
|
||||
|
||||
// GetFirewall returns the Firewall by name.
|
||||
func (gce *GCECloud) GetFirewall(name string) (*compute.Firewall, error) {
|
||||
mc := newFirewallMetricContext("get")
|
||||
v, err := gce.service.Firewalls.Get(gce.NetworkProjectID(), name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// CreateFirewall creates the passed firewall
|
||||
func (gce *GCECloud) CreateFirewall(f *compute.Firewall) error {
|
||||
mc := newFirewallMetricContext("create")
|
||||
op, err := gce.service.Firewalls.Insert(gce.NetworkProjectID(), f).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOpInProject(op, gce.NetworkProjectID(), mc)
|
||||
}
|
||||
|
||||
// DeleteFirewall deletes the given firewall rule.
|
||||
func (gce *GCECloud) DeleteFirewall(name string) error {
|
||||
mc := newFirewallMetricContext("delete")
|
||||
op, err := gce.service.Firewalls.Delete(gce.NetworkProjectID(), name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForGlobalOpInProject(op, gce.NetworkProjectID(), mc)
|
||||
}
|
||||
|
||||
// UpdateFirewall applies the given firewall as an update to an existing service.
|
||||
func (gce *GCECloud) UpdateFirewall(f *compute.Firewall) error {
|
||||
mc := newFirewallMetricContext("update")
|
||||
op, err := gce.service.Firewalls.Update(gce.NetworkProjectID(), f.Name, f).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOpInProject(op, gce.NetworkProjectID(), mc)
|
||||
}
|
155
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go
generated
vendored
Normal file
155
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func newForwardingRuleMetricContext(request, region string) *metricContext {
|
||||
return newForwardingRuleMetricContextWithVersion(request, region, computeV1Version)
|
||||
}
|
||||
func newForwardingRuleMetricContextWithVersion(request, region, version string) *metricContext {
|
||||
return newGenericMetricContext("forwardingrule", request, region, unusedMetricLabel, version)
|
||||
}
|
||||
|
||||
// CreateGlobalForwardingRule creates the passed GlobalForwardingRule
|
||||
func (gce *GCECloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error {
|
||||
mc := newForwardingRuleMetricContext("create", "")
|
||||
op, err := gce.service.GlobalForwardingRules.Insert(gce.projectID, rule).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule.
|
||||
// targetProxyLink is the SelfLink of a TargetHttp(s)Proxy.
|
||||
func (gce *GCECloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error {
|
||||
mc := newForwardingRuleMetricContext("set_proxy", "")
|
||||
op, err := gce.service.GlobalForwardingRules.SetTarget(
|
||||
gce.projectID, forwardingRuleName, &compute.TargetReference{Target: targetProxyLink}).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// DeleteGlobalForwardingRule deletes the GlobalForwardingRule by name.
|
||||
func (gce *GCECloud) DeleteGlobalForwardingRule(name string) error {
|
||||
mc := newForwardingRuleMetricContext("delete", "")
|
||||
op, err := gce.service.GlobalForwardingRules.Delete(gce.projectID, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// GetGlobalForwardingRule returns the GlobalForwardingRule by name.
|
||||
func (gce *GCECloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) {
|
||||
mc := newForwardingRuleMetricContext("get", "")
|
||||
v, err := gce.service.GlobalForwardingRules.Get(gce.projectID, name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListGlobalForwardingRules lists all GlobalForwardingRules in the project.
|
||||
func (gce *GCECloud) ListGlobalForwardingRules() (*compute.ForwardingRuleList, error) {
|
||||
mc := newForwardingRuleMetricContext("list", "")
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
v, err := gce.service.GlobalForwardingRules.List(gce.projectID).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetRegionForwardingRule returns the RegionalForwardingRule by name & region.
|
||||
func (gce *GCECloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) {
|
||||
mc := newForwardingRuleMetricContext("get", region)
|
||||
v, err := gce.service.ForwardingRules.Get(gce.projectID, region, name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaRegionForwardingRule returns the Alpha forwarding rule by name & region.
|
||||
func (gce *GCECloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) {
|
||||
mc := newForwardingRuleMetricContextWithVersion("get", region, computeAlphaVersion)
|
||||
v, err := gce.serviceAlpha.ForwardingRules.Get(gce.projectID, region, name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListRegionForwardingRules lists all RegionalForwardingRules in the project & region.
|
||||
func (gce *GCECloud) ListRegionForwardingRules(region string) (*compute.ForwardingRuleList, error) {
|
||||
mc := newForwardingRuleMetricContext("list", region)
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
v, err := gce.service.ForwardingRules.List(gce.projectID, region).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// ListRegionForwardingRules lists all RegionalForwardingRules in the project & region.
|
||||
func (gce *GCECloud) ListAlphaRegionForwardingRules(region string) (*computealpha.ForwardingRuleList, error) {
|
||||
mc := newForwardingRuleMetricContextWithVersion("list", region, computeAlphaVersion)
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
v, err := gce.serviceAlpha.ForwardingRules.List(gce.projectID, region).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// CreateRegionForwardingRule creates and returns a
|
||||
// RegionalForwardingRule that points to the given BackendService
|
||||
func (gce *GCECloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error {
|
||||
mc := newForwardingRuleMetricContext("create", region)
|
||||
op, err := gce.service.ForwardingRules.Insert(gce.projectID, region, rule).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
}
|
||||
|
||||
// CreateAlphaRegionForwardingRule creates and returns an Alpha
|
||||
// forwarding fule in the given region.
|
||||
func (gce *GCECloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error {
|
||||
mc := newForwardingRuleMetricContextWithVersion("create", region, computeAlphaVersion)
|
||||
op, err := gce.serviceAlpha.ForwardingRules.Insert(gce.projectID, region, rule).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
}
|
||||
|
||||
// DeleteRegionForwardingRule deletes the RegionalForwardingRule by name & region.
|
||||
func (gce *GCECloud) DeleteRegionForwardingRule(name, region string) error {
|
||||
mc := newForwardingRuleMetricContext("delete", region)
|
||||
op, err := gce.service.ForwardingRules.Delete(gce.projectID, region, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForRegionOp(op, region, mc)
|
||||
}
|
||||
|
||||
// TODO(#51665): retire this function once Network Tiers becomes Beta in GCP.
|
||||
func (gce *GCECloud) getNetworkTierFromForwardingRule(name, region string) (string, error) {
|
||||
if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
|
||||
return NetworkTierDefault.ToGCEValue(), nil
|
||||
}
|
||||
fwdRule, err := gce.GetAlphaRegionForwardingRule(name, region)
|
||||
if err != nil {
|
||||
return handleAlphaNetworkTierGetError(err)
|
||||
}
|
||||
return fwdRule.NetworkTier, nil
|
||||
}
|
138
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule_fakes.go
generated
vendored
Normal file
138
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule_fakes.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
type FakeCloudForwardingRuleService struct {
|
||||
// fwdRulesByRegionAndName
|
||||
// Outer key is for region string; inner key is for fwdRuleess name.
|
||||
fwdRulesByRegionAndName map[string]map[string]*computealpha.ForwardingRule
|
||||
}
|
||||
|
||||
// FakeCloudForwardingRuleService Implements CloudForwardingRuleService
|
||||
var _ CloudForwardingRuleService = &FakeCloudForwardingRuleService{}
|
||||
|
||||
func NewFakeCloudForwardingRuleService() *FakeCloudForwardingRuleService {
|
||||
return &FakeCloudForwardingRuleService{
|
||||
fwdRulesByRegionAndName: make(map[string]map[string]*computealpha.ForwardingRule),
|
||||
}
|
||||
}
|
||||
|
||||
// SetRegionalForwardingRulees sets the fwdRuleesses of ther region. This is used for
|
||||
// setting the test environment.
|
||||
func (f *FakeCloudForwardingRuleService) SetRegionalForwardingRulees(region string, fwdRules []*computealpha.ForwardingRule) {
|
||||
// Reset fwdRuleesses in the region.
|
||||
f.fwdRulesByRegionAndName[region] = make(map[string]*computealpha.ForwardingRule)
|
||||
|
||||
for _, fwdRule := range fwdRules {
|
||||
f.fwdRulesByRegionAndName[region][fwdRule.Name] = fwdRule
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeCloudForwardingRuleService) CreateAlphaRegionForwardingRule(fwdRule *computealpha.ForwardingRule, region string) error {
|
||||
if _, exists := f.fwdRulesByRegionAndName[region]; !exists {
|
||||
f.fwdRulesByRegionAndName[region] = make(map[string]*computealpha.ForwardingRule)
|
||||
}
|
||||
|
||||
if _, exists := f.fwdRulesByRegionAndName[region][fwdRule.Name]; exists {
|
||||
return &googleapi.Error{Code: http.StatusConflict}
|
||||
}
|
||||
|
||||
f.fwdRulesByRegionAndName[region][fwdRule.Name] = fwdRule
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeCloudForwardingRuleService) CreateRegionForwardingRule(fwdRule *compute.ForwardingRule, region string) error {
|
||||
alphafwdRule := convertToAlphaForwardingRule(fwdRule)
|
||||
return f.CreateAlphaRegionForwardingRule(alphafwdRule, region)
|
||||
}
|
||||
|
||||
func (f *FakeCloudForwardingRuleService) DeleteRegionForwardingRule(name, region string) error {
|
||||
if _, exists := f.fwdRulesByRegionAndName[region]; !exists {
|
||||
return makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
if _, exists := f.fwdRulesByRegionAndName[region][name]; !exists {
|
||||
return makeGoogleAPINotFoundError("")
|
||||
}
|
||||
delete(f.fwdRulesByRegionAndName[region], name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeCloudForwardingRuleService) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) {
|
||||
if _, exists := f.fwdRulesByRegionAndName[region]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
}
|
||||
|
||||
if fwdRule, exists := f.fwdRulesByRegionAndName[region][name]; !exists {
|
||||
return nil, makeGoogleAPINotFoundError("")
|
||||
} else {
|
||||
return fwdRule, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeCloudForwardingRuleService) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) {
|
||||
fwdRule, err := f.GetAlphaRegionForwardingRule(name, region)
|
||||
if fwdRule != nil {
|
||||
return convertToV1ForwardingRule(fwdRule), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (f *FakeCloudForwardingRuleService) getNetworkTierFromForwardingRule(name, region string) (string, error) {
|
||||
fwdRule, err := f.GetAlphaRegionForwardingRule(name, region)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fwdRule.NetworkTier, nil
|
||||
}
|
||||
|
||||
func convertToV1ForwardingRule(object gceObject) *compute.ForwardingRule {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var fwdRule compute.ForwardingRule
|
||||
if err := json.Unmarshal(enc, &fwdRule); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 fwdRuleess: %v", object, err))
|
||||
}
|
||||
return &fwdRule
|
||||
}
|
||||
|
||||
func convertToAlphaForwardingRule(object gceObject) *computealpha.ForwardingRule {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var fwdRule computealpha.ForwardingRule
|
||||
if err := json.Unmarshal(enc, &fwdRule); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to alpha fwdRuleess: %v", object, err))
|
||||
}
|
||||
// Set the default values for the Alpha fields.
|
||||
fwdRule.NetworkTier = NetworkTierDefault.ToGCEValue()
|
||||
|
||||
return &fwdRule
|
||||
}
|
265
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go
generated
vendored
Normal file
265
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go
generated
vendored
Normal file
@ -0,0 +1,265 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
|
||||
"github.com/golang/glog"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
nodesHealthCheckPath = "/healthz"
|
||||
lbNodesHealthCheckPort = ports.ProxyHealthzPort
|
||||
)
|
||||
|
||||
var (
|
||||
minNodesHealthCheckVersion *utilversion.Version
|
||||
)
|
||||
|
||||
func init() {
|
||||
if v, err := utilversion.ParseGeneric("1.7.2"); err != nil {
|
||||
glog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err)
|
||||
} else {
|
||||
minNodesHealthCheckVersion = v
|
||||
}
|
||||
}
|
||||
|
||||
func newHealthcheckMetricContext(request string) *metricContext {
|
||||
return newHealthcheckMetricContextWithVersion(request, computeV1Version)
|
||||
}
|
||||
|
||||
func newHealthcheckMetricContextWithVersion(request, version string) *metricContext {
|
||||
return newGenericMetricContext("healthcheck", request, unusedMetricLabel, unusedMetricLabel, version)
|
||||
}
|
||||
|
||||
// GetHttpHealthCheck returns the given HttpHealthCheck by name.
|
||||
func (gce *GCECloud) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) {
|
||||
mc := newHealthcheckMetricContext("get_legacy")
|
||||
v, err := gce.service.HttpHealthChecks.Get(gce.projectID, name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateHttpHealthCheck applies the given HttpHealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
|
||||
mc := newHealthcheckMetricContext("update_legacy")
|
||||
op, err := gce.service.HttpHealthChecks.Update(gce.projectID, hc.Name, hc).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// DeleteHttpHealthCheck deletes the given HttpHealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHttpHealthCheck(name string) error {
|
||||
mc := newHealthcheckMetricContext("delete_legacy")
|
||||
op, err := gce.service.HttpHealthChecks.Delete(gce.projectID, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// CreateHttpHealthCheck creates the given HttpHealthCheck.
|
||||
func (gce *GCECloud) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
|
||||
mc := newHealthcheckMetricContext("create_legacy")
|
||||
op, err := gce.service.HttpHealthChecks.Insert(gce.projectID, hc).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// ListHttpHealthChecks lists all HttpHealthChecks in the project.
|
||||
func (gce *GCECloud) ListHttpHealthChecks() (*compute.HttpHealthCheckList, error) {
|
||||
mc := newHealthcheckMetricContext("list_legacy")
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
v, err := gce.service.HttpHealthChecks.List(gce.projectID).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// Legacy HTTPS Health Checks
|
||||
|
||||
// GetHttpsHealthCheck returns the given HttpsHealthCheck by name.
|
||||
func (gce *GCECloud) GetHttpsHealthCheck(name string) (*compute.HttpsHealthCheck, error) {
|
||||
mc := newHealthcheckMetricContext("get_legacy")
|
||||
v, err := gce.service.HttpsHealthChecks.Get(gce.projectID, name).Do()
|
||||
mc.Observe(err)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// UpdateHttpsHealthCheck applies the given HttpsHealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error {
|
||||
mc := newHealthcheckMetricContext("update_legacy")
|
||||
op, err := gce.service.HttpsHealthChecks.Update(gce.projectID, hc.Name, hc).Do()
|
||||
if err != nil {
|
||||
mc.Observe(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// DeleteHttpsHealthCheck deletes the given HttpsHealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHttpsHealthCheck(name string) error {
|
||||
mc := newHealthcheckMetricContext("delete_legacy")
|
||||
op, err := gce.service.HttpsHealthChecks.Delete(gce.projectID, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// CreateHttpsHealthCheck creates the given HttpsHealthCheck.
|
||||
func (gce *GCECloud) CreateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error {
|
||||
mc := newHealthcheckMetricContext("create_legacy")
|
||||
op, err := gce.service.HttpsHealthChecks.Insert(gce.projectID, hc).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// ListHttpsHealthChecks lists all HttpsHealthChecks in the project.
|
||||
func (gce *GCECloud) ListHttpsHealthChecks() (*compute.HttpsHealthCheckList, error) {
|
||||
mc := newHealthcheckMetricContext("list_legacy")
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
v, err := gce.service.HttpsHealthChecks.List(gce.projectID).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// Generic HealthCheck
|
||||
|
||||
// GetHealthCheck returns the given HealthCheck by name.
|
||||
func (gce *GCECloud) GetHealthCheck(name string) (*compute.HealthCheck, error) {
|
||||
mc := newHealthcheckMetricContext("get")
|
||||
v, err := gce.service.HealthChecks.Get(gce.projectID, name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetAlphaHealthCheck returns the given alpha HealthCheck by name.
|
||||
func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) {
|
||||
mc := newHealthcheckMetricContextWithVersion("get", computeAlphaVersion)
|
||||
v, err := gce.serviceAlpha.HealthChecks.Get(gce.projectID, name).Do()
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateHealthCheck applies the given HealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error {
|
||||
mc := newHealthcheckMetricContext("update")
|
||||
op, err := gce.service.HealthChecks.Update(gce.projectID, hc.Name, hc).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// UpdateAlphaHealthCheck applies the given alpha HealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
|
||||
mc := newHealthcheckMetricContextWithVersion("update", computeAlphaVersion)
|
||||
op, err := gce.serviceAlpha.HealthChecks.Update(gce.projectID, hc.Name, hc).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// DeleteHealthCheck deletes the given HealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHealthCheck(name string) error {
|
||||
mc := newHealthcheckMetricContext("delete")
|
||||
op, err := gce.service.HealthChecks.Delete(gce.projectID, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// CreateHealthCheck creates the given HealthCheck.
|
||||
func (gce *GCECloud) CreateHealthCheck(hc *compute.HealthCheck) error {
|
||||
mc := newHealthcheckMetricContext("create")
|
||||
op, err := gce.service.HealthChecks.Insert(gce.projectID, hc).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
|
||||
return gce.waitForGlobalOp(op, mc)
|
||||
}
|
||||
|
||||
// CreateAlphaHealthCheck creates the given alpha HealthCheck using the
// alpha compute API and waits for the global insert operation to complete.
func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error {
	mc := newHealthcheckMetricContextWithVersion("create", computeAlphaVersion)
	op, err := gce.serviceAlpha.HealthChecks.Insert(gce.projectID, hc).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForGlobalOp(op, mc)
}

// ListHealthChecks lists all HealthCheck in the project.
// NOTE: only the first page of results is returned (up to the API's
// default page size); see the TODO below.
func (gce *GCECloud) ListHealthChecks() (*compute.HealthCheckList, error) {
	mc := newHealthcheckMetricContext("list")
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.HealthChecks.List(gce.projectID).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// GetNodesHealthCheckPort returns the health check port used by the GCE load
// balancers (l4) for performing health checks on nodes.
func GetNodesHealthCheckPort() int32 {
	return lbNodesHealthCheckPort
}

// GetNodesHealthCheckPath returns the health check path used by the GCE load
// balancers (l4) for performing health checks on nodes.
func GetNodesHealthCheckPath() string {
	return nodesHealthCheckPath
}
|
||||
|
||||
// isAtLeastMinNodesHealthCheckVersion checks if a version is higher than
|
||||
// `minNodesHealthCheckVersion`.
|
||||
func isAtLeastMinNodesHealthCheckVersion(vstring string) bool {
|
||||
version, err := utilversion.ParseGeneric(vstring)
|
||||
if err != nil {
|
||||
glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
|
||||
return false
|
||||
}
|
||||
return version.AtLeast(minNodesHealthCheckVersion)
|
||||
}
|
||||
|
||||
// supportsNodesHealthCheck returns false if anyone of the nodes has version
|
||||
// lower than `minNodesHealthCheckVersion`.
|
||||
func supportsNodesHealthCheck(nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
if !isAtLeastMinNodesHealthCheckVersion(node.Status.NodeInfo.KubeProxyVersion) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks_test.go
generated
vendored
Normal file
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks_test.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// TestIsAtLeastMinNodesHealthCheckVersion exercises version parsing against
// the minimum supported kube-proxy version, including pre-release/build
// metadata suffixes on both sides of the cutoff.
func TestIsAtLeastMinNodesHealthCheckVersion(t *testing.T) {
	testCases := []struct {
		version string
		expect  bool
	}{
		{"v1.7.3", true},
		{"v1.7.2", true},
		{"v1.7.2-alpha.2.597+276d289b90d322", true},
		{"v1.6.0-beta.3.472+831q821c907t31a", false},
		{"v1.5.2", false},
	}

	for _, tc := range testCases {
		if res := isAtLeastMinNodesHealthCheckVersion(tc.version); res != tc.expect {
			t.Errorf("%v: want %v, got %v", tc.version, tc.expect, res)
		}
	}
}
|
||||
|
||||
// TestSupportsNodesHealthCheck verifies the all-or-nothing semantics of
// supportsNodesHealthCheck: all nodes new enough -> true; any node too old
// (even one among newer peers) -> false.
func TestSupportsNodesHealthCheck(t *testing.T) {
	testCases := []struct {
		desc   string
		nodes  []*v1.Node
		expect bool
	}{
		{
			"All nodes support nodes health check",
			[]*v1.Node{
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.7.2",
						},
					},
				},
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.7.2-alpha.2.597+276d289b90d322",
						},
					},
				},
			},
			true,
		},
		{
			"All nodes don't support nodes health check",
			[]*v1.Node{
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.6.0-beta.3.472+831q821c907t31a",
						},
					},
				},
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.5.2",
						},
					},
				},
			},
			false,
		},
		{
			"One node doesn't support nodes health check",
			[]*v1.Node{
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.7.3",
						},
					},
				},
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.7.2-alpha.2.597+276d289b90d322",
						},
					},
				},
				{
					Status: v1.NodeStatus{
						NodeInfo: v1.NodeSystemInfo{
							KubeProxyVersion: "v1.5.2",
						},
					},
				},
			},
			false,
		},
	}

	for _, tc := range testCases {
		if res := supportsNodesHealthCheck(tc.nodes); res != tc.expect {
			t.Errorf("%v: want %v, got %v", tc.desc, tc.expect, res)
		}
	}
}
|
127
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go
generated
vendored
Normal file
127
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import compute "google.golang.org/api/compute/v1"
|
||||
|
||||
// newInstanceGroupMetricContext builds a metric context for an
// "instancegroup" API call labeled with the request verb and zone
// (region is unused for zonal instance groups).
func newInstanceGroupMetricContext(request string, zone string) *metricContext {
	return newGenericMetricContext("instancegroup", request, unusedMetricLabel, zone, computeV1Version)
}
|
||||
|
||||
// CreateInstanceGroup creates an instance group with the given
// instances. It is the callers responsibility to add named ports.
// Blocks until the zonal insert operation completes.
func (gce *GCECloud) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error {
	mc := newInstanceGroupMetricContext("create", zone)
	op, err := gce.service.InstanceGroups.Insert(gce.projectID, zone, ig).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForZoneOp(op, zone, mc)
}

// DeleteInstanceGroup deletes an instance group.
// Blocks until the zonal delete operation completes.
func (gce *GCECloud) DeleteInstanceGroup(name string, zone string) error {
	mc := newInstanceGroupMetricContext("delete", zone)
	op, err := gce.service.InstanceGroups.Delete(
		gce.projectID, zone, name).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForZoneOp(op, zone, mc)
}
|
||||
|
||||
// ListInstanceGroups lists all InstanceGroups in the project and
// zone. NOTE: only the first page of results is returned; see TODO.
func (gce *GCECloud) ListInstanceGroups(zone string) (*compute.InstanceGroupList, error) {
	mc := newInstanceGroupMetricContext("list", zone)
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.InstanceGroups.List(gce.projectID, zone).Do()
	return v, mc.Observe(err)
}

// ListInstancesInInstanceGroup lists all the instances in a given
// instance group and state. NOTE: only the first page of results is
// returned; see TODO.
func (gce *GCECloud) ListInstancesInInstanceGroup(name string, zone string, state string) (*compute.InstanceGroupsListInstances, error) {
	mc := newInstanceGroupMetricContext("list_instances", zone)
	// TODO: use PageToken to list all not just the first 500
	v, err := gce.service.InstanceGroups.ListInstances(
		gce.projectID, zone, name,
		&compute.InstanceGroupsListInstancesRequest{InstanceState: state}).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// AddInstancesToInstanceGroup adds the given instances to the given
// instance group. A no-op (nil) for an empty instance list; otherwise
// blocks until the zonal operation completes.
func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error {
	mc := newInstanceGroupMetricContext("add_instances", zone)
	if len(instanceRefs) == 0 {
		return nil
	}

	op, err := gce.service.InstanceGroups.AddInstances(
		gce.projectID, zone, name,
		&compute.InstanceGroupsAddInstancesRequest{
			Instances: instanceRefs,
		}).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForZoneOp(op, zone, mc)
}

// RemoveInstancesFromInstanceGroup removes the given instances from
// the instance group. A no-op (nil) for an empty instance list;
// otherwise blocks until the zonal operation completes.
func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error {
	mc := newInstanceGroupMetricContext("remove_instances", zone)
	if len(instanceRefs) == 0 {
		return nil
	}

	op, err := gce.service.InstanceGroups.RemoveInstances(
		gce.projectID, zone, name,
		&compute.InstanceGroupsRemoveInstancesRequest{
			Instances: instanceRefs,
		}).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForZoneOp(op, zone, mc)
}
|
||||
|
||||
// SetNamedPortsOfInstanceGroup sets the list of named ports on a given instance group.
// The supplied list replaces the group's existing named ports; blocks until
// the zonal operation completes.
func (gce *GCECloud) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error {
	mc := newInstanceGroupMetricContext("set_namedports", zone)
	op, err := gce.service.InstanceGroups.SetNamedPorts(
		gce.projectID, zone, igName,
		&compute.InstanceGroupsSetNamedPortsRequest{NamedPorts: namedPorts}).Do()
	if err != nil {
		return mc.Observe(err)
	}

	return gce.waitForZoneOp(op, zone, mc)
}

// GetInstanceGroup returns an instance group by name.
func (gce *GCECloud) GetInstanceGroup(name string, zone string) (*compute.InstanceGroup, error) {
	mc := newInstanceGroupMetricContext("get", zone)
	v, err := gce.service.InstanceGroups.Get(gce.projectID, zone, name).Do()
	return v, mc.Observe(err)
}
|
690
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go
generated
vendored
Normal file
690
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go
generated
vendored
Normal file
@ -0,0 +1,690 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"github.com/golang/glog"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultZone = ""
|
||||
)
|
||||
|
||||
// newInstancesMetricContext builds a metric context for an "instances"
// API call labeled with the request verb and zone.
func newInstancesMetricContext(request, zone string) *metricContext {
	return newGenericMetricContext("instances", request, unusedMetricLabel, zone, computeV1Version)
}
|
||||
|
||||
func splitNodesByZone(nodes []*v1.Node) map[string][]*v1.Node {
|
||||
zones := make(map[string][]*v1.Node)
|
||||
for _, n := range nodes {
|
||||
z := getZone(n)
|
||||
if z != defaultZone {
|
||||
zones[z] = append(zones[z], n)
|
||||
}
|
||||
}
|
||||
return zones
|
||||
}
|
||||
|
||||
func getZone(n *v1.Node) string {
|
||||
zone, ok := n.Labels[kubeletapis.LabelZoneFailureDomain]
|
||||
if !ok {
|
||||
return defaultZone
|
||||
}
|
||||
return zone
|
||||
}
|
||||
|
||||
// ToInstanceReferences returns instance references by links
|
||||
func (gce *GCECloud) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) {
|
||||
for _, ins := range instanceNames {
|
||||
instanceLink := makeHostURL(gce.service.BasePath, gce.projectID, zone, ins)
|
||||
refs = append(refs, &compute.InstanceReference{Instance: instanceLink})
|
||||
}
|
||||
return refs
|
||||
}
|
||||
|
||||
// NodeAddresses is an implementation of Instances.NodeAddresses.
// It queries the local metadata server, so it always reports the addresses
// of the instance it runs on (the NodeName argument is ignored). Only the
// first network interface and its first access config are consulted.
func (gce *GCECloud) NodeAddresses(_ types.NodeName) ([]v1.NodeAddress, error) {
	internalIP, err := metadata.Get("instance/network-interfaces/0/ip")
	if err != nil {
		return nil, fmt.Errorf("couldn't get internal IP: %v", err)
	}
	externalIP, err := metadata.Get("instance/network-interfaces/0/access-configs/0/external-ip")
	if err != nil {
		return nil, fmt.Errorf("couldn't get external IP: %v", err)
	}
	return []v1.NodeAddress{
		{Type: v1.NodeInternalIP, Address: internalIP},
		{Type: v1.NodeExternalIP, Address: externalIP},
	}, nil
}
|
||||
|
||||
// NodeAddressesByProviderID returns the addresses of the instance named by
// the given provider ID ("project/zone/name").
// This method will not be called from the node that is requesting this ID.
// i.e. metadata service and other local methods cannot be used here.
// The internal IP of the first network interface is returned first,
// followed by one external IP per access config on that interface.
func (gce *GCECloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
	project, zone, name, err := splitProviderID(providerID)
	if err != nil {
		return []v1.NodeAddress{}, err
	}

	instance, err := gce.service.Instances.Get(project, zone, canonicalizeInstanceName(name)).Do()
	if err != nil {
		return []v1.NodeAddress{}, fmt.Errorf("error while querying for providerID %q: %v", providerID, err)
	}

	if len(instance.NetworkInterfaces) < 1 {
		return []v1.NodeAddress{}, fmt.Errorf("could not find network interfaces for providerID %q", providerID)
	}
	// Only the first interface is reported; additional interfaces are ignored.
	networkInterface := instance.NetworkInterfaces[0]

	nodeAddresses := []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: networkInterface.NetworkIP}}
	for _, config := range networkInterface.AccessConfigs {
		nodeAddresses = append(nodeAddresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: config.NatIP})
	}

	return nodeAddresses, nil
}
|
||||
|
||||
// instanceByProviderID returns the cloudprovider instance of the node
// with the specified unique providerID ("project/zone/name").
// A 404 from the API is translated into cloudprovider.InstanceNotFound
// so callers can distinguish "gone" from transient errors.
func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, error) {
	project, zone, name, err := splitProviderID(providerID)
	if err != nil {
		return nil, err
	}

	instance, err := gce.getInstanceFromProjectInZoneByName(project, zone, name)
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return nil, cloudprovider.InstanceNotFound
		}
		return nil, err
	}

	return instance, nil
}
|
||||
|
||||
// InstanceTypeByProviderID returns the cloudprovider instance type of the node
// with the specified unique providerID This method will not be called from the
// node that is requesting this ID. i.e. metadata service and other local
// methods cannot be used here
func (gce *GCECloud) InstanceTypeByProviderID(providerID string) (string, error) {
	instance, err := gce.instanceByProviderID(providerID)
	if err != nil {
		return "", err
	}

	// Type is the machine type's last URL component (e.g. "n1-standard-1").
	return instance.Type, nil
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the node with the specified NodeName (deprecated).
// When running on the instance being asked about, the numeric ID is served
// from the local metadata server; any metadata failure silently falls
// through to the GCE API lookup below.
func (gce *GCECloud) ExternalID(nodeName types.NodeName) (string, error) {
	instanceName := mapNodeNameToInstanceName(nodeName)
	if gce.useMetadataServer {
		// Use metadata, if possible, to fetch ID. See issue #12000
		if gce.isCurrentInstance(instanceName) {
			externalInstanceID, err := getCurrentExternalIDViaMetadata()
			if err == nil {
				return externalInstanceID, nil
			}
		}
	}

	// Fallback to GCE API call if metadata server fails to retrieve ID
	inst, err := gce.getInstanceByName(instanceName)
	if err != nil {
		return "", err
	}
	return strconv.FormatUint(inst.ID, 10), nil
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (gce *GCECloud) InstanceExistsByProviderID(providerID string) (bool, error) {
|
||||
_, err := gce.instanceByProviderID(providerID)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
// The ID has the form "project/zone/instanceName". When asked about the
// local instance, the answer is assembled from the metadata server;
// otherwise (or on metadata failure) the GCE API is consulted.
func (gce *GCECloud) InstanceID(nodeName types.NodeName) (string, error) {
	instanceName := mapNodeNameToInstanceName(nodeName)
	if gce.useMetadataServer {
		// Use metadata, if possible, to fetch ID. See issue #12000
		if gce.isCurrentInstance(instanceName) {
			projectID, zone, err := getProjectAndZone()
			if err == nil {
				return projectID + "/" + zone + "/" + canonicalizeInstanceName(instanceName), nil
			}
		}
	}
	instance, err := gce.getInstanceByName(instanceName)
	if err != nil {
		return "", err
	}
	return gce.projectID + "/" + instance.Zone + "/" + instance.Name, nil
}
|
||||
|
||||
// InstanceType returns the type of the specified node with the specified NodeName.
// Prefers the local metadata server when asked about the current instance;
// metadata failures silently fall back to the GCE API lookup.
func (gce *GCECloud) InstanceType(nodeName types.NodeName) (string, error) {
	instanceName := mapNodeNameToInstanceName(nodeName)
	if gce.useMetadataServer {
		// Use metadata, if possible, to fetch ID. See issue #12000
		if gce.isCurrentInstance(instanceName) {
			mType, err := getCurrentMachineTypeViaMetadata()
			if err == nil {
				return mType, nil
			}
		}
	}
	instance, err := gce.getInstanceByName(instanceName)
	if err != nil {
		return "", err
	}
	return instance.Type, nil
}
|
||||
|
||||
// AddSSHKeyToAllInstances adds an SSH public key for the given user to the
// project's common instance metadata (key "sshKeys"), making it available
// on all instances. The read-modify-write of project metadata is retried
// every 2s for up to 30s to ride out transient API failures and races;
// per-attempt errors are logged and swallowed so the poll can retry.
func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
		project, err := gce.service.Projects.Get(gce.projectID).Do()
		if err != nil {
			glog.Errorf("Could not get project: %v", err)
			return false, nil
		}
		// Entry format: "user:<key> user@user" (comment field reuses the user name).
		keyString := fmt.Sprintf("%s:%s %s@%s", user, strings.TrimSpace(string(keyData)), user, user)
		found := false
		for _, item := range project.CommonInstanceMetadata.Items {
			if item.Key == "sshKeys" {
				if strings.Contains(*item.Value, keyString) {
					// We've already added the key
					glog.Info("SSHKey already in project metadata")
					return true, nil
				}
				value := *item.Value + "\n" + keyString
				item.Value = &value
				found = true
				break
			}
		}
		if !found {
			// This is super unlikely, so log.
			glog.Infof("Failed to find sshKeys metadata, creating a new item")
			project.CommonInstanceMetadata.Items = append(project.CommonInstanceMetadata.Items,
				&compute.MetadataItems{
					Key:   "sshKeys",
					Value: &keyString,
				})
		}

		mc := newInstancesMetricContext("add_ssh_key", "")
		op, err := gce.service.Projects.SetCommonInstanceMetadata(
			gce.projectID, project.CommonInstanceMetadata).Do()

		if err != nil {
			glog.Errorf("Could not Set Metadata: %v", err)
			mc.Observe(err)
			return false, nil
		}

		if err := gce.waitForGlobalOp(op, mc); err != nil {
			glog.Errorf("Could not Set Metadata: %v", err)
			return false, nil
		}

		glog.Infof("Successfully added sshKey to project metadata")
		return true, nil
	})
}
|
||||
|
||||
// GetAllCurrentZones returns all the zones in which k8s nodes are currently running,
// computed from the informer-maintained gce.nodeZones map (zones with at
// least one node). Falls back to a cloud API scan when informers are not
// wired up (E2E binaries); errors if the node informer has not synced yet.
func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) {
	if gce.nodeInformerSynced == nil {
		glog.Warningf("GCECloud object does not have informers set, should only happen in E2E binary.")
		return gce.GetAllZonesFromCloudProvider()
	}
	gce.nodeZonesLock.Lock()
	defer gce.nodeZonesLock.Unlock()
	if !gce.nodeInformerSynced() {
		return nil, fmt.Errorf("Node informer is not synced when trying to GetAllCurrentZones")
	}
	zones := sets.NewString()
	for zone, nodes := range gce.nodeZones {
		if len(nodes) > 0 {
			zones.Insert(zone)
		}
	}
	return zones, nil
}
|
||||
|
||||
// GetAllZonesFromCloudProvider returns all the zones in which nodes are running
// Only use this in E2E tests to get zones, on real clusters this will
// get all zones with compute instances in them even if not k8s instances!!!
// ex. I have k8s nodes in us-central1-c and us-central1-b. I also have
// a non-k8s compute in us-central1-a. This func will return a,b, and c.
func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) {
	zones := sets.NewString()

	for _, zone := range gce.managedZones {
		mc := newInstancesMetricContext("list", zone)
		// We only retrieve one page in each zone - we only care about existence
		listCall := gce.service.Instances.List(gce.projectID, zone)

		// Narrow the response: only instance names are needed.
		listCall = listCall.Fields("items(name)")
		res, err := listCall.Do()
		if err != nil {
			return nil, mc.Observe(err)
		}
		mc.Observe(nil)

		if len(res.Items) != 0 {
			zones.Insert(zone)
		}
	}

	return zones, nil
}
|
||||
|
||||
// InsertInstance creates a new instance on GCP and blocks until the zonal
// insert operation completes. Errors are recorded in the "create"
// instances metric.
func (gce *GCECloud) InsertInstance(project string, zone string, rb *compute.Instance) error {
	mc := newInstancesMetricContext("create", zone)
	op, err := gce.service.Instances.Insert(project, zone, rb).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForZoneOp(op, zone, mc)
}
|
||||
|
||||
// ListInstanceNames returns a string of instance names seperated by spaces.
|
||||
func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) {
|
||||
res, err := gce.service.Instances.List(project, zone).Fields("items(name)").Do()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var output string
|
||||
for _, item := range res.Items {
|
||||
output += item.Name + " "
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
// DeleteInstance deletes an instance specified by project, zone, and name.
// Unlike the other mutators here, it returns the raw operation and does
// not wait for completion or record metrics.
func (gce *GCECloud) DeleteInstance(project, zone, name string) (*compute.Operation, error) {
	return gce.service.Instances.Delete(project, zone, name).Do()
}

// CurrentNodeName is an implementation of Instances.CurrentNodeName:
// on GCE the node name is simply the hostname.
func (gce *GCECloud) CurrentNodeName(hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}
|
||||
|
||||
// AliasRanges returns a list of CIDR ranges that are assigned to the
// `node` for allocation to pods. Returns a list of the form
// "<ip>/<netmask>". The instance is looked up via the v1 API first (to
// resolve its zone), then re-fetched via the beta API, which exposes
// AliasIpRanges; ranges from all network interfaces are collected.
func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) {
	var instance *gceInstance
	instance, err = gce.getInstanceByName(mapNodeNameToInstanceName(nodeName))
	if err != nil {
		return
	}

	var res *computebeta.Instance
	res, err = gce.serviceBeta.Instances.Get(
		gce.projectID, instance.Zone, instance.Name).Do()
	if err != nil {
		return
	}

	for _, networkInterface := range res.NetworkInterfaces {
		for _, aliasIpRange := range networkInterface.AliasIpRanges {
			cidrs = append(cidrs, aliasIpRange.IpCidrRange)
		}
	}
	return
}
|
||||
|
||||
// AddAliasToInstance adds an alias to the given instance from the named
// secondary range (gce.secondaryRangeName). The instance is resolved via
// the v1 API to find its zone, then fetched and updated through the alpha
// API, which supports AliasIpRanges. Only the first network interface is
// modified; extra interfaces trigger a warning. Blocks until the zonal
// update operation completes.
func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) error {

	v1instance, err := gce.getInstanceByName(mapNodeNameToInstanceName(nodeName))
	if err != nil {
		return err
	}
	instance, err := gce.serviceAlpha.Instances.Get(gce.projectID, v1instance.Zone, v1instance.Name).Do()
	if err != nil {
		return err
	}

	switch len(instance.NetworkInterfaces) {
	case 0:
		return fmt.Errorf("Instance %q has no network interfaces", nodeName)
	case 1:
	default:
		glog.Warningf("Instance %q has more than one network interface, using only the first (%v)",
			nodeName, instance.NetworkInterfaces)
	}

	iface := instance.NetworkInterfaces[0]
	iface.AliasIpRanges = append(iface.AliasIpRanges, &computealpha.AliasIpRange{
		IpCidrRange:         alias.String(),
		SubnetworkRangeName: gce.secondaryRangeName,
	})

	mc := newInstancesMetricContext("addalias", v1instance.Zone)
	op, err := gce.serviceAlpha.Instances.UpdateNetworkInterface(
		gce.projectID, lastComponent(instance.Zone), instance.Name, iface.Name, iface).Do()
	if err != nil {
		return mc.Observe(err)
	}
	return gce.waitForZoneOp(op, v1instance.Zone, mc)
}
|
||||
|
||||
// getInstancesByNames gets the named instances, returning
// cloudprovider.InstanceNotFound if any instance is not found.
// It scans every managed zone, paging through Instances.List (bounded by
// maxPages) and filtering by gce.nodeInstancePrefix when all requested
// names share that prefix; a single non-conforming name disables the
// server-side filter for the whole query. Results are returned in the
// same order as the input names.
func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
	instances := make(map[string]*gceInstance)
	remaining := len(names)

	nodeInstancePrefix := gce.nodeInstancePrefix
	for _, name := range names {
		name = canonicalizeInstanceName(name)
		if !strings.HasPrefix(name, gce.nodeInstancePrefix) {
			glog.Warningf("instance '%s' does not conform to prefix '%s', removing filter", name, gce.nodeInstancePrefix)
			nodeInstancePrefix = ""
		}
		instances[name] = nil
	}

	for _, zone := range gce.managedZones {
		if remaining == 0 {
			break
		}

		pageToken := ""
		page := 0
		for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
			listCall := gce.service.Instances.List(gce.projectID, zone)

			if nodeInstancePrefix != "" {
				// Add the filter for hosts
				listCall = listCall.Filter("name eq " + nodeInstancePrefix + ".*")
			}

			// TODO(zmerlynn): Internal bug 29524655
			// listCall = listCall.Fields("items(name,id,disks,machineType)")
			if pageToken != "" {
				listCall.PageToken(pageToken)
			}

			res, err := listCall.Do()
			if err != nil {
				return nil, err
			}
			pageToken = res.NextPageToken
			for _, i := range res.Items {
				name := i.Name
				// Skip instances that were not asked for.
				if _, ok := instances[name]; !ok {
					continue
				}

				instance := &gceInstance{
					Zone:  zone,
					Name:  name,
					ID:    i.Id,
					Disks: i.Disks,
					Type:  lastComponent(i.MachineType),
				}
				instances[name] = instance
				remaining--
			}
		}
		if page >= maxPages {
			glog.Errorf("getInstancesByNames exceeded maxPages=%d for Instances.List: truncating.", maxPages)
		}
	}

	// Re-emit in input order; a nil entry means the instance was never seen.
	instanceArray := make([]*gceInstance, len(names))
	for i, name := range names {
		name = canonicalizeInstanceName(name)
		instance := instances[name]
		if instance == nil {
			glog.Errorf("Failed to retrieve instance: %q", name)
			return nil, cloudprovider.InstanceNotFound
		}
		instanceArray[i] = instances[name]
	}

	return instanceArray, nil
}
|
||||
|
||||
// getInstanceByName gets the named instance, returning
// cloudprovider.InstanceNotFound if the instance is not found.
// Zones are probed in order; a 404 in one zone moves on to the next,
// while any other error aborts the search.
func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) {
	// Avoid changing behaviour when not managing multiple zones
	for _, zone := range gce.managedZones {
		instance, err := gce.getInstanceFromProjectInZoneByName(gce.projectID, zone, name)
		if err != nil {
			if isHTTPErrorCode(err, http.StatusNotFound) {
				continue
			}
			glog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err)
			return nil, err
		}
		return instance, nil
	}

	return nil, cloudprovider.InstanceNotFound
}
|
||||
|
||||
// getInstanceFromProjectInZoneByName fetches a single instance via the v1
// API and converts it to the internal gceInstance form (zone and machine
// type reduced to their last URL component). The API error, if any, is
// recorded in the "get" instances metric before being returned.
func (gce *GCECloud) getInstanceFromProjectInZoneByName(project, zone, name string) (*gceInstance, error) {
	name = canonicalizeInstanceName(name)
	mc := newInstancesMetricContext("get", zone)
	res, err := gce.service.Instances.Get(project, zone, name).Do()
	mc.Observe(err)
	if err != nil {
		return nil, err
	}

	return &gceInstance{
		Zone:  lastComponent(res.Zone),
		Name:  res.Name,
		ID:    res.Id,
		Disks: res.Disks,
		Type:  lastComponent(res.MachineType),
	}, nil
}
|
||||
|
||||
func getInstanceIDViaMetadata() (string, error) {
|
||||
result, err := metadata.Get("instance/hostname")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts := strings.Split(result, ".")
|
||||
if len(parts) == 0 {
|
||||
return "", fmt.Errorf("unexpected response: %s", result)
|
||||
}
|
||||
return parts[0], nil
|
||||
}
|
||||
|
||||
func getCurrentExternalIDViaMetadata() (string, error) {
|
||||
externalID, err := metadata.Get("instance/id")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("couldn't get external ID: %v", err)
|
||||
}
|
||||
return externalID, nil
|
||||
}
|
||||
|
||||
func getCurrentMachineTypeViaMetadata() (string, error) {
|
||||
mType, err := metadata.Get("instance/machine-type")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("couldn't get machine type: %v", err)
|
||||
}
|
||||
parts := strings.Split(mType, "/")
|
||||
if len(parts) != 4 {
|
||||
return "", fmt.Errorf("unexpected response for machine type: %s", mType)
|
||||
}
|
||||
|
||||
return parts[3], nil
|
||||
}
|
||||
|
||||
// isCurrentInstance uses metadata server to check if specified
// instanceID matches current machine's instanceID.
// Metadata failures are logged and treated as "not the current
// instance" (false), never surfaced to the caller.
func (gce *GCECloud) isCurrentInstance(instanceID string) bool {
	currentInstanceID, err := getInstanceIDViaMetadata()
	if err != nil {
		// Log and swallow error
		glog.Errorf("Failed to fetch instanceID via Metadata: %v", err)
		return false
	}

	return currentInstanceID == canonicalizeInstanceName(instanceID)
}
|
||||
|
||||
// ComputeHostTags grabs all tags from all instances being added to the pool.
// * The longest tag that is a prefix of the instance name is used
// * If any instance has no matching prefix tag, return error
// Invoking this method to get host tags is risky since it depends on the format
// of the host names in the cluster. Only use it as a fallback if gce.nodeTags
// is unspecified
func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) {
	// TODO: We could store the tags in gceInstance, so we could have already fetched it
	hostNamesByZone := make(map[string]map[string]bool) // map of zones -> map of names -> bool (for easy lookup)
	nodeInstancePrefix := gce.nodeInstancePrefix
	for _, host := range hosts {
		// If any host escapes the configured name prefix, drop the server-side
		// name filter entirely so no host is silently excluded below.
		if !strings.HasPrefix(host.Name, gce.nodeInstancePrefix) {
			glog.Warningf("instance '%s' does not conform to prefix '%s', ignoring filter", host, gce.nodeInstancePrefix)
			nodeInstancePrefix = ""
		}

		z, ok := hostNamesByZone[host.Zone]
		if !ok {
			z = make(map[string]bool)
			hostNamesByZone[host.Zone] = z
		}
		z[host.Name] = true
	}

	tags := sets.NewString()

	// List instances zone by zone, paging through results up to maxPages.
	for zone, hostNames := range hostNamesByZone {
		pageToken := ""
		page := 0
		for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
			listCall := gce.service.Instances.List(gce.projectID, zone)

			if nodeInstancePrefix != "" {
				// Add the filter for hosts
				listCall = listCall.Filter("name eq " + nodeInstancePrefix + ".*")
			}

			// Add the fields we want
			// TODO(zmerlynn): Internal bug 29524655
			// listCall = listCall.Fields("items(name,tags)")

			if pageToken != "" {
				listCall = listCall.PageToken(pageToken)
			}

			res, err := listCall.Do()
			if err != nil {
				return nil, err
			}
			pageToken = res.NextPageToken
			for _, instance := range res.Items {
				// Skip listed instances that are not in the requested set.
				if !hostNames[instance.Name] {
					continue
				}

				// Pick the longest tag that is a prefix of the instance name.
				longest_tag := ""
				for _, tag := range instance.Tags.Items {
					if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longest_tag) {
						longest_tag = tag
					}
				}
				if len(longest_tag) > 0 {
					tags.Insert(longest_tag)
				} else {
					return nil, fmt.Errorf("Could not find any tag that is a prefix of instance name for instance %s", instance.Name)
				}
			}
		}
		if page >= maxPages {
			// Results past maxPages are silently dropped; log loudly.
			glog.Errorf("computeHostTags exceeded maxPages=%d for Instances.List: truncating.", maxPages)
		}
	}
	if len(tags) == 0 {
		return nil, fmt.Errorf("No instances found")
	}
	return tags.List(), nil
}
|
||||
|
||||
// GetNodeTags will first try returning the list of tags specified in GCE cloud Configuration.
// If they weren't provided, it'll compute the host tags with the given hostnames. If the list
// of hostnames has not changed, a cached set of nodetags are returned.
func (gce *GCECloud) GetNodeTags(nodeNames []string) ([]string, error) {
	// If nodeTags were specified through configuration, use them
	if len(gce.nodeTags) > 0 {
		return gce.nodeTags, nil
	}

	// computeNodeTagLock guards the lastKnownNodeNames/lastComputedNodeTags
	// cache for the remainder of this function.
	gce.computeNodeTagLock.Lock()
	defer gce.computeNodeTagLock.Unlock()

	// Early return if hosts have not changed
	hosts := sets.NewString(nodeNames...)
	if hosts.Equal(gce.lastKnownNodeNames) {
		return gce.lastComputedNodeTags, nil
	}

	// Get GCE instance data by hostname
	instances, err := gce.getInstancesByNames(nodeNames)
	if err != nil {
		return nil, err
	}

	// Determine list of host tags
	tags, err := gce.computeHostTags(instances)
	if err != nil {
		return nil, err
	}

	// Save the list of tags (cache is only updated on success, so a failed
	// computation does not poison subsequent lookups).
	gce.lastKnownNodeNames = hosts
	gce.lastComputedNodeTags = tags
	return tags, nil
}
|
61
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_interfaces.go
generated
vendored
Normal file
61
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_interfaces.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// These interfaces are added for testability.
|
||||
|
||||
// CloudAddressService is an interface for managing addresses
type CloudAddressService interface {
	// GA (v1) API.
	ReserveRegionAddress(address *compute.Address, region string) error
	GetRegionAddress(name string, region string) (*compute.Address, error)
	GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error)
	DeleteRegionAddress(name, region string) error
	// TODO: Mock Global endpoints

	// Alpha API.
	GetAlphaRegionAddress(name, region string) (*computealpha.Address, error)
	ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error

	// Beta API
	ReserveBetaRegionAddress(address *computebeta.Address, region string) error
	GetBetaRegionAddress(name string, region string) (*computebeta.Address, error)
	GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error)

	// TODO(#51665): Remove this once the Network Tiers becomes Alpha in GCP.
	// Unexported: for use within this package only.
	getNetworkTierFromAddress(name, region string) (string, error)
}
|
||||
|
||||
// CloudForwardingRuleService is an interface for managing forwarding rules.
// TODO: Expand the interface to include more methods.
type CloudForwardingRuleService interface {
	// GA (v1) API.
	GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error)
	CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error
	DeleteRegionForwardingRule(name, region string) error

	// Alpha API.
	GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error)
	CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error

	// Needed for the Alpha "Network Tiers" feature.
	// Unexported: for use within this package only.
	getNetworkTierFromForwardingRule(name, region string) (string, error)
}
|
204
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go
generated
vendored
Normal file
204
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
netsets "k8s.io/kubernetes/pkg/util/net/sets"
|
||||
)
|
||||
|
||||
// cidrs is a flag.Value holding a set of IP networks. isSet records whether
// the compiled-in defaults have been overridden from the command line.
type cidrs struct {
	ipn   netsets.IPNet
	isSet bool
}
|
||||
|
||||
var (
	// lbSrcRngsFlag holds the LB proxy / health-check source CIDRs. Defaults
	// are installed in init(), which also registers the
	// -cloud-provider-gce-lb-src-cidrs flag to override them.
	lbSrcRngsFlag cidrs
)
|
||||
|
||||
// newLoadBalancerMetricContext returns a metricContext recording this
// "loadbalancer" API request against the given region, attributed to the
// v1 compute API version.
func newLoadBalancerMetricContext(request, region string) *metricContext {
	return newGenericMetricContext("loadbalancer", request, region, unusedMetricLabel, computeV1Version)
}
|
||||
|
||||
// lbScheme is the GCE load-balancing scheme of a forwarding rule.
type lbScheme string

const (
	schemeExternal lbScheme = "EXTERNAL"
	schemeInternal lbScheme = "INTERNAL"
)
|
||||
|
||||
// init seeds lbSrcRngsFlag with Google's published LB/health-check source
// ranges and registers the command-line flag that can override them.
func init() {
	var err error
	// LB L7 proxies and all L3/4/7 health checkers have client addresses within these known CIDRs.
	lbSrcRngsFlag.ipn, err = netsets.ParseIPNets([]string{"130.211.0.0/22", "35.191.0.0/16", "209.85.152.0/22", "209.85.204.0/22"}...)
	if err != nil {
		// The literals above are compile-time constants, so a parse failure
		// is a programmer error: panic rather than continue misconfigured.
		panic("Incorrect default GCE L7 source ranges")
	}

	flag.Var(&lbSrcRngsFlag, "cloud-provider-gce-lb-src-cidrs", "CIDRs opened in GCE firewall for LB traffic proxy & health checks")
}
|
||||
|
||||
// String is the method to format the flag's value, part of the flag.Value interface.
// It renders the configured CIDRs as a comma-separated list.
func (c *cidrs) String() string {
	return strings.Join(c.ipn.StringSlice(), ",")
}
|
||||
|
||||
// Set parses a comma-separated list of CIDRs, part of the flag.Value
// interface. The first call replaces the compiled-in defaults; a second call
// (i.e. the flag repeated on the command line) is rejected with an error —
// note this contradicts the original "flag repeated multiple times" comment,
// which did not match the code.
func (c *cidrs) Set(value string) error {
	// On first Set(), clear the original defaults
	if !c.isSet {
		c.isSet = true
		c.ipn = make(netsets.IPNet)
	} else {
		return fmt.Errorf("GCE LB CIDRs have already been set")
	}

	for _, cidr := range strings.Split(value, ",") {
		_, ipnet, err := net.ParseCIDR(cidr)
		if err != nil {
			return err
		}

		c.ipn.Insert(ipnet)
	}
	return nil
}
|
||||
|
||||
// LoadBalancerSrcRanges contains the ranges of ips used by the GCE load balancers (l4 & L7)
// for proxying client requests and performing health checks.
// The returned slice reflects the current flag value (defaults unless the
// -cloud-provider-gce-lb-src-cidrs flag overrode them).
func LoadBalancerSrcRanges() []string {
	return lbSrcRngsFlag.ipn.StringSlice()
}
|
||||
|
||||
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
|
||||
func (gce *GCECloud) GetLoadBalancer(clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
fwd, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region)
|
||||
if err == nil {
|
||||
status := &v1.LoadBalancerStatus{}
|
||||
status.Ingress = []v1.LoadBalancerIngress{{IP: fwd.IPAddress}}
|
||||
|
||||
return status, true, nil
|
||||
}
|
||||
return nil, false, ignoreNotFound(err)
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer.
// If the service's scheme (INTERNAL/EXTERNAL) has changed since the existing
// forwarding rule was created, the old load balancer is deleted first.
func (gce *GCECloud) EnsureLoadBalancer(clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
	desiredScheme := getSvcScheme(svc)
	clusterID, err := gce.ClusterID.GetID()
	if err != nil {
		return nil, err
	}

	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, desiredScheme)

	// Not-found is expected on first creation; only real errors abort.
	existingFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region)
	if err != nil && !isNotFound(err) {
		return nil, err
	}

	if existingFwdRule != nil {
		existingScheme := lbScheme(strings.ToUpper(existingFwdRule.LoadBalancingScheme))

		// If the loadbalancer type changes between INTERNAL and EXTERNAL, the old load balancer should be deleted.
		if existingScheme != desiredScheme {
			glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme)
			switch existingScheme {
			case schemeInternal:
				err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc)
			default:
				err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc)
			}
			glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme, err)
			if err != nil {
				return nil, err
			}

			// Assume the ensureDeleted function successfully deleted the forwarding rule.
			existingFwdRule = nil
		}
	}

	var status *v1.LoadBalancerStatus
	switch desiredScheme {
	case schemeInternal:
		status, err = gce.ensureInternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes)
	default:
		status, err = gce.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes)
	}
	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err)
	return status, err
}
|
||||
|
||||
// UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer.
|
||||
func (gce *GCECloud) UpdateLoadBalancer(clusterName string, svc *v1.Service, nodes []*v1.Node) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
scheme := getSvcScheme(svc)
|
||||
clusterID, err := gce.ClusterID.GetID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, len(nodes))
|
||||
|
||||
switch scheme {
|
||||
case schemeInternal:
|
||||
err = gce.updateInternalLoadBalancer(clusterName, clusterID, svc, nodes)
|
||||
default:
|
||||
err = gce.updateExternalLoadBalancer(clusterName, svc, nodes)
|
||||
}
|
||||
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted.
|
||||
func (gce *GCECloud) EnsureLoadBalancerDeleted(clusterName string, svc *v1.Service) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
scheme := getSvcScheme(svc)
|
||||
clusterID, err := gce.ClusterID.GetID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region)
|
||||
|
||||
switch scheme {
|
||||
case schemeInternal:
|
||||
err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc)
|
||||
default:
|
||||
err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc)
|
||||
}
|
||||
glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err)
|
||||
return err
|
||||
}
|
||||
|
||||
func getSvcScheme(svc *v1.Service) lbScheme {
|
||||
if typ, ok := GetLoadBalancerAnnotationType(svc); ok && typ == LBTypeInternal {
|
||||
return schemeInternal
|
||||
}
|
||||
return schemeExternal
|
||||
}
|
1079
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go
generated
vendored
Normal file
1079
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
239
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external_test.go
generated
vendored
Normal file
239
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external_test.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// TestEnsureStaticIP verifies that ensureStaticIP reserves a fresh address
// on first call (existed=false) and is idempotent on the second call,
// returning the same IP with existed=true.
func TestEnsureStaticIP(t *testing.T) {
	fcas := NewFakeCloudAddressService()
	ipName := "some-static-ip"
	serviceName := ""
	region := "us-central1"

	// First ensure call
	ip, existed, err := ensureStaticIP(fcas, ipName, serviceName, region, "", NetworkTierDefault)
	if err != nil || existed || ip == "" {
		t.Fatalf(`ensureStaticIP(%v, %v, %v, %v, "") = %v, %v, %v; want valid ip, false, nil`, fcas, ipName, serviceName, region, ip, existed, err)
	}

	// Second ensure call
	var ipPrime string
	ipPrime, existed, err = ensureStaticIP(fcas, ipName, serviceName, region, ip, NetworkTierDefault)
	if err != nil || !existed || ip != ipPrime {
		t.Fatalf(`ensureStaticIP(%v, %v, %v, %v, %v) = %v, %v, %v; want %v, true, nil`, fcas, ipName, serviceName, region, ip, ipPrime, existed, err, ip)
	}
}
|
||||
|
||||
// TestEnsureStaticIPWithTier checks that ensureStaticIP records the requested
// network tier (PREMIUM/STANDARD) on the reserved address, as observed
// through the fake's alpha API.
func TestEnsureStaticIPWithTier(t *testing.T) {
	s := NewFakeCloudAddressService()
	serviceName := ""
	region := "us-east1"

	for desc, tc := range map[string]struct {
		name     string
		netTier  NetworkTier
		expected string
	}{
		"Premium (default)": {
			name:     "foo-1",
			netTier:  NetworkTierPremium,
			expected: "PREMIUM",
		},
		"Standard": {
			name:     "foo-2",
			netTier:  NetworkTierStandard,
			expected: "STANDARD",
		},
	} {
		t.Run(desc, func(t *testing.T) {
			ip, existed, err := ensureStaticIP(s, tc.name, serviceName, region, "", tc.netTier)
			assert.NoError(t, err)
			assert.False(t, existed)
			assert.NotEqual(t, "", ip)
			// Get the Address from the fake address service and verify that the tier
			// is set correctly.
			alphaAddr, err := s.GetAlphaRegionAddress(tc.name, region)
			require.NoError(t, err)
			assert.Equal(t, tc.expected, alphaAddr.NetworkTier)
		})
	}
}
|
||||
|
||||
// TestVerifyRequestedIP is a table-driven test of verifyUserRequestedIP:
// a requested IP must either be a reserved static address with a matching
// network tier (user-owned) or already be in use by the forwarding rule.
func TestVerifyRequestedIP(t *testing.T) {
	region := "test-region"
	lbRef := "test-lb"
	s := NewFakeCloudAddressService()

	for desc, tc := range map[string]struct {
		requestedIP     string
		fwdRuleIP       string
		netTier         NetworkTier
		addrList        []*computealpha.Address
		expectErr       bool
		expectUserOwned bool
	}{
		"requested IP exists": {
			requestedIP:     "1.1.1.1",
			netTier:         NetworkTierPremium,
			addrList:        []*computealpha.Address{{Name: "foo", Address: "1.1.1.1", NetworkTier: "PREMIUM"}},
			expectErr:       false,
			expectUserOwned: true,
		},
		"requested IP is not static, but is in use by the fwd rule": {
			requestedIP: "1.1.1.1",
			fwdRuleIP:   "1.1.1.1",
			netTier:     NetworkTierPremium,
			expectErr:   false,
		},
		"requested IP is not static and is not used by the fwd rule": {
			requestedIP: "1.1.1.1",
			fwdRuleIP:   "2.2.2.2",
			netTier:     NetworkTierPremium,
			expectErr:   true,
		},
		"no requested IP": {
			netTier:   NetworkTierPremium,
			expectErr: false,
		},
		"requested IP exists, but network tier does not match": {
			requestedIP: "1.1.1.1",
			netTier:     NetworkTierStandard,
			addrList:    []*computealpha.Address{{Name: "foo", Address: "1.1.1.1", NetworkTier: "PREMIUM"}},
			expectErr:   true,
		},
	} {
		t.Run(desc, func(t *testing.T) {
			// Note: subtests share one fake; each case overwrites the region's
			// address list before running.
			s.SetRegionalAddresses(region, tc.addrList)
			isUserOwnedIP, err := verifyUserRequestedIP(s, region, tc.requestedIP, tc.fwdRuleIP, lbRef, tc.netTier)
			assert.Equal(t, tc.expectErr, err != nil, fmt.Sprintf("err: %v", err))
			assert.Equal(t, tc.expectUserOwned, isUserOwnedIP)
		})
	}
}
|
||||
|
||||
// TestCreateForwardingRuleWithTier checks that createForwardingRule produces
// a forwarding rule whose fields — including the NetworkTier — exactly match
// expectations for both Premium and Standard tiers.
func TestCreateForwardingRuleWithTier(t *testing.T) {
	s := NewFakeCloudForwardingRuleService()
	// Common variables among the tests.
	ports := []v1.ServicePort{{Name: "foo", Protocol: v1.ProtocolTCP, Port: int32(123)}}
	region := "test-region"
	target := "test-target-pool"
	svcName := "foo-svc"

	for desc, tc := range map[string]struct {
		netTier      NetworkTier
		expectedRule *computealpha.ForwardingRule
	}{
		"Premium tier": {
			netTier: NetworkTierPremium,
			expectedRule: &computealpha.ForwardingRule{
				Name:        "lb-1",
				Description: `{"kubernetes.io/service-name":"foo-svc"}`,
				IPAddress:   "1.1.1.1",
				IPProtocol:  "TCP",
				PortRange:   "123-123",
				Target:      target,
				NetworkTier: "PREMIUM",
			},
		},
		"Standard tier": {
			netTier: NetworkTierStandard,
			expectedRule: &computealpha.ForwardingRule{
				Name:        "lb-2",
				Description: `{"kubernetes.io/service-name":"foo-svc"}`,
				IPAddress:   "2.2.2.2",
				IPProtocol:  "TCP",
				PortRange:   "123-123",
				Target:      target,
				NetworkTier: "STANDARD",
			},
		},
	} {
		t.Run(desc, func(t *testing.T) {
			// The expected rule doubles as the input spec (name + IP).
			lbName := tc.expectedRule.Name
			ipAddr := tc.expectedRule.IPAddress

			err := createForwardingRule(s, lbName, svcName, region, ipAddr, target, ports, tc.netTier)
			assert.NoError(t, err)

			alphaRule, err := s.GetAlphaRegionForwardingRule(lbName, region)
			assert.NoError(t, err)
			assert.Equal(t, tc.expectedRule, alphaRule)
		})
	}
}
|
||||
|
||||
func TestDeleteAddressWithWrongTier(t *testing.T) {
|
||||
region := "test-region"
|
||||
lbRef := "test-lb"
|
||||
s := NewFakeCloudAddressService()
|
||||
|
||||
for desc, tc := range map[string]struct {
|
||||
addrName string
|
||||
netTier NetworkTier
|
||||
addrList []*computealpha.Address
|
||||
expectDelete bool
|
||||
}{
|
||||
"Network tiers (premium) match; do nothing": {
|
||||
addrName: "foo1",
|
||||
netTier: NetworkTierPremium,
|
||||
addrList: []*computealpha.Address{{Name: "foo1", Address: "1.1.1.1", NetworkTier: "PREMIUM"}},
|
||||
},
|
||||
"Network tiers (standard) match; do nothing": {
|
||||
addrName: "foo2",
|
||||
netTier: NetworkTierStandard,
|
||||
addrList: []*computealpha.Address{{Name: "foo2", Address: "1.1.1.2", NetworkTier: "STANDARD"}},
|
||||
},
|
||||
"Wrong network tier (standard); delete address": {
|
||||
addrName: "foo3",
|
||||
netTier: NetworkTierPremium,
|
||||
addrList: []*computealpha.Address{{Name: "foo3", Address: "1.1.1.3", NetworkTier: "STANDARD"}},
|
||||
expectDelete: true,
|
||||
},
|
||||
"Wrong network tier (preimium); delete address": {
|
||||
addrName: "foo4",
|
||||
netTier: NetworkTierStandard,
|
||||
addrList: []*computealpha.Address{{Name: "foo4", Address: "1.1.1.4", NetworkTier: "PREMIUM"}},
|
||||
expectDelete: true,
|
||||
},
|
||||
} {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
s.SetRegionalAddresses(region, tc.addrList)
|
||||
// Sanity check to ensure we inject the right address.
|
||||
_, err := s.GetRegionAddress(tc.addrName, region)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = deleteAddressWithWrongTier(s, region, tc.addrName, lbRef, tc.netTier)
|
||||
assert.NoError(t, err)
|
||||
// Check whether the address still exists.
|
||||
_, err = s.GetRegionAddress(tc.addrName, region)
|
||||
if tc.expectDelete {
|
||||
assert.True(t, isNotFound(err))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
698
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go
generated
vendored
Normal file
698
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go
generated
vendored
Normal file
@ -0,0 +1,698 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const (
	// allInstances is a sentinel value used by the internal LB code;
	// its exact consumer is not visible in this chunk — verify usage
	// elsewhere in this file before relying on its meaning.
	allInstances = "ALL"
)
|
||||
|
||||
// ensureInternalLoadBalancer creates or reconciles all resources backing an
// INTERNAL load balancer for the service: instance groups, health check,
// firewalls, backend service, and the forwarding rule. It returns the LB
// status carrying the forwarding rule's IP. NOTE: ordering below matters —
// the forwarding rule is deleted/recreated around the backend-service update.
func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
	nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
	ports, protocol := getPortsAndProtocol(svc.Spec.Ports)
	scheme := schemeInternal
	loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
	sharedBackend := shareBackendService(svc)
	backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity)
	backendServiceLink := gce.getBackendServiceLink(backendServiceName)

	// Ensure instance groups exist and nodes are assigned to groups
	igName := makeInstanceGroupName(clusterID)
	igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
	if err != nil {
		return nil, err
	}

	// Get existing backend service (if exists)
	var existingBackendService *compute.BackendService
	if existingFwdRule != nil && existingFwdRule.BackendService != "" {
		existingBSName := getNameFromLink(existingFwdRule.BackendService)
		if existingBackendService, err = gce.GetRegionBackendService(existingBSName, gce.region); err != nil && !isNotFound(err) {
			return nil, err
		}
	}

	// Lock the sharedResourceLock to prevent any deletions of shared resources while assembling shared resources here
	gce.sharedResourceLock.Lock()
	defer gce.sharedResourceLock.Unlock()

	// Ensure health check exists before creating the backend service. The health check is shared
	// if externalTrafficPolicy=Cluster.
	sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc)
	hcName := makeHealthCheckName(loadBalancerName, clusterID, sharedHealthCheck)
	hcPath, hcPort := GetNodesHealthCheckPath(), GetNodesHealthCheckPort()
	if !sharedHealthCheck {
		// Service requires a special health check, retrieve the OnlyLocal port & path
		hcPath, hcPort = v1_service.GetServiceHealthCheckPathPort(svc)
	}
	hc, err := gce.ensureInternalHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort)
	if err != nil {
		return nil, err
	}

	// Determine IP which will be used for this LB. If no forwarding rule has been established
	// or specified in the Service spec, then requestedIP = "".
	requestedIP := determineRequestedIP(svc, existingFwdRule)
	ipToUse := requestedIP

	var addrMgr *addressManager
	// If the network is not a legacy network, use the address manager
	if !gce.IsLegacyNetwork() {
		addrMgr = newAddressManager(gce, nm.String(), gce.Region(), gce.SubnetworkURL(), loadBalancerName, requestedIP, schemeInternal)
		ipToUse, err = addrMgr.HoldAddress()
		if err != nil {
			return nil, err
		}
		glog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse)
	}

	// Ensure firewall rules if necessary
	if err = gce.ensureInternalFirewalls(loadBalancerName, ipToUse, clusterID, nm, svc, strconv.Itoa(int(hcPort)), sharedHealthCheck, nodes); err != nil {
		return nil, err
	}

	expectedFwdRule := &compute.ForwardingRule{
		Name:                loadBalancerName,
		Description:         fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, nm.String()),
		IPAddress:           ipToUse,
		BackendService:      backendServiceLink,
		Ports:               ports,
		IPProtocol:          string(protocol),
		LoadBalancingScheme: string(scheme),
	}

	// Specify subnetwork if known
	if len(gce.subnetworkURL) > 0 {
		expectedFwdRule.Subnetwork = gce.subnetworkURL
	} else {
		expectedFwdRule.Network = gce.networkURL
	}

	// Forwarding rules are immutable in the fields we care about, so a
	// mismatch means delete-then-recreate.
	fwdRuleDeleted := false
	if existingFwdRule != nil && !fwdRuleEqual(existingFwdRule, expectedFwdRule) {
		glog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress)
		if err = ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil {
			return nil, err
		}
		fwdRuleDeleted = true
	}

	bsDescription := makeBackendServiceDescription(nm, sharedBackend)
	err = gce.ensureInternalBackendService(backendServiceName, bsDescription, svc.Spec.SessionAffinity, scheme, protocol, igLinks, hc.SelfLink)
	if err != nil {
		return nil, err
	}

	// If we previously deleted the forwarding rule or it never existed, finally create it.
	if fwdRuleDeleted || existingFwdRule == nil {
		glog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName)
		if err = gce.CreateRegionForwardingRule(expectedFwdRule, gce.region); err != nil {
			return nil, err
		}
		glog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName)
	}

	// Delete the previous internal load balancer resources if necessary
	if existingBackendService != nil {
		gce.clearPreviousInternalResources(svc, loadBalancerName, existingBackendService, backendServiceName, hcName)
	}

	if addrMgr != nil {
		// Now that the controller knows the forwarding rule exists, we can release the address.
		if err := addrMgr.ReleaseAddress(); err != nil {
			glog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err)
		}
	}

	// Get the most recent forwarding rule for the address.
	updatedFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region)
	if err != nil {
		return nil, err
	}

	status := &v1.LoadBalancerStatus{}
	status.Ingress = []v1.LoadBalancerIngress{{IP: updatedFwdRule.IPAddress}}
	return status, nil
}
|
||||
|
||||
// clearPreviousInternalResources deletes the backend service and/or health check
// left behind by a previous reconcile when their expected names differ from the
// existing ones (e.g. the service toggled between shared and per-service
// resources). Cleanup is best-effort: failures are logged, never returned.
func (gce *GCECloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName string, existingBackendService *compute.BackendService, expectedBSName, expectedHCName string) {
	// If a new backend service was created, delete the old one.
	if existingBackendService.Name != expectedBSName {
		glog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name)
		// isNotFound is tolerated: another actor may have already removed it.
		if err := gce.teardownInternalBackendService(existingBackendService.Name); err != nil && !isNotFound(err) {
			glog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err)
		}
	}

	// If a new health check was created, delete the old one.
	if len(existingBackendService.HealthChecks) == 1 {
		// The backend service references health checks by self-link; recover the name.
		existingHCName := getNameFromLink(existingBackendService.HealthChecks[0])
		if existingHCName != expectedHCName {
			glog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName)
			if err := gce.teardownInternalHealthCheckAndFirewall(svc, existingHCName); err != nil {
				glog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err)
			}
		}
	} else if len(existingBackendService.HealthChecks) > 1 {
		// NOTE(review): internal LB backend services are expected to carry exactly
		// one health check; the anomalous case is only logged, never repaired here.
		glog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks)
	}
}
|
||||
|
||||
// updateInternalLoadBalancer is called when the list of nodes has changed. Therefore, only the instance groups
// and possibly the backend service need to be updated.
// clusterName is accepted for interface symmetry but is not used in this path.
func (gce *GCECloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, nodes []*v1.Node) error {
	// Instance groups and shared backend services are mutated by multiple LBs;
	// serialize all such mutations under the shared lock.
	gce.sharedResourceLock.Lock()
	defer gce.sharedResourceLock.Unlock()

	igName := makeInstanceGroupName(clusterID)
	igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
	if err != nil {
		return err
	}

	// Generate the backend service name
	_, protocol := getPortsAndProtocol(svc.Spec.Ports)
	scheme := schemeInternal
	loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
	backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, shareBackendService(svc), scheme, protocol, svc.Spec.SessionAffinity)
	// Ensure the backend service has the proper backend/instance-group links
	return gce.ensureInternalBackendServiceGroups(backendServiceName, igLinks)
}
|
||||
|
||||
// ensureInternalLoadBalancerDeleted tears down all GCE resources backing an
// internal load balancer for svc: static address, forwarding rule, backend
// service, traffic firewall, health check (+ its firewall), and finally the
// per-cluster instance groups. Deletion is ordered from the consumer side
// (forwarding rule) toward shared resources so in-use errors are minimized.
func (gce *GCECloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error {
	loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
	_, protocol := getPortsAndProtocol(svc.Spec.Ports)
	scheme := schemeInternal
	sharedBackend := shareBackendService(svc)
	sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc)

	// Shared resources (instance groups, shared BS/HC) are touched below.
	gce.sharedResourceLock.Lock()
	defer gce.sharedResourceLock.Unlock()

	glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName)
	// NOTE(review): any error from ensureAddressDeleted is discarded here —
	// presumably intentional best-effort cleanup; confirm upstream intent.
	ensureAddressDeleted(gce, loadBalancerName, gce.region)

	glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName)
	if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil {
		return err
	}

	backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity)
	glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName)
	if err := gce.teardownInternalBackendService(backendServiceName); err != nil {
		return err
	}

	glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName)
	if err := ignoreNotFound(gce.DeleteFirewall(loadBalancerName)); err != nil {
		// On shared-VPC (XPN) clusters the controller may lack permission to
		// delete firewalls in the host project; surface an event instead of failing.
		if isForbidden(err) && gce.OnXPN() {
			glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName)
			gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, gce.NetworkProjectID()))
		} else {
			return err
		}
	}

	hcName := makeHealthCheckName(loadBalancerName, clusterID, sharedHealthCheck)
	glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName)
	if err := gce.teardownInternalHealthCheckAndFirewall(svc, hcName); err != nil {
		return err
	}

	// Try deleting instance groups - expect ResourceInuse error if needed by other LBs
	igName := makeInstanceGroupName(clusterID)
	if err := gce.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) {
		return err
	}

	return nil
}
|
||||
|
||||
func (gce *GCECloud) teardownInternalBackendService(bsName string) error {
|
||||
if err := gce.DeleteRegionBackendService(bsName, gce.region); err != nil {
|
||||
if isNotFound(err) {
|
||||
glog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. err: %v", bsName, err)
|
||||
return nil
|
||||
} else if isInUsedByError(err) {
|
||||
glog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName)
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("failed to delete backend service: %v, err: %v", bsName, err)
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// teardownInternalHealthCheckAndFirewall deletes the named health check and its
// companion firewall rule. A missing health check is not an error (the firewall
// is still checked); a health check in use by another LB aborts quietly; any
// other failure is returned.
func (gce *GCECloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName string) error {
	if err := gce.DeleteHealthCheck(hcName); err != nil {
		if isNotFound(err) {
			glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName)
			// Purposely do not early return - double check the firewall does not exist
		} else if isInUsedByError(err) {
			glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName)
			return nil
		} else {
			return fmt.Errorf("failed to delete health check: %v, err: %v", hcName, err)
		}
	}
	glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName)

	// The firewall name is derived deterministically from the health check name.
	hcFirewallName := makeHealthCheckFirewallNameFromHC(hcName)
	if err := ignoreNotFound(gce.DeleteFirewall(hcFirewallName)); err != nil {
		// On shared-VPC (XPN) clusters we may lack firewall permissions in the
		// host project; raise an event for the operator instead of failing.
		if isForbidden(err) && gce.OnXPN() {
			glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. Raising Event.", hcName)
			gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, gce.NetworkProjectID()))
			return nil
		}

		return fmt.Errorf("failed to delete health check firewall: %v, err: %v", hcFirewallName, err)
	}
	glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName)
	return nil
}
|
||||
|
||||
// ensureInternalFirewall creates or reconciles a single firewall rule allowing
// the given protocol/ports from sourceRanges to the cluster nodes' tags.
// On shared-VPC (XPN) clusters where the controller lacks firewall permissions,
// a "change needed" event is raised instead of returning the forbidden error.
func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error {
	glog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName)
	targetTags, err := gce.GetNodeTags(nodeNames(nodes))
	if err != nil {
		return err
	}

	// Not-found is expected on first reconcile; any other lookup error aborts.
	existingFirewall, err := gce.GetFirewall(fwName)
	if err != nil && !isNotFound(err) {
		return err
	}

	// Desired state, rebuilt from scratch on every reconcile.
	expectedFirewall := &compute.Firewall{
		Name:         fwName,
		Description:  fwDesc,
		Network:      gce.networkURL,
		SourceRanges: sourceRanges,
		TargetTags:   targetTags,
		Allowed: []*compute.FirewallAllowed{
			{
				// GCE expects lowercase protocol names ("tcp"/"udp").
				IPProtocol: strings.ToLower(string(protocol)),
				Ports:      ports,
			},
		},
	}

	if existingFirewall == nil {
		glog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName)
		err = gce.CreateFirewall(expectedFirewall)
		if err != nil && isForbidden(err) && gce.OnXPN() {
			glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName)
			gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(expectedFirewall, gce.NetworkProjectID()))
			return nil
		}
		return err
	}

	// Skip the update call entirely when the live rule already matches.
	if firewallRuleEqual(expectedFirewall, existingFirewall) {
		return nil
	}

	glog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName)
	err = gce.UpdateFirewall(expectedFirewall)
	if err != nil && isForbidden(err) && gce.OnXPN() {
		glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", fwName)
		gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(expectedFirewall, gce.NetworkProjectID()))
		return nil
	}
	return err
}
|
||||
|
||||
// ensureInternalFirewalls reconciles the two firewall rules an internal LB
// needs: one admitting client traffic from the service's source ranges, and
// one admitting GCP health-check probes (always TCP) to the nodes.
func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID string, nm types.NamespacedName, svc *v1.Service, healthCheckPort string, sharedHealthCheck bool, nodes []*v1.Node) error {
	// First firewall is for ingress traffic
	fwDesc := makeFirewallDescription(nm.String(), ipAddress)
	ports, protocol := getPortsAndProtocol(svc.Spec.Ports)
	sourceRanges, err := v1_service.GetLoadBalancerSourceRanges(svc)
	if err != nil {
		return err
	}
	err = gce.ensureInternalFirewall(svc, loadBalancerName, fwDesc, sourceRanges.StringSlice(), ports, protocol, nodes)
	if err != nil {
		return err
	}

	// Second firewall is for health checking nodes / services
	fwHCName := makeHealthCheckFirewallName(loadBalancerName, clusterID, sharedHealthCheck)
	// Source ranges here are GCP's published health-checker CIDRs, not the service's.
	hcSrcRanges := LoadBalancerSrcRanges()
	return gce.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes)
}
|
||||
|
||||
// ensureInternalHealthCheck creates the HTTP health check if absent, or updates
// it in place when its parameters have drifted from the desired configuration.
// Returns the live (re-fetched after create) health check.
func (gce *GCECloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) {
	glog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port)
	expectedHC := newInternalLBHealthCheck(name, svcName, shared, path, port)

	// Not-found simply means we must create; other errors abort.
	hc, err := gce.GetHealthCheck(name)
	if err != nil && !isNotFound(err) {
		return nil, err
	}

	if hc == nil {
		glog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path)
		if err = gce.CreateHealthCheck(expectedHC); err != nil {
			return nil, err
		}
		// Re-fetch to obtain server-populated fields (e.g. SelfLink).
		hc, err = gce.GetHealthCheck(name)
		if err != nil {
			glog.Errorf("Failed to get http health check %v", err)
			return nil, err
		}
		glog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name)
		return hc, nil
	}

	if healthChecksEqual(expectedHC, hc) {
		return hc, nil
	}

	glog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name)
	if err := gce.UpdateHealthCheck(expectedHC); err != nil {
		glog.Warningf("Failed to reconcile http health check %v parameters", name)
		return nil, err
	}
	glog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name)
	// NOTE(review): the pre-update hc object is returned here, not a re-fetch;
	// callers presumably only use fields unchanged by the update (e.g. SelfLink).
	return hc, nil
}
|
||||
|
||||
// ensureInternalInstanceGroup creates the unmanaged instance group in the given
// zone if absent, then diffs its membership against the desired node list,
// removing stale instances and adding missing ones. Returns the group's SelfLink.
func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) (string, error) {
	glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes))
	ig, err := gce.GetInstanceGroup(name, zone)
	if err != nil && !isNotFound(err) {
		return "", err
	}

	// Desired membership, keyed by node name.
	kubeNodes := sets.NewString()
	for _, n := range nodes {
		kubeNodes.Insert(n.Name)
	}

	// Actual membership; stays empty for a freshly created group.
	gceNodes := sets.NewString()
	if ig == nil {
		glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone)
		newIG := &compute.InstanceGroup{Name: name}
		if err = gce.CreateInstanceGroup(newIG, zone); err != nil {
			return "", err
		}

		// Re-fetch to obtain the server-assigned SelfLink.
		ig, err = gce.GetInstanceGroup(name, zone)
		if err != nil {
			return "", err
		}
	} else {
		instances, err := gce.ListInstancesInInstanceGroup(name, zone, allInstances)
		if err != nil {
			return "", err
		}

		for _, ins := range instances.Items {
			// The instance field is a URL; the final path segment is the name.
			parts := strings.Split(ins.Instance, "/")
			gceNodes.Insert(parts[len(parts)-1])
		}
	}

	removeNodes := gceNodes.Difference(kubeNodes).List()
	addNodes := kubeNodes.Difference(gceNodes).List()

	if len(removeNodes) != 0 {
		glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes)
		instanceRefs := gce.ToInstanceReferences(zone, removeNodes)
		// Possible we'll receive 404's here if the instance was deleted before getting to this point.
		if err = gce.RemoveInstancesFromInstanceGroup(name, zone, instanceRefs); err != nil && !isNotFound(err) {
			return "", err
		}
	}

	if len(addNodes) != 0 {
		glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes)
		instanceRefs := gce.ToInstanceReferences(zone, addNodes)
		if err = gce.AddInstancesToInstanceGroup(name, zone, instanceRefs); err != nil {
			return "", err
		}
	}

	return ig.SelfLink, nil
}
|
||||
|
||||
// ensureInternalInstanceGroups generates an unmanaged instance group for every zone
|
||||
// where a K8s node exists. It also ensures that each node belongs to an instance group
|
||||
func (gce *GCECloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) ([]string, error) {
|
||||
zonedNodes := splitNodesByZone(nodes)
|
||||
glog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), gce.region)
|
||||
var igLinks []string
|
||||
for zone, nodes := range zonedNodes {
|
||||
igLink, err := gce.ensureInternalInstanceGroup(name, zone, nodes)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
igLinks = append(igLinks, igLink)
|
||||
}
|
||||
|
||||
return igLinks, nil
|
||||
}
|
||||
|
||||
// ensureInternalInstanceGroupsDeleted attempts to delete the named instance
// group in every zone of the region. Not-found and in-use errors are tolerated
// per zone; anything else aborts immediately.
func (gce *GCECloud) ensureInternalInstanceGroupsDeleted(name string) error {
	// List of nodes isn't available here - fetch all zones in region and try deleting this cluster's ig
	zones, err := gce.ListZonesInRegion(gce.region)
	if err != nil {
		return err
	}

	glog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones))
	for _, z := range zones {
		if err := gce.DeleteInstanceGroup(name, z.Name); err != nil && !isNotFoundOrInUse(err) {
			return err
		}
	}
	return nil
}
|
||||
|
||||
func (gce *GCECloud) ensureInternalBackendService(name, description string, affinityType v1.ServiceAffinity, scheme lbScheme, protocol v1.Protocol, igLinks []string, hcLink string) error {
|
||||
glog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks))
|
||||
bs, err := gce.GetRegionBackendService(name, gce.region)
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
backends := backendsFromGroupLinks(igLinks)
|
||||
expectedBS := &compute.BackendService{
|
||||
Name: name,
|
||||
Protocol: string(protocol),
|
||||
Description: description,
|
||||
HealthChecks: []string{hcLink},
|
||||
Backends: backends,
|
||||
SessionAffinity: translateAffinityType(affinityType),
|
||||
LoadBalancingScheme: string(scheme),
|
||||
}
|
||||
|
||||
// Create backend service if none was found
|
||||
if bs == nil {
|
||||
glog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name)
|
||||
err := gce.CreateRegionBackendService(expectedBS, gce.region)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name)
|
||||
return nil
|
||||
}
|
||||
// Check existing backend service
|
||||
existingIGLinks := sets.NewString()
|
||||
for _, be := range bs.Backends {
|
||||
existingIGLinks.Insert(be.Group)
|
||||
}
|
||||
|
||||
if backendSvcEqual(expectedBS, bs) {
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name)
|
||||
// Set fingerprint for optimistic locking
|
||||
expectedBS.Fingerprint = bs.Fingerprint
|
||||
if err := gce.UpdateRegionBackendService(expectedBS, gce.region); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureInternalBackendServiceGroups updates backend services if their list of backend instance groups is incorrect.
|
||||
func (gce *GCECloud) ensureInternalBackendServiceGroups(name string, igLinks []string) error {
|
||||
glog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name)
|
||||
bs, err := gce.GetRegionBackendService(name, gce.region)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
backends := backendsFromGroupLinks(igLinks)
|
||||
if backendsListEqual(bs.Backends, backends) {
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name)
|
||||
if err := gce.UpdateRegionBackendService(bs, gce.region); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func shareBackendService(svc *v1.Service) bool {
|
||||
return GetLoadBalancerAnnotationBackendShare(svc) && !v1_service.RequestsOnlyLocalTraffic(svc)
|
||||
}
|
||||
|
||||
func backendsFromGroupLinks(igLinks []string) []*compute.Backend {
|
||||
var backends []*compute.Backend
|
||||
for _, igLink := range igLinks {
|
||||
backends = append(backends, &compute.Backend{
|
||||
Group: igLink,
|
||||
})
|
||||
}
|
||||
return backends
|
||||
}
|
||||
|
||||
func newInternalLBHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) *compute.HealthCheck {
|
||||
httpSettings := compute.HTTPHealthCheck{
|
||||
Port: int64(port),
|
||||
RequestPath: path,
|
||||
}
|
||||
desc := ""
|
||||
if !shared {
|
||||
desc = makeHealthCheckDescription(svcName.String())
|
||||
}
|
||||
return &compute.HealthCheck{
|
||||
Name: name,
|
||||
CheckIntervalSec: gceHcCheckIntervalSeconds,
|
||||
TimeoutSec: gceHcTimeoutSeconds,
|
||||
HealthyThreshold: gceHcHealthyThreshold,
|
||||
UnhealthyThreshold: gceHcUnhealthyThreshold,
|
||||
HttpHealthCheck: &httpSettings,
|
||||
Type: "HTTP",
|
||||
Description: desc,
|
||||
}
|
||||
}
|
||||
|
||||
func firewallRuleEqual(a, b *compute.Firewall) bool {
|
||||
return a.Description == b.Description &&
|
||||
len(a.Allowed) == 1 && len(a.Allowed) == len(b.Allowed) &&
|
||||
a.Allowed[0].IPProtocol == b.Allowed[0].IPProtocol &&
|
||||
equalStringSets(a.Allowed[0].Ports, b.Allowed[0].Ports) &&
|
||||
equalStringSets(a.SourceRanges, b.SourceRanges) &&
|
||||
equalStringSets(a.TargetTags, b.TargetTags)
|
||||
}
|
||||
|
||||
// healthChecksEqual reports whether two health checks agree on the HTTP
// parameters and timing thresholds this controller manages. Both must be
// HTTP checks (non-nil HttpHealthCheck); the Type field itself is not compared.
func healthChecksEqual(a, b *compute.HealthCheck) bool {
	return a.HttpHealthCheck != nil && b.HttpHealthCheck != nil &&
		a.HttpHealthCheck.Port == b.HttpHealthCheck.Port &&
		a.HttpHealthCheck.RequestPath == b.HttpHealthCheck.RequestPath &&
		a.Description == b.Description &&
		a.CheckIntervalSec == b.CheckIntervalSec &&
		a.TimeoutSec == b.TimeoutSec &&
		a.UnhealthyThreshold == b.UnhealthyThreshold &&
		a.HealthyThreshold == b.HealthyThreshold
}
|
||||
|
||||
// backendsListEqual asserts that backend lists are equal by instance group link only
|
||||
func backendsListEqual(a, b []*compute.Backend) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
if len(a) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
aSet := sets.NewString()
|
||||
for _, v := range a {
|
||||
aSet.Insert(v.Group)
|
||||
}
|
||||
bSet := sets.NewString()
|
||||
for _, v := range b {
|
||||
bSet.Insert(v.Group)
|
||||
}
|
||||
|
||||
return aSet.Equal(bSet)
|
||||
}
|
||||
|
||||
// backendSvcEqual reports whether two backend services agree on the fields this
// controller manages. Backends are compared by instance-group link only;
// Name and Fingerprint are deliberately excluded.
func backendSvcEqual(a, b *compute.BackendService) bool {
	return a.Protocol == b.Protocol &&
		a.Description == b.Description &&
		a.SessionAffinity == b.SessionAffinity &&
		a.LoadBalancingScheme == b.LoadBalancingScheme &&
		equalStringSets(a.HealthChecks, b.HealthChecks) &&
		backendsListEqual(a.Backends, b.Backends)
}
|
||||
|
||||
// fwdRuleEqual reports whether two forwarding rules are equivalent for
// reconciliation. An empty IPAddress on either side matches anything
// (ephemeral addresses are acceptable); Network/Subnetwork are not compared.
func fwdRuleEqual(a, b *compute.ForwardingRule) bool {
	return (a.IPAddress == "" || b.IPAddress == "" || a.IPAddress == b.IPAddress) &&
		a.IPProtocol == b.IPProtocol &&
		a.LoadBalancingScheme == b.LoadBalancingScheme &&
		equalStringSets(a.Ports, b.Ports) &&
		a.BackendService == b.BackendService
}
|
||||
|
||||
// getPortsAndProtocol flattens a service's ports into decimal strings and
// returns the protocol of the first port for the whole load balancer.
func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, protocol v1.Protocol) {
	if len(svcPorts) == 0 {
		// NOTE(review): UDP here appears to be an arbitrary placeholder for the
		// no-ports case — confirm callers never act on it.
		return []string{}, v1.ProtocolUDP
	}

	// GCP doesn't support multiple protocols for a single load balancer
	// so only the first port's protocol is honored; differing protocols on
	// later ports are silently ignored.
	protocol = svcPorts[0].Protocol
	for _, p := range svcPorts {
		ports = append(ports, strconv.Itoa(int(p.Port)))
	}
	return ports, protocol
}
|
||||
|
||||
func (gce *GCECloud) getBackendServiceLink(name string) string {
|
||||
return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "backendServices", name}, "/")
|
||||
}
|
||||
|
||||
// getNameFromLink extracts the final path segment (the resource name) from a
// GCE self-link URL. An empty link yields ""; a link without slashes is
// returned unchanged.
func getNameFromLink(link string) string {
	if link == "" {
		return ""
	}

	// LastIndex is -1 when no slash exists, so link[idx+1:] is the whole string.
	idx := strings.LastIndex(link, "/")
	return link[idx+1:]
}
|
||||
|
||||
// determineRequestedIP picks the IP to request for the load balancer:
// the user's explicit spec.loadBalancerIP wins; otherwise the existing
// forwarding rule's IP is reused; otherwise "" lets GCE allocate one.
func determineRequestedIP(svc *v1.Service, fwdRule *compute.ForwardingRule) string {
	if svc.Spec.LoadBalancerIP != "" {
		return svc.Spec.LoadBalancerIP
	}

	if fwdRule != nil {
		return fwdRule.IPAddress
	}

	return ""
}
|
120
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go
generated
vendored
Normal file
120
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// Internal Load Balancer
|
||||
|
||||
// Instance groups remain legacy named to stay consistent with ingress.
// makeInstanceGroupName returns the per-cluster instance group name
// ("k8s-ig--<clusterID>").
func makeInstanceGroupName(clusterID string) string {
	return fmt.Sprint("k8s-ig--", clusterID)
}
|
||||
|
||||
// makeBackendServiceName returns the backend service name for a load balancer.
// Non-shared services simply reuse the LB name. Shared services get a
// deterministic cluster-wide name that encodes scheme, protocol, and a hash of
// the settings that prevent sharing (currently only session affinity), so two
// services may share a backend service only when every hashed setting matches.
func makeBackendServiceName(loadBalancerName, clusterID string, shared bool, scheme lbScheme, protocol v1.Protocol, svcAffinity v1.ServiceAffinity) string {
	if shared {
		hash := sha1.New()

		// For every non-nil option, hash its value. Currently, only service affinity is relevant.
		hash.Write([]byte(string(svcAffinity)))

		// Truncated to 16 hex chars to stay within GCE's 63-char name limit.
		hashed := hex.EncodeToString(hash.Sum(nil))
		hashed = hashed[:16]

		// k8s-            4
		// {clusterid}-    17
		// {scheme}-       9   (internal/external)
		// {protocol}-     4   (tcp/udp)
		// nmv1-           5   (naming convention version)
		// {suffix}        16  (hash of settings)
		// -----------------
		//                 55 characters used
		return fmt.Sprintf("k8s-%s-%s-%s-nmv1-%s", clusterID, strings.ToLower(string(scheme)), strings.ToLower(string(protocol)), hashed)
	}
	return loadBalancerName
}
|
||||
|
||||
// makeHealthCheckName returns the health check name: the cluster-wide node
// health check name when shared, otherwise the load balancer's own name.
func makeHealthCheckName(loadBalancerName, clusterID string, shared bool) string {
	if !shared {
		return loadBalancerName
	}
	return fmt.Sprint("k8s-", clusterID, "-node")
}
|
||||
|
||||
// makeHealthCheckFirewallNameFromHC derives the firewall name for a health
// check by appending a fixed "-hc" suffix to the health check's name.
func makeHealthCheckFirewallNameFromHC(healthCheckName string) string {
	const suffix = "-hc"
	return healthCheckName + suffix
}
|
||||
|
||||
// makeHealthCheckFirewallName returns the firewall name for the health check:
// the cluster-wide node firewall when shared, else the LB's own name plus "-hc".
func makeHealthCheckFirewallName(loadBalancerName, clusterID string, shared bool) string {
	if !shared {
		return loadBalancerName + "-hc"
	}
	return fmt.Sprint("k8s-", clusterID, "-node-hc")
}
|
||||
|
||||
func makeBackendServiceDescription(nm types.NamespacedName, shared bool) string {
|
||||
if shared {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, nm.String())
|
||||
}
|
||||
|
||||
// External Load Balancer
|
||||
|
||||
// makeServiceDescription is used to generate descriptions for forwarding rules and addresses.
// The result is a small JSON object tagging the resource with its owning service.
func makeServiceDescription(serviceName string) string {
	return fmt.Sprint(`{"kubernetes.io/service-name":"`, serviceName, `"}`)
}
|
||||
|
||||
// MakeNodesHealthCheckName returns name of the health check resource used by
// the GCE load balancers (l4) for performing health checks on nodes.
func MakeNodesHealthCheckName(clusterID string) string {
	return fmt.Sprint("k8s-", clusterID, "-node")
}
|
||||
|
||||
// makeHealthCheckDescription returns the JSON description tagging a health
// check with its owning service.
func makeHealthCheckDescription(serviceName string) string {
	return fmt.Sprint(`{"kubernetes.io/service-name":"`, serviceName, `"}`)
}
|
||||
|
||||
// MakeHealthCheckFirewallName returns the firewall name used by the GCE load
|
||||
// balancers (l4) for performing health checks.
|
||||
func MakeHealthCheckFirewallName(clusterID, hcName string, isNodesHealthCheck bool) string {
|
||||
if isNodesHealthCheck {
|
||||
return MakeNodesHealthCheckName(clusterID) + "-http-hc"
|
||||
}
|
||||
return "k8s-" + hcName + "-http-hc"
|
||||
}
|
||||
|
||||
// MakeFirewallName returns the firewall name used by the GCE load
// balancers (l4) for serving traffic.
func MakeFirewallName(name string) string {
	return fmt.Sprint("k8s-fw-", name)
}
|
||||
|
||||
// makeFirewallDescription returns the JSON description tagging a firewall with
// its owning service and the load balancer IP it serves.
func makeFirewallDescription(serviceName, ipAddress string) string {
	return fmt.Sprint(`{"kubernetes.io/service-name":"`, serviceName, `", "kubernetes.io/service-ip":"`, ipAddress, `"}`)
}
|
148
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go
generated
vendored
Normal file
148
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// NEGLoadBalancerType is the network endpoint group type used for load balancing.
	NEGLoadBalancerType = "LOAD_BALANCING"
	// NEGIPPortNetworkEndpointType is the endpoint type for VM IP:port endpoints.
	NEGIPPortNetworkEndpointType = "GCE_VM_IP_PORT"
)
|
||||
|
||||
// newNetworkEndpointGroupMetricContext builds the metric context used to record
// latency/errors of NEG API calls, labeled with the request kind and zone.
func newNetworkEndpointGroupMetricContext(request string, zone string) *metricContext {
	return newGenericMetricContext("networkendpointgroup_", request, unusedMetricLabel, zone, computeAlphaVersion)
}
|
||||
|
||||
// GetNetworkEndpointGroup fetches a NEG by name and zone via the alpha Compute
// API. Fails fast if the NetworkEndpointGroup alpha feature is not enabled.
func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computealpha.NetworkEndpointGroup, error) {
	if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
		return nil, err
	}
	mc := newNetworkEndpointGroupMetricContext("get", zone)
	v, err := gce.serviceAlpha.NetworkEndpointGroups.Get(gce.ProjectID(), zone, name).Do()
	return v, mc.Observe(err)
}
|
||||
|
||||
// ListNetworkEndpointGroup returns all NEGs in a zone, accumulating every page
// of the alpha list API. Requires the NetworkEndpointGroup alpha feature.
func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computealpha.NetworkEndpointGroup, error) {
	if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
		return nil, err
	}
	mc := newNetworkEndpointGroupMetricContext("list", zone)
	networkEndpointGroups := []*computealpha.NetworkEndpointGroup{}
	err := gce.serviceAlpha.NetworkEndpointGroups.List(gce.ProjectID(), zone).Pages(context.Background(), func(res *computealpha.NetworkEndpointGroupList) error {
		networkEndpointGroups = append(networkEndpointGroups, res.Items...)
		return nil
	})
	return networkEndpointGroups, mc.Observe(err)
}
|
||||
|
||||
// AggregatedListNetworkEndpointGroup returns all NEGs in the project, grouped
// by zone name. Zones with no NEGs are omitted from the result map.
// Requires the NetworkEndpointGroup alpha feature.
func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computealpha.NetworkEndpointGroup, error) {
	if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
		return nil, err
	}
	mc := newNetworkEndpointGroupMetricContext("aggregated_list", "")
	zoneNetworkEndpointGroupMap := map[string][]*computealpha.NetworkEndpointGroup{}
	err := gce.serviceAlpha.NetworkEndpointGroups.AggregatedList(gce.ProjectID()).Pages(context.Background(), func(res *computealpha.NetworkEndpointGroupAggregatedList) error {
		for key, negs := range res.Items {
			if len(negs.NetworkEndpointGroups) == 0 {
				continue
			}
			// key has the format of "zones/${zone_name}"
			// NOTE(review): a key without "/" would panic on the [1] index —
			// presumably the API guarantees the format; confirm before relying on it.
			zone := strings.Split(key, "/")[1]
			if _, ok := zoneNetworkEndpointGroupMap[zone]; !ok {
				zoneNetworkEndpointGroupMap[zone] = []*computealpha.NetworkEndpointGroup{}
			}
			zoneNetworkEndpointGroupMap[zone] = append(zoneNetworkEndpointGroupMap[zone], negs.NetworkEndpointGroups...)
		}
		return nil
	})
	return zoneNetworkEndpointGroupMap, mc.Observe(err)
}
|
||||
|
||||
func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computealpha.NetworkEndpointGroup, zone string) error {
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("create", zone)
|
||||
op, err := gce.serviceAlpha.NetworkEndpointGroups.Insert(gce.ProjectID(), zone, neg).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForZoneOp(op, zone, mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DeleteNetworkEndpointGroup(name string, zone string) error {
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("delete", zone)
|
||||
op, err := gce.serviceAlpha.NetworkEndpointGroups.Delete(gce.ProjectID(), zone, name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForZoneOp(op, zone, mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error {
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("attach", zone)
|
||||
op, err := gce.serviceAlpha.NetworkEndpointGroups.AttachNetworkEndpoints(gce.ProjectID(), zone, name, &computealpha.NetworkEndpointGroupsAttachEndpointsRequest{
|
||||
NetworkEndpoints: endpoints,
|
||||
}).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForZoneOp(op, zone, mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error {
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("detach", zone)
|
||||
op, err := gce.serviceAlpha.NetworkEndpointGroups.DetachNetworkEndpoints(gce.ProjectID(), zone, name, &computealpha.NetworkEndpointGroupsDetachEndpointsRequest{
|
||||
NetworkEndpoints: endpoints,
|
||||
}).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForZoneOp(op, zone, mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computealpha.NetworkEndpointWithHealthStatus, error) {
|
||||
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
healthStatus := "SKIP"
|
||||
if showHealthStatus {
|
||||
healthStatus = "SHOW"
|
||||
}
|
||||
mc := newNetworkEndpointGroupMetricContext("list_networkendpoints", zone)
|
||||
networkEndpoints := []*computealpha.NetworkEndpointWithHealthStatus{}
|
||||
err := gce.serviceAlpha.NetworkEndpointGroups.ListNetworkEndpoints(gce.ProjectID(), zone, name, &computealpha.NetworkEndpointGroupsListEndpointsRequest{
|
||||
HealthStatus: healthStatus,
|
||||
}).Pages(context.Background(), func(res *computealpha.NetworkEndpointGroupsListNetworkEndpoints) error {
|
||||
networkEndpoints = append(networkEndpoints, res.Items...)
|
||||
return nil
|
||||
})
|
||||
return networkEndpoints, mc.Observe(err)
|
||||
}
|
180
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_op.go
generated
vendored
Normal file
180
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_op.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/golang/glog"
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
computev1 "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operationName string) (*computev1.Operation, error), mc *metricContext) error {
|
||||
if op == nil {
|
||||
return mc.Observe(fmt.Errorf("operation must not be nil"))
|
||||
}
|
||||
|
||||
if opIsDone(op) {
|
||||
return getErrorFromOp(op)
|
||||
}
|
||||
|
||||
opStart := time.Now()
|
||||
opName := op.Name
|
||||
|
||||
return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
|
||||
start := time.Now()
|
||||
gce.operationPollRateLimiter.Accept()
|
||||
duration := time.Now().Sub(start)
|
||||
if duration > 5*time.Second {
|
||||
glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
|
||||
}
|
||||
pollOp, err := getOperation(opName)
|
||||
if err != nil {
|
||||
glog.Warningf("GCE poll operation %s failed: pollOp: [%v] err: [%v] getErrorFromOp: [%v]",
|
||||
opName, pollOp, err, getErrorFromOp(pollOp))
|
||||
}
|
||||
|
||||
done := opIsDone(pollOp)
|
||||
if done {
|
||||
duration := time.Now().Sub(opStart)
|
||||
if duration > 1*time.Minute {
|
||||
// Log the JSON. It's cleaner than the %v structure.
|
||||
enc, err := pollOp.MarshalJSON()
|
||||
if err != nil {
|
||||
glog.Warningf("waitForOperation: long operation (%v): %v (failed to encode to JSON: %v)",
|
||||
duration, pollOp, err)
|
||||
} else {
|
||||
glog.V(2).Infof("waitForOperation: long operation (%v): %v",
|
||||
duration, string(enc))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return done, mc.Observe(getErrorFromOp(pollOp))
|
||||
})
|
||||
}
|
||||
|
||||
func opIsDone(op *computev1.Operation) bool {
|
||||
return op != nil && op.Status == "DONE"
|
||||
}
|
||||
|
||||
func getErrorFromOp(op *computev1.Operation) error {
|
||||
if op != nil && op.Error != nil && len(op.Error.Errors) > 0 {
|
||||
err := &googleapi.Error{
|
||||
Code: int(op.HttpErrorStatusCode),
|
||||
Message: op.Error.Errors[0].Message,
|
||||
}
|
||||
glog.Errorf("GCE operation failed: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForGlobalOp waits for the given global operation to complete in the
// cloud's own project (gce.ProjectID()).
func (gce *GCECloud) waitForGlobalOp(op gceObject, mc *metricContext) error {
	return gce.waitForGlobalOpInProject(op, gce.ProjectID(), mc)
}
|
||||
|
||||
// waitForRegionOp waits for the given regional operation to complete in the
// cloud's own project (gce.ProjectID()).
func (gce *GCECloud) waitForRegionOp(op gceObject, region string, mc *metricContext) error {
	return gce.waitForRegionOpInProject(op, gce.ProjectID(), region, mc)
}
|
||||
|
||||
// waitForZoneOp waits for the given zonal operation to complete in the
// cloud's own project (gce.ProjectID()).
func (gce *GCECloud) waitForZoneOp(op gceObject, zone string, mc *metricContext) error {
	return gce.waitForZoneOpInProject(op, gce.ProjectID(), zone, mc)
}
|
||||
|
||||
func (gce *GCECloud) waitForGlobalOpInProject(op gceObject, projectID string, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceAlpha.GlobalOperations.Get(projectID, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computebeta.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceBeta.GlobalOperations.Get(projectID, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computev1.Operation:
|
||||
return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
|
||||
return gce.service.GlobalOperations.Get(projectID, operationName).Do()
|
||||
}, mc)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForRegionOpInProject(op gceObject, projectID, region string, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceAlpha.RegionOperations.Get(projectID, region, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computebeta.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceBeta.RegionOperations.Get(projectID, region, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computev1.Operation:
|
||||
return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
|
||||
return gce.service.RegionOperations.Get(projectID, region, operationName).Do()
|
||||
}, mc)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (gce *GCECloud) waitForZoneOpInProject(op gceObject, projectID, zone string, mc *metricContext) error {
|
||||
switch v := op.(type) {
|
||||
case *computealpha.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceAlpha.ZoneOperations.Get(projectID, zone, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computebeta.Operation:
|
||||
return gce.waitForOp(convertToV1Operation(op), func(operationName string) (*computev1.Operation, error) {
|
||||
op, err := gce.serviceBeta.ZoneOperations.Get(projectID, zone, operationName).Do()
|
||||
return convertToV1Operation(op), err
|
||||
}, mc)
|
||||
case *computev1.Operation:
|
||||
return gce.waitForOp(op.(*computev1.Operation), func(operationName string) (*computev1.Operation, error) {
|
||||
return gce.service.ZoneOperations.Get(projectID, zone, operationName).Do()
|
||||
}, mc)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func convertToV1Operation(object gceObject) *computev1.Operation {
|
||||
enc, err := object.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to encode to json: %v", err))
|
||||
}
|
||||
var op computev1.Operation
|
||||
if err := json.Unmarshal(enc, &op); err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 operation: %v", object, err))
|
||||
}
|
||||
return &op
|
||||
}
|
116
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go
generated
vendored
Normal file
116
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gce
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"path"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/golang/glog"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// newRoutesMetricContext returns a metric context for a routes API call.
// Routes are a global resource, so the zone and region labels are unused.
func newRoutesMetricContext(request string) *metricContext {
	return newGenericMetricContext("routes", request, unusedMetricLabel, unusedMetricLabel, computeV1Version)
}
|
||||
|
||||
// ListRoutes returns all Kubernetes node routes for the cluster: routes in
// the network project whose name starts with the (truncated) cluster name,
// that belong to the cluster's network, and that carry the k8s node-route
// description tag. Results are fetched page by page, each page observed under
// its own "list_page" metric context, and capped at maxPages pages.
func (gce *GCECloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
	var routes []*cloudprovider.Route
	pageToken := ""
	page := 0
	// First iteration always runs (page == 0); later iterations run only while
	// the API returned a next-page token and the page cap is not reached.
	for ; page == 0 || (pageToken != "" && page < maxPages); page++ {
		mc := newRoutesMetricContext("list_page")
		listCall := gce.service.Routes.List(gce.NetworkProjectID())

		prefix := truncateClusterName(clusterName)
		// Filter for routes starting with clustername AND belonging to the
		// relevant gcp network AND having description = "k8s-node-route".
		filter := "(name eq " + prefix + "-.*) "
		filter = filter + "(network eq " + gce.NetworkURL() + ") "
		filter = filter + "(description eq " + k8sNodeRouteTag + ")"
		listCall = listCall.Filter(filter)
		if pageToken != "" {
			listCall = listCall.PageToken(pageToken)
		}
		res, err := listCall.Do()
		mc.Observe(err)
		if err != nil {
			glog.Errorf("Error getting routes from GCE: %v", err)
			return nil, err
		}
		pageToken = res.NextPageToken
		for _, r := range res.Items {
			// The route's next hop is an instance URL; its last path component
			// is the instance name.
			target := path.Base(r.NextHopInstance)
			// TODO: Should we lastComponent(target) this?
			targetNodeName := types.NodeName(target) // NodeName == Instance Name on GCE
			routes = append(routes, &cloudprovider.Route{Name: r.Name, TargetNode: targetNodeName, DestinationCIDR: r.DestRange})
		}
	}
	// Hitting the page cap means the listing was truncated; log but still
	// return what was collected.
	if page >= maxPages {
		glog.Errorf("ListRoutes exceeded maxPages=%d for Routes.List; truncating.", maxPages)
	}
	return routes, nil
}
|
||||
|
||||
func (gce *GCECloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
|
||||
routeName := truncateClusterName(clusterName) + "-" + nameHint
|
||||
|
||||
instanceName := mapNodeNameToInstanceName(route.TargetNode)
|
||||
targetInstance, err := gce.getInstanceByName(instanceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mc := newRoutesMetricContext("create")
|
||||
insertOp, err := gce.service.Routes.Insert(gce.NetworkProjectID(), &compute.Route{
|
||||
Name: routeName,
|
||||
DestRange: route.DestinationCIDR,
|
||||
NextHopInstance: fmt.Sprintf("zones/%s/instances/%s", targetInstance.Zone, targetInstance.Name),
|
||||
Network: gce.NetworkURL(),
|
||||
Priority: 1000,
|
||||
Description: k8sNodeRouteTag,
|
||||
}).Do()
|
||||
if err != nil {
|
||||
if isHTTPErrorCode(err, http.StatusConflict) {
|
||||
glog.Infof("Route %v already exists.", routeName)
|
||||
return nil
|
||||
} else {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
}
|
||||
return gce.waitForGlobalOpInProject(insertOp, gce.NetworkProjectID(), mc)
|
||||
}
|
||||
|
||||
func (gce *GCECloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
|
||||
mc := newRoutesMetricContext("delete")
|
||||
deleteOp, err := gce.service.Routes.Delete(gce.NetworkProjectID(), route.Name).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForGlobalOpInProject(deleteOp, gce.NetworkProjectID(), mc)
|
||||
}
|
||||
|
||||
// truncateClusterName caps the cluster name at 26 characters so that derived
// route names stay within GCE resource-name limits; shorter names pass
// through unchanged.
func truncateClusterName(clusterName string) string {
	const maxLen = 26
	if len(clusterName) <= maxLen {
		return clusterName
	}
	return clusterName[:maxLen]
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user