mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-14 18:53:35 +00:00

vendor updates
6 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD generated vendored

@@ -24,8 +24,10 @@ go_library(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/util:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library",
@@ -72,14 +74,14 @@ go_test(
         "openstack_routes_test.go",
         "openstack_test.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//pkg/cloudprovider:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
4 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/OWNERS generated vendored

@@ -2,9 +2,9 @@ approvers:
 - anguslees
 - NickrenREN
 - dims
-- FengyunPan
+- FengyunPan2
 reviewers:
 - anguslees
 - NickrenREN
 - dims
-- FengyunPan
+- FengyunPan2
35 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go generated vendored

@@ -33,19 +33,19 @@ import (
 )
 
 const (
-	// metadataUrlTemplate allows building an OpenStack Metadata service URL.
+	// metadataURLTemplate allows building an OpenStack Metadata service URL.
 	// It's a hardcoded IPv4 link-local address as documented in "OpenStack Cloud
 	// Administrator Guide", chapter Compute - Networking with nova-network.
-	// https://docs.openstack.org/admin-guide/compute-networking-nova.html#metadata-service
+	//https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service
 	defaultMetadataVersion = "2012-08-10"
-	metadataUrlTemplate    = "http://169.254.169.254/openstack/%s/meta_data.json"
+	metadataURLTemplate    = "http://169.254.169.254/openstack/%s/meta_data.json"
 
 	// metadataID is used as an identifier on the metadata search order configuration.
 	metadataID = "metadataService"
 
 	// Config drive is defined as an iso9660 or vfat (deprecated) drive
 	// with the "config-2" label.
-	// http://docs.openstack.org/user-guide/cli-config-drive.html
+	//https://docs.openstack.org/nova/latest/user/config-drive.html
 	configDriveLabel        = "config-2"
 	configDrivePathTemplate = "openstack/%s/meta_data.json"
 
@@ -53,10 +53,10 @@ const (
 	configDriveID = "configDrive"
 )
 
 // ErrBadMetadata is used to indicate a problem parsing data from metadata server
 var ErrBadMetadata = errors.New("invalid OpenStack metadata, got empty uuid")
 
-// There are multiple device types. To keep it simple, we're using a single structure
-// for all device metadata types.
+// DeviceMetadata is a single/simplified data structure for all kinds of device metadata types.
 type DeviceMetadata struct {
 	Type string `json:"type"`
 	Bus  string `json:"bus,omitempty"`
@@ -65,11 +65,12 @@ type DeviceMetadata struct {
 	// .. and other fields.
 }
 
-// Assumes the "2012-08-10" meta_data.json format.
+// Metadata has the information fetched from OpenStack metadata service or
+// config drives. Assumes the "2012-08-10" meta_data.json format.
 // See http://docs.openstack.org/user-guide/cli_config_drive.html
 type Metadata struct {
-	Uuid             string `json:"uuid"`
-	Name             string `json:"name"`
+	UUID             string `json:"uuid"`
+	Hostname         string `json:"hostname"`
 	AvailabilityZone string `json:"availability_zone"`
 	Devices          []DeviceMetadata `json:"devices,omitempty"`
 	// .. and other fields we don't care about. Expand as necessary.
@@ -84,15 +85,15 @@ func parseMetadata(r io.Reader) (*Metadata, error) {
 		return nil, err
 	}
 
-	if metadata.Uuid == "" {
+	if metadata.UUID == "" {
 		return nil, ErrBadMetadata
 	}
 
 	return &metadata, nil
 }
 
-func getMetadataUrl(metadataVersion string) string {
-	return fmt.Sprintf(metadataUrlTemplate, metadataVersion)
+func getMetadataURL(metadataVersion string) string {
+	return fmt.Sprintf(metadataURLTemplate, metadataVersion)
 }
 
 func getConfigDrivePath(metadataVersion string) string {
@@ -147,16 +148,16 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) {
 
 func getMetadataFromMetadataService(metadataVersion string) (*Metadata, error) {
 	// Try to get JSON from metadata server.
-	metadataUrl := getMetadataUrl(metadataVersion)
-	glog.V(4).Infof("Attempting to fetch metadata from %s", metadataUrl)
-	resp, err := http.Get(metadataUrl)
+	metadataURL := getMetadataURL(metadataVersion)
+	glog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL)
+	resp, err := http.Get(metadataURL)
 	if err != nil {
-		return nil, fmt.Errorf("error fetching %s: %v", metadataUrl, err)
+		return nil, fmt.Errorf("error fetching %s: %v", metadataURL, err)
 	}
 	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
-		err = fmt.Errorf("unexpected status code when reading metadata from %s: %s", metadataUrl, resp.Status)
+		err = fmt.Errorf("unexpected status code when reading metadata from %s: %s", metadataURL, resp.Status)
 		return nil, err
 	}
 
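A minimal standalone sketch of the "2012-08-10" meta_data.json decoding that the renamed struct above performs. The struct here mirrors the diff (trimmed to the renamed fields); the sample JSON values are invented for illustration, and the empty-UUID check is the condition that makes parseMetadata return ErrBadMetadata.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// metadata mirrors the Metadata struct from the diff above (trimmed).
type metadata struct {
	UUID             string `json:"uuid"`
	Hostname         string `json:"hostname"`
	AvailabilityZone string `json:"availability_zone"`
}

func main() {
	// Hypothetical 2012-08-10 meta_data.json payload.
	sample := `{"uuid":"83679162-1378-4288-a2d4-70e13ec132aa","hostname":"test.novalocal","availability_zone":"nova"}`

	var md metadata
	if err := json.NewDecoder(strings.NewReader(sample)).Decode(&md); err != nil {
		panic(err)
	}
	if md.UUID == "" {
		// parseMetadata treats an empty uuid as invalid metadata.
		fmt.Println("invalid OpenStack metadata, got empty uuid")
		return
	}
	fmt.Println(md.UUID, md.Hostname, md.AvailabilityZone)
}
```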
16 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata_test.go generated vendored

@@ -22,8 +22,8 @@ import (
 )
 
 var FakeMetadata = Metadata{
-	Uuid:             "83679162-1378-4288-a2d4-70e13ec132aa",
-	Name:             "test",
+	UUID:             "83679162-1378-4288-a2d4-70e13ec132aa",
+	Hostname:         "test",
 	AvailabilityZone: "nova",
 }
 
@@ -81,12 +81,12 @@ func TestParseMetadata(t *testing.T) {
 		t.Fatalf("Should succeed when provided with valid data: %s", err)
 	}
 
-	if md.Name != "test" {
-		t.Errorf("incorrect name: %s", md.Name)
+	if md.Hostname != "test.novalocal" {
+		t.Errorf("incorrect hostname: %s", md.Hostname)
 	}
 
-	if md.Uuid != "83679162-1378-4288-a2d4-70e13ec132aa" {
-		t.Errorf("incorrect uuid: %s", md.Uuid)
+	if md.UUID != "83679162-1378-4288-a2d4-70e13ec132aa" {
+		t.Errorf("incorrect uuid: %s", md.UUID)
 	}
 
 	if md.AvailabilityZone != "nova" {
@@ -108,4 +108,8 @@ func TestParseMetadata(t *testing.T) {
 	if md.Devices[0].Type != "disk" {
 		t.Errorf("incorrect device type: %s", md.Devices[0].Type)
 	}
+
+	if md.Devices[0].Serial != "6df1888b-f373-41cf-b960-3786e60a28ef" {
+		t.Errorf("incorrect device serial: %s", md.Devices[0].Serial)
+	}
 }
210 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go generated vendored

@@ -17,12 +17,15 @@ limitations under the License.
 package openstack
 
 import (
+	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net"
 	"net/http"
+	"os"
 	"regexp"
 	"strings"
 	"time"
@@ -48,20 +51,27 @@
 )
 
 const (
+	// ProviderName is the name of the openstack provider
 	ProviderName     = "openstack"
-	AvailabilityZone = "availability_zone"
+	availabilityZone = "availability_zone"
 	defaultTimeOut   = 60 * time.Second
 )
 
+// ErrNotFound is used to inform that the object is missing
 var ErrNotFound = errors.New("failed to find object")
+
+// ErrMultipleResults is used when we unexpectedly get back multiple results
 var ErrMultipleResults = errors.New("multiple results where only one expected")
+
+// ErrNoAddressFound is used when we cannot find an ip address for the host
 var ErrNoAddressFound = errors.New("no address found for host")
 
-// encoding.TextUnmarshaler interface for time.Duration
+// MyDuration is the encoding.TextUnmarshaler interface for time.Duration
 type MyDuration struct {
 	time.Duration
 }
 
+// UnmarshalText is used to convert from text to Duration
 func (d *MyDuration) UnmarshalText(text []byte) error {
 	res, err := time.ParseDuration(string(text))
 	if err != nil {
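A self-contained sketch of the MyDuration wrapper above. The hunk cuts off before the end of UnmarshalText; the assignment-and-return tail shown here is the natural completion implied by the surrounding context, and the main function is only a usage illustration.

```go
package main

import (
	"fmt"
	"time"
)

// MyDuration wraps time.Duration so config parsers that honor
// encoding.TextUnmarshaler can read values like "60s", as in the diff above.
type MyDuration struct {
	time.Duration
}

// UnmarshalText converts text such as "60s" or "2m" into a Duration.
func (d *MyDuration) UnmarshalText(text []byte) error {
	res, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	// Assumed completion: store the parsed value on success.
	d.Duration = res
	return nil
}

func main() {
	var d MyDuration
	if err := d.UnmarshalText([]byte("60s")); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration == 60*time.Second) // true
}
```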
@@ -71,6 +81,7 @@ func (d *MyDuration) UnmarshalText(text []byte) error {
 	return nil
 }
 
+// LoadBalancer is used for creating and maintaining load balancers
 type LoadBalancer struct {
 	network *gophercloud.ServiceClient
 	compute *gophercloud.ServiceClient
@@ -78,11 +89,12 @@ type LoadBalancer struct {
 	opts    LoadBalancerOpts
 }
 
+// LoadBalancerOpts have the options to talk to Neutron LBaaSV2 or Octavia
 type LoadBalancerOpts struct {
 	LBVersion         string `gcfg:"lb-version"`          // overrides autodetection. Only support v2.
 	UseOctavia        bool   `gcfg:"use-octavia"`         // uses Octavia V2 service catalog endpoint
-	SubnetId          string `gcfg:"subnet-id"`           // overrides autodetection.
-	FloatingNetworkId string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip.
+	SubnetID          string `gcfg:"subnet-id"`           // overrides autodetection.
+	FloatingNetworkID string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip.
 	LBMethod          string `gcfg:"lb-method"`           // default to ROUND_ROBIN.
 	LBProvider        string `gcfg:"lb-provider"`
 	CreateMonitor     bool   `gcfg:"create-monitor"`
@@ -93,16 +105,19 @@ type LoadBalancerOpts struct {
 	NodeSecurityGroupIDs []string // Do not specify, get it automatically when enable manage-security-groups. TODO(FengyunPan): move it into cache
 }
 
+// BlockStorageOpts is used to talk to Cinder service
 type BlockStorageOpts struct {
 	BSVersion       string `gcfg:"bs-version"`        // overrides autodetection. v1 or v2. Defaults to auto
 	TrustDevicePath bool   `gcfg:"trust-device-path"` // See Issue #33128
 	IgnoreVolumeAZ  bool   `gcfg:"ignore-volume-az"`
 }
 
+// RouterOpts is used for Neutron routes
 type RouterOpts struct {
-	RouterId string `gcfg:"router-id"` // required
+	RouterID string `gcfg:"router-id"` // required
}
 
+// MetadataOpts is used for configuring how to talk to metadata service or config drive
 type MetadataOpts struct {
 	SearchOrder    string     `gcfg:"search-order"`
 	RequestTimeout MyDuration `gcfg:"request-timeout"`
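The renames above change only the Go field names; the `gcfg` tags keep the INI keys in cloud.conf stable. A hedged sketch of how such tags map INI keys to fields, using a trimmed mirror of the option structs and gcfg's ReadInto (the package k8s uses for this file, assumed here as gopkg.in/gcfg.v1); the section layout and IDs are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"

	gcfg "gopkg.in/gcfg.v1"
)

// config is a trimmed mirror of the option structs in the diff above.
type config struct {
	LoadBalancer struct {
		SubnetID          string `gcfg:"subnet-id"`
		FloatingNetworkID string `gcfg:"floating-network-id"`
	}
	BlockStorage struct {
		BSVersion string `gcfg:"bs-version"`
	}
	Router struct {
		RouterID string `gcfg:"router-id"`
	}
}

func main() {
	// Hypothetical cloud.conf fragment; all IDs are made up.
	ini := `
[LoadBalancer]
subnet-id = 6e8d4b34-1bf1-4a0b-9a1c-000000000000
floating-network-id = 2f0a4b6e-0000-0000-0000-000000000000

[BlockStorage]
bs-version = auto

[Router]
router-id = 9c2f0a4b-0000-0000-0000-000000000000
`
	var cfg config
	if err := gcfg.ReadInto(&cfg, strings.NewReader(ini)); err != nil {
		panic(err)
	}
	fmt.Println(cfg.LoadBalancer.SubnetID, cfg.BlockStorage.BSVersion)
}
```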
@@ -120,16 +135,17 @@ type OpenStack struct {
 	localInstanceID string
 }
 
+// Config is used to read and store information from the cloud configuration file
 type Config struct {
 	Global struct {
-		AuthUrl    string `gcfg:"auth-url"`
+		AuthURL    string `gcfg:"auth-url"`
 		Username   string
-		UserId     string `gcfg:"user-id"`
+		UserID     string `gcfg:"user-id"`
 		Password   string
-		TenantId   string `gcfg:"tenant-id"`
+		TenantID   string `gcfg:"tenant-id"`
 		TenantName string `gcfg:"tenant-name"`
-		TrustId    string `gcfg:"trust-id"`
-		DomainId   string `gcfg:"domain-id"`
+		TrustID    string `gcfg:"trust-id"`
+		DomainID   string `gcfg:"domain-id"`
 		DomainName string `gcfg:"domain-name"`
 		Region     string
 		CAFile     string `gcfg:"ca-file"`
@@ -141,7 +157,7 @@ type Config struct {
 }
 
 func init() {
-	RegisterMetrics()
+	registerMetrics()
 
 	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
 		cfg, err := readConfig(config)
@@ -154,13 +170,13 @@ func init() {
 
 func (cfg Config) toAuthOptions() gophercloud.AuthOptions {
 	return gophercloud.AuthOptions{
-		IdentityEndpoint: cfg.Global.AuthUrl,
+		IdentityEndpoint: cfg.Global.AuthURL,
 		Username:         cfg.Global.Username,
-		UserID:           cfg.Global.UserId,
+		UserID:           cfg.Global.UserID,
 		Password:         cfg.Global.Password,
-		TenantID:         cfg.Global.TenantId,
+		TenantID:         cfg.Global.TenantID,
 		TenantName:       cfg.Global.TenantName,
-		DomainID:         cfg.Global.DomainId,
+		DomainID:         cfg.Global.DomainID,
 		DomainName:       cfg.Global.DomainName,
 
 		// Persistent service, so we need to be able to renew tokens.
@@ -170,22 +186,64 @@ func (cfg Config) toAuthOptions() gophercloud.AuthOptions {
 
 func (cfg Config) toAuth3Options() tokens3.AuthOptions {
 	return tokens3.AuthOptions{
-		IdentityEndpoint: cfg.Global.AuthUrl,
+		IdentityEndpoint: cfg.Global.AuthURL,
 		Username:         cfg.Global.Username,
-		UserID:           cfg.Global.UserId,
+		UserID:           cfg.Global.UserID,
 		Password:         cfg.Global.Password,
-		DomainID:         cfg.Global.DomainId,
+		DomainID:         cfg.Global.DomainID,
 		DomainName:       cfg.Global.DomainName,
 		AllowReauth:      true,
 	}
 }
 
+// configFromEnv allows setting up credentials etc using the
+// standard OS_* OpenStack client environment variables.
+func configFromEnv() (cfg Config, ok bool) {
+	cfg.Global.AuthURL = os.Getenv("OS_AUTH_URL")
+	cfg.Global.Username = os.Getenv("OS_USERNAME")
+	cfg.Global.Password = os.Getenv("OS_PASSWORD")
+	cfg.Global.Region = os.Getenv("OS_REGION_NAME")
+	cfg.Global.UserID = os.Getenv("OS_USER_ID")
+	cfg.Global.TrustID = os.Getenv("OS_TRUST_ID")
+
+	cfg.Global.TenantID = os.Getenv("OS_TENANT_ID")
+	if cfg.Global.TenantID == "" {
+		cfg.Global.TenantID = os.Getenv("OS_PROJECT_ID")
+	}
+	cfg.Global.TenantName = os.Getenv("OS_TENANT_NAME")
+	if cfg.Global.TenantName == "" {
+		cfg.Global.TenantName = os.Getenv("OS_PROJECT_NAME")
+	}
+
+	cfg.Global.DomainID = os.Getenv("OS_DOMAIN_ID")
+	if cfg.Global.DomainID == "" {
+		cfg.Global.DomainID = os.Getenv("OS_USER_DOMAIN_ID")
+	}
+	cfg.Global.DomainName = os.Getenv("OS_DOMAIN_NAME")
+	if cfg.Global.DomainName == "" {
+		cfg.Global.DomainName = os.Getenv("OS_USER_DOMAIN_NAME")
+	}
+
+	ok = cfg.Global.AuthURL != "" &&
+		cfg.Global.Username != "" &&
+		cfg.Global.Password != "" &&
+		(cfg.Global.TenantID != "" || cfg.Global.TenantName != "" ||
+			cfg.Global.DomainID != "" || cfg.Global.DomainName != "" ||
+			cfg.Global.Region != "" || cfg.Global.UserID != "" ||
+			cfg.Global.TrustID != "")
+
+	cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", configDriveID, metadataID)
+	cfg.BlockStorage.BSVersion = "auto"
+
+	return
+}
+
 func readConfig(config io.Reader) (Config, error) {
 	if config == nil {
 		return Config{}, fmt.Errorf("no OpenStack cloud provider config file given")
 	}
 
-	var cfg Config
+	cfg, _ := configFromEnv()
 
 	// Set default values for config params
 	cfg.BlockStorage.BSVersion = "auto"
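The new configFromEnv reads each credential from a primary OS_* variable and falls back to an alternate one (OS_TENANT_NAME vs. OS_PROJECT_NAME, OS_DOMAIN_ID vs. OS_USER_DOMAIN_ID). A standalone sketch of that fallback pattern; getenvFirst is a hypothetical helper, not part of the diff, and the values are invented.

```go
package main

import (
	"fmt"
	"os"
)

// getenvFirst mirrors the fallback pattern in configFromEnv above:
// prefer the primary variable, fall back to the alternate one.
func getenvFirst(primary, fallback string) string {
	if v := os.Getenv(primary); v != "" {
		return v
	}
	return os.Getenv(fallback)
}

func main() {
	// Invented value, for illustration only.
	os.Setenv("OS_PROJECT_NAME", "demo")

	tenantName := getenvFirst("OS_TENANT_NAME", "OS_PROJECT_NAME")
	domainID := getenvFirst("OS_DOMAIN_ID", "OS_USER_DOMAIN_ID")
	fmt.Printf("tenant=%q domain=%q\n", tenantName, domainID) // tenant="demo" domain=""
}
```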
@@ -197,13 +255,13 @@ func readConfig(config io.Reader) (Config, error) {
 	return cfg, err
 }
 
-// Tiny helper for conditional unwind logic
-type Caller bool
+// caller is a tiny helper for conditional unwind logic
+type caller bool
 
-func NewCaller() Caller   { return Caller(true) }
-func (c *Caller) Disarm() { *c = false }
+func newCaller() caller   { return caller(true) }
+func (c *caller) disarm() { *c = false }
 
-func (c *Caller) Call(f func()) {
+func (c *caller) call(f func()) {
 	if *c {
 		f()
 	}
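The unexported caller type above is a one-bit "unwind" guard. The usage in setup below is an assumption about the intended pattern (defer the cleanup, disarm on success), not code from this commit.

```go
package main

import "fmt"

// caller reproduces the helper from the diff above.
type caller bool

func newCaller() caller   { return caller(true) }
func (c *caller) disarm() { *c = false }

func (c *caller) call(f func()) {
	if *c {
		f()
	}
}

func setup() error {
	unwind := newCaller()
	// If setup returns early with an error, the deferred call runs the cleanup.
	defer unwind.call(func() { fmt.Println("rolling back partial setup") })

	// ... do work that might fail ...

	// Success: disarm so the deferred cleanup becomes a no-op.
	unwind.disarm()
	return nil
}

func main() { _ = setup() }
```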
@@ -228,7 +286,7 @@ func readInstanceID(searchOrder string) (string, error) {
 		return "", err
 	}
 
-	return md.Uuid, nil
+	return md.UUID, nil
 }
 
 // check opts for OpenStack
@@ -249,16 +307,11 @@ func checkOpenStackOpts(openstackOpts *OpenStack) error {
 			return fmt.Errorf("monitor-max-retries not set in cloud provider config")
 		}
 	}
-
-	if err := checkMetadataSearchOrder(openstackOpts.metadataOpts.SearchOrder); err != nil {
-		return err
-	}
-
-	return nil
+	return checkMetadataSearchOrder(openstackOpts.metadataOpts.SearchOrder)
 }
 
 func newOpenStack(cfg Config) (*OpenStack, error) {
-	provider, err := openstack.NewClient(cfg.Global.AuthUrl)
+	provider, err := openstack.NewClient(cfg.Global.AuthURL)
 	if err != nil {
 		return nil, err
 	}
@@ -272,10 +325,10 @@ func newOpenStack(cfg Config) (*OpenStack, error) {
 		provider.HTTPClient.Transport = netutil.SetOldTransportDefaults(&http.Transport{TLSClientConfig: config})
 
 	}
-	if cfg.Global.TrustId != "" {
+	if cfg.Global.TrustID != "" {
 		opts := cfg.toAuth3Options()
 		authOptsExt := trusts.AuthOptsExt{
-			TrustID:            cfg.Global.TrustId,
+			TrustID:            cfg.Global.TrustID,
 			AuthOptionsBuilder: &opts,
 		}
 		err = openstack.AuthenticateV3(provider, authOptsExt, gophercloud.EndpointOpts{})
@@ -319,6 +372,22 @@ func mapNodeNameToServerName(nodeName types.NodeName) string {
 	return string(nodeName)
 }
 
+// GetNodeNameByID maps instanceid to types.NodeName
+func (os *OpenStack) GetNodeNameByID(instanceID string) (types.NodeName, error) {
+	client, err := os.NewComputeV2()
+	var nodeName types.NodeName
+	if err != nil {
+		return nodeName, err
+	}
+
+	server, err := servers.Get(client, instanceID).Extract()
+	if err != nil {
+		return nodeName, err
+	}
+	nodeName = mapServerToNodeName(server)
+	return nodeName, nil
+}
+
 // mapServerToNodeName maps an OpenStack Server to a k8s NodeName
 func mapServerToNodeName(server *servers.Server) types.NodeName {
 	// Node names are always lowercase, and (at least)
@@ -346,11 +415,14 @@ func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuild
 	return err
 }
 
-func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
+func getServerByName(client *gophercloud.ServiceClient, name types.NodeName, showOnlyActive bool) (*servers.Server, error) {
 	opts := servers.ListOpts{
-		Name:   fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
-		Status: "ACTIVE",
+		Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
 	}
+	if showOnlyActive {
+		opts.Status = "ACTIVE"
+	}
 
 	pager := servers.List(client, opts)
 
 	serverList := make([]servers.Server, 0, 1)
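getServerByName builds an anchored, escaped pattern so the node name is matched literally by the Nova name filter. A quick standalone check of why regexp.QuoteMeta matters (the node name here is made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	name := "node-1.example.com"
	// Same construction as in getServerByName above: anchor and escape.
	pattern := fmt.Sprintf("^%s$", regexp.QuoteMeta(name))

	re := regexp.MustCompile(pattern)
	fmt.Println(re.MatchString("node-1.example.com")) // true
	fmt.Println(re.MatchString("node-1xexample-com")) // false: the dots are escaped, not wildcards
}
```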
@@ -381,7 +453,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
 	addrs := []v1.NodeAddress{}
 
 	type Address struct {
-		IpType string `mapstructure:"OS-EXT-IPS:type"`
+		IPType string `mapstructure:"OS-EXT-IPS:type"`
 		Addr   string
 	}
 
@@ -394,7 +466,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
 	for network, addrList := range addresses {
 		for _, props := range addrList {
 			var addressType v1.NodeAddressType
-			if props.IpType == "floating" || network == "public" {
+			if props.IPType == "floating" || network == "public" {
 				addressType = v1.NodeExternalIP
 			} else {
 				addressType = v1.NodeInternalIP
@@ -432,7 +504,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
 }
 
 func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) {
-	srv, err := getServerByName(client, name)
+	srv, err := getServerByName(client, name, true)
 	if err != nil {
 		return nil, err
 	}
@@ -440,7 +512,7 @@ func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName)
 	return nodeAddresses(srv)
 }
 
-func getAddressByName(client *gophercloud.ServiceClient, name types.NodeName) (string, error) {
+func getAddressByName(client *gophercloud.ServiceClient, name types.NodeName, needIPv6 bool) (string, error) {
 	addrs, err := getAddressesByName(client, name)
 	if err != nil {
 		return "", err
@@ -449,12 +521,20 @@ func getAddressByName(client *gophercloud.ServiceClient, name types.NodeName) (s
 	}
 
 	for _, addr := range addrs {
-		if addr.Type == v1.NodeInternalIP {
+		isIPv6 := net.ParseIP(addr.Address).To4() == nil
+		if (addr.Type == v1.NodeInternalIP) && (isIPv6 == needIPv6) {
 			return addr.Address, nil
 		}
 	}
 
-	return addrs[0].Address, nil
+	for _, addr := range addrs {
+		isIPv6 := net.ParseIP(addr.Address).To4() == nil
+		if (addr.Type == v1.NodeExternalIP) && (isIPv6 == needIPv6) {
+			return addr.Address, nil
+		}
+	}
+	// It should never return an address from a different IP Address family than the one needed
+	return "", ErrNoAddressFound
 }
 
 // getAttachedInterfacesByID returns the node interfaces of the specified instance.
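The new getAddressByName filters candidates by address family using net.ParseIP(addr).To4() == nil, which is true only for IPv6 (To4 returns a non-nil 4-byte form for IPv4 addresses). A standalone check, assuming the input is a valid IP as nodeAddresses produces here:

```go
package main

import (
	"fmt"
	"net"
)

// isIPv6 replicates the family check used in the diff above.
// A valid IP string is assumed; ParseIP of garbage returns nil,
// which this simple check would also report as "IPv6".
func isIPv6(addr string) bool {
	return net.ParseIP(addr).To4() == nil
}

func main() {
	fmt.Println(isIPv6("10.0.0.7"))    // false: IPv4
	fmt.Println(isIPv6("2001:db8::1")) // true: IPv6
}
```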
@@ -477,6 +557,7 @@ func getAttachedInterfacesByID(client *gophercloud.ServiceClient, serviceID stri
 	return interfaces, nil
 }
 
+// Clusters is a no-op
 func (os *OpenStack) Clusters() (cloudprovider.Clusters, bool) {
 	return nil, false
 }
@@ -486,16 +567,12 @@ func (os *OpenStack) ProviderName() string {
 	return ProviderName
 }
 
-// ScrubDNS filters DNS settings for pods.
-func (os *OpenStack) ScrubDNS(nameServers, searches []string) ([]string, []string) {
-	return nameServers, searches
-}
-
+// HasClusterID returns true if the cluster has a clusterID
 func (os *OpenStack) HasClusterID() bool {
 	return true
 }
 
+// LoadBalancer initializes a LbaasV2 object
 func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
 	glog.V(4).Info("openstack.LoadBalancer() called")
 
@@ -532,12 +609,14 @@ func isNotFound(err error) bool {
 	return ok && e.Actual == http.StatusNotFound
 }
 
+// Zones indicates that we support zones
 func (os *OpenStack) Zones() (cloudprovider.Zones, bool) {
 	glog.V(1).Info("Claiming to support Zones")
 	return os, true
 }
 
-func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {
+// GetZone returns the current zone
+func (os *OpenStack) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
 	md, err := getMetadata(os.metadataOpts.SearchOrder)
 	if err != nil {
 		return cloudprovider.Zone{}, err
@@ -554,7 +633,7 @@ func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {
 // GetZoneByProviderID implements Zones.GetZoneByProviderID
 // This is particularly useful in external cloud providers where the kubelet
 // does not initialize node data.
-func (os *OpenStack) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
+func (os *OpenStack) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
 	instanceID, err := instanceIDFromProviderID(providerID)
 	if err != nil {
 		return cloudprovider.Zone{}, err
@@ -571,7 +650,7 @@ func (os *OpenStack) GetZoneByProviderID(providerID string) (cloudprovider.Zone,
 	}
 
 	zone := cloudprovider.Zone{
-		FailureDomain: srv.Metadata[AvailabilityZone],
+		FailureDomain: srv.Metadata[availabilityZone],
 		Region:        os.region,
 	}
 	glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone)
@@ -581,13 +660,13 @@ func (os *OpenStack) GetZoneByProviderID(providerID string) (cloudprovider.Zone,
 // GetZoneByNodeName implements Zones.GetZoneByNodeName
 // This is particularly useful in external cloud providers where the kubelet
 // does not initialize node data.
-func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) {
+func (os *OpenStack) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
 	compute, err := os.NewComputeV2()
 	if err != nil {
 		return cloudprovider.Zone{}, err
 	}
 
-	srv, err := getServerByName(compute, nodeName)
+	srv, err := getServerByName(compute, nodeName, true)
 	if err != nil {
 		if err == ErrNotFound {
 			return cloudprovider.Zone{}, cloudprovider.InstanceNotFound
@@ -596,13 +675,14 @@ func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Z
 	}
 
 	zone := cloudprovider.Zone{
-		FailureDomain: srv.Metadata[AvailabilityZone],
+		FailureDomain: srv.Metadata[availabilityZone],
 		Region:        os.region,
 	}
 	glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone)
 	return zone, nil
 }
 
+// Routes initializes routes support
 func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
 	glog.V(4).Info("openstack.Routes() called")
 
@@ -618,7 +698,7 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
 	}
 
 	if !netExts["extraroute"] {
-		glog.V(3).Infof("Neutron extraroute extension not found, required for Routes support")
+		glog.V(3).Info("Neutron extraroute extension not found, required for Routes support")
 		return nil, false
 	}
 
@@ -651,21 +731,21 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) {
 		if err != nil {
 			return nil, err
 		}
-		glog.V(3).Infof("Using Blockstorage API V1")
+		glog.V(3).Info("Using Blockstorage API V1")
 		return &VolumesV1{sClient, os.bsOpts}, nil
 	case "v2":
 		sClient, err := os.NewBlockStorageV2()
 		if err != nil {
 			return nil, err
 		}
-		glog.V(3).Infof("Using Blockstorage API V2")
+		glog.V(3).Info("Using Blockstorage API V2")
 		return &VolumesV2{sClient, os.bsOpts}, nil
 	case "v3":
 		sClient, err := os.NewBlockStorageV3()
 		if err != nil {
 			return nil, err
 		}
-		glog.V(3).Infof("Using Blockstorage API V3")
+		glog.V(3).Info("Using Blockstorage API V3")
 		return &VolumesV3{sClient, os.bsOpts}, nil
 	case "auto":
 		// Currently kubernetes support Cinder v1 / Cinder v2 / Cinder v3.
@@ -673,26 +753,26 @@
 		// If kubernetes can't initialize cinder v2 client, try to initialize cinder v1 client.
 		// Return appropriate message when kubernetes can't initialize them.
 		if sClient, err := os.NewBlockStorageV3(); err == nil {
-			glog.V(3).Infof("Using Blockstorage API V3")
+			glog.V(3).Info("Using Blockstorage API V3")
 			return &VolumesV3{sClient, os.bsOpts}, nil
 		}
 
 		if sClient, err := os.NewBlockStorageV2(); err == nil {
-			glog.V(3).Infof("Using Blockstorage API V2")
+			glog.V(3).Info("Using Blockstorage API V2")
 			return &VolumesV2{sClient, os.bsOpts}, nil
 		}
 
 		if sClient, err := os.NewBlockStorageV1(); err == nil {
-			glog.V(3).Infof("Using Blockstorage API V1")
+			glog.V(3).Info("Using Blockstorage API V1")
 			return &VolumesV1{sClient, os.bsOpts}, nil
 		}
 
-		err_txt := "BlockStorage API version autodetection failed. " +
+		errTxt := "BlockStorage API version autodetection failed. " +
 			"Please set it explicitly in cloud.conf in section [BlockStorage] with key `bs-version`"
-		return nil, errors.New(err_txt)
+		return nil, errors.New(errTxt)
 	default:
-		err_txt := fmt.Sprintf("Config error: unrecognised bs-version \"%v\"", os.bsOpts.BSVersion)
-		return nil, errors.New(err_txt)
+		errTxt := fmt.Sprintf("Config error: unrecognised bs-version \"%v\"", os.bsOpts.BSVersion)
+		return nil, errors.New(errTxt)
 	}
 }
6 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_client.go generated vendored

@@ -23,6 +23,7 @@ import (
 	"github.com/gophercloud/gophercloud/openstack"
 )
 
+// NewNetworkV2 creates a ServiceClient that may be used with the neutron v2 API
 func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) {
 	network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{
 		Region: os.region,
@@ -33,6 +34,7 @@ func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) {
 	return network, nil
 }
 
+// NewComputeV2 creates a ServiceClient that may be used with the nova v2 API
 func (os *OpenStack) NewComputeV2() (*gophercloud.ServiceClient, error) {
 	compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
 		Region: os.region,
@@ -43,6 +45,7 @@ func (os *OpenStack) NewComputeV2() (*gophercloud.ServiceClient, error) {
 	return compute, nil
 }
 
+// NewBlockStorageV1 creates a ServiceClient that may be used with the Cinder v1 API
 func (os *OpenStack) NewBlockStorageV1() (*gophercloud.ServiceClient, error) {
 	storage, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
 		Region: os.region,
@@ -53,6 +56,7 @@ func (os *OpenStack) NewBlockStorageV1() (*gophercloud.ServiceClient, error) {
 	return storage, nil
 }
 
+// NewBlockStorageV2 creates a ServiceClient that may be used with the Cinder v2 API
 func (os *OpenStack) NewBlockStorageV2() (*gophercloud.ServiceClient, error) {
 	storage, err := openstack.NewBlockStorageV2(os.provider, gophercloud.EndpointOpts{
 		Region: os.region,
@@ -63,6 +67,7 @@ func (os *OpenStack) NewBlockStorageV2() (*gophercloud.ServiceClient, error) {
 	return storage, nil
 }
 
+// NewBlockStorageV3 creates a ServiceClient that may be used with the Cinder v3 API
 func (os *OpenStack) NewBlockStorageV3() (*gophercloud.ServiceClient, error) {
 	storage, err := openstack.NewBlockStorageV3(os.provider, gophercloud.EndpointOpts{
 		Region: os.region,
@@ -73,6 +78,7 @@ func (os *OpenStack) NewBlockStorageV3() (*gophercloud.ServiceClient, error) {
 	return storage, nil
 }
 
+// NewLoadBalancerV2 creates a ServiceClient that may be used with the Neutron LBaaS v2 API
 func (os *OpenStack) NewLoadBalancerV2() (*gophercloud.ServiceClient, error) {
 	var lb *gophercloud.ServiceClient
 	var err error
38 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go generated vendored

@@ -17,6 +17,7 @@ limitations under the License.
 package openstack
 
 import (
+	"context"
 	"fmt"
 	"regexp"
 
@@ -29,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/cloudprovider"
 )
 
+// Instances encapsulates an implementation of Instances for OpenStack.
 type Instances struct {
 	compute *gophercloud.ServiceClient
 	opts    MetadataOpts
@@ -43,7 +45,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
 		return nil, false
 	}
 
-	glog.V(1).Info("Claiming to support Instances")
+	glog.V(4).Info("Claiming to support Instances")
 
 	return &Instances{
 		compute: compute,
@@ -51,21 +53,23 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
 	}, true
 }
 
-// Implementation of Instances.CurrentNodeName
+// CurrentNodeName implements Instances.CurrentNodeName
 // Note this is *not* necessarily the same as hostname.
-func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
+func (i *Instances) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
 	md, err := getMetadata(i.opts.SearchOrder)
 	if err != nil {
 		return "", err
 	}
-	return types.NodeName(md.Name), nil
+	return types.NodeName(md.Hostname), nil
 }
 
-func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
+// AddSSHKeyToAllInstances is not implemented for OpenStack
+func (i *Instances) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
 	return cloudprovider.NotImplemented
 }
 
-func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
+// NodeAddresses implements Instances.NodeAddresses
+func (i *Instances) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
 	glog.V(4).Infof("NodeAddresses(%v) called", name)
 
 	addrs, err := getAddressesByName(i.compute, name)
@@ -80,7 +84,7 @@ func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
 // NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
 // This method will not be called from the node that is requesting this ID. i.e. metadata service
 // and other local methods cannot be used here
-func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
+func (i *Instances) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
 	instanceID, err := instanceIDFromProviderID(providerID)
 
 	if err != nil {
@@ -102,8 +106,8 @@ func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddre
 }
 
 // ExternalID returns the cloud provider ID of the specified instance (deprecated).
-func (i *Instances) ExternalID(name types.NodeName) (string, error) {
-	srv, err := getServerByName(i.compute, name)
+func (i *Instances) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
+	srv, err := getServerByName(i.compute, name, true)
 	if err != nil {
 		if err == ErrNotFound {
 			return "", cloudprovider.InstanceNotFound
@@ -115,7 +119,7 @@ func (i *Instances) ExternalID(name types.NodeName) (string, error) {
 
 // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
 // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
-func (i *Instances) InstanceExistsByProviderID(providerID string) (bool, error) {
+func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
 	instanceID, err := instanceIDFromProviderID(providerID)
 	if err != nil {
 		return false, err
@@ -150,8 +154,8 @@ func (os *OpenStack) InstanceID() (string, error) {
 }
 
 // InstanceID returns the cloud provider ID of the specified instance.
-func (i *Instances) InstanceID(name types.NodeName) (string, error) {
-	srv, err := getServerByName(i.compute, name)
+func (i *Instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
+	srv, err := getServerByName(i.compute, name, true)
 	if err != nil {
 		if err == ErrNotFound {
 			return "", cloudprovider.InstanceNotFound
@@ -166,7 +170,7 @@ func (i *Instances) InstanceID(name types.NodeName) (string, error) {
 // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
 // This method will not be called from the node that is requesting this ID. i.e. metadata service
 // and other local methods cannot be used here
-func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error) {
+func (i *Instances) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
 	instanceID, err := instanceIDFromProviderID(providerID)
 
 	if err != nil {
@@ -183,8 +187,8 @@ func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error)
 }
 
 // InstanceType returns the type of the specified instance.
-func (i *Instances) InstanceType(name types.NodeName) (string, error) {
-	srv, err := getServerByName(i.compute, name)
+func (i *Instances) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
+	srv, err := getServerByName(i.compute, name, true)
 
 	if err != nil {
 		return "", err
@@ -212,9 +216,9 @@ func srvInstanceType(srv *servers.Server) (string, error) {
 // See cloudprovider.GetInstanceProviderID and Instances.InstanceID.
 func instanceIDFromProviderID(providerID string) (instanceID string, err error) {
 	// If Instances.InstanceID or cloudprovider.GetInstanceProviderID is changed, the regexp should be changed too.
-	var providerIdRegexp = regexp.MustCompile(`^` + ProviderName + `:///([^/]+)$`)
+	var providerIDRegexp = regexp.MustCompile(`^` + ProviderName + `:///([^/]+)$`)
 
-	matches := providerIdRegexp.FindStringSubmatch(providerID)
+	matches := providerIDRegexp.FindStringSubmatch(providerID)
 	if len(matches) != 2 {
 		return "", fmt.Errorf("ProviderID \"%s\" didn't match expected format \"openstack:///InstanceID\"", providerID)
 	}
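instanceIDFromProviderID expects the `openstack:///InstanceID` format and extracts the instance ID with the anchored regexp above. A standalone check of that extraction (the UUID is borrowed from the test fixture earlier in this diff):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in the diff, with ProviderName = "openstack" inlined.
	providerIDRegexp := regexp.MustCompile(`^openstack:///([^/]+)$`)

	matches := providerIDRegexp.FindStringSubmatch("openstack:///83679162-1378-4288-a2d4-70e13ec132aa")
	if len(matches) != 2 {
		fmt.Println("providerID didn't match expected format \"openstack:///InstanceID\"")
		return
	}
	fmt.Println("instance ID:", matches[1])
}
```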
283 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go generated vendored

@@ -17,8 +17,10 @@ limitations under the License.
 package openstack
 
 import (
+	"context"
 	"fmt"
 	"net"
+	"reflect"
 	"strings"
 	"time"
 
@@ -52,7 +54,7 @@ const (
 	// going into ACTIVE loadbalancer provisioning status. Starting with 1
 	// seconds, multiplying by 1.2 with each step and taking 19 steps at maximum
 	// it will time out after 128s, which roughly corresponds to 120s
-	loadbalancerActiveInitDealy = 1 * time.Second
+	loadbalancerActiveInitDelay = 1 * time.Second
 	loadbalancerActiveFactor    = 1.2
 	loadbalancerActiveSteps     = 19
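The comment's arithmetic is a geometric series: starting at 1s and multiplying by 1.2, the sum of the first 18 sleep intervals is (1.2^18 − 1) / 0.2 ≈ 128.1s, which matches the quoted 128s (whether the final step actually sleeps depends on the wait implementation, so treat this as a back-of-the-envelope check):

```go
package main

import "fmt"

func main() {
	// Sum of 1s * 1.2^k for k = 0..17, i.e. 18 sleeps between 19 attempts.
	total, d := 0.0, 1.0
	for i := 0; i < 18; i++ {
		total += d
		d *= 1.2
	}
	fmt.Printf("%.1fs\n", total) // ~128.1s
}
```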
@@ -60,14 +62,14 @@ const (
 	// waiting for delete operation to complete. Starting with 1
 	// seconds, multiplying by 1.2 with each step and taking 13 steps at maximum
 	// it will time out after 32s, which roughly corresponds to 30s
-	loadbalancerDeleteInitDealy = 1 * time.Second
+	loadbalancerDeleteInitDelay = 1 * time.Second
 	loadbalancerDeleteFactor    = 1.2
 	loadbalancerDeleteSteps     = 13
 
 	activeStatus = "ACTIVE"
 	errorStatus  = "ERROR"
 
-	ServiceAnnotationLoadBalancerFloatingNetworkId = "loadbalancer.openstack.org/floating-network-id"
+	ServiceAnnotationLoadBalancerFloatingNetworkID = "loadbalancer.openstack.org/floating-network-id"
 
 	// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
 	// to indicate that we want an internal loadbalancer service.
@@ -75,7 +77,7 @@ const (
 	ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/openstack-internal-load-balancer"
 )
 
-// LoadBalancer implementation for LBaaS v2
+// LbaasV2 is a LoadBalancer implementation for Neutron LBaaS v2 API
 type LbaasV2 struct {
 	LoadBalancer
 }
@@ -328,7 +330,7 @@ func getSecurityGroupRules(client *gophercloud.ServiceClient, opts rules.ListOpt
 
 func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, loadbalancerID string) (string, error) {
 	backoff := wait.Backoff{
-		Duration: loadbalancerActiveInitDealy,
+		Duration: loadbalancerActiveInitDelay,
 		Factor:   loadbalancerActiveFactor,
 		Steps:    loadbalancerActiveSteps,
 	}
@@ -358,7 +360,7 @@ func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient,
 
 func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID string) error {
 	backoff := wait.Backoff{
-		Duration: loadbalancerDeleteInitDealy,
+		Duration: loadbalancerDeleteInitDelay,
 		Factor:   loadbalancerDeleteFactor,
 		Steps:    loadbalancerDeleteSteps,
 	}
@@ -367,12 +369,10 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID s
 		if err != nil {
 			if err == ErrNotFound {
 				return true, nil
-			} else {
-				return false, err
 			}
-		} else {
-			return false, nil
+			return false, err
 		}
+		return false, nil
 	})
 
 	if err == wait.ErrWaitTimeout {
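A sketch of how the backoff constants plug into wait.ExponentialBackoff, in the shape of waitLoadbalancerActiveProvisioningStatus above. This is a minimal stand-in, not the vendored function: fakeProvisioningStatus replaces the real LBaaS API call, and the constants are inlined.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fakeProvisioningStatus is a hypothetical stand-in for polling the
// load balancer's provisioning status via the LBaaS API.
func fakeProvisioningStatus() string { return "ACTIVE" }

func main() {
	backoff := wait.Backoff{
		Duration: 1 * time.Second, // loadbalancerActiveInitDelay
		Factor:   1.2,             // loadbalancerActiveFactor
		Steps:    19,              // loadbalancerActiveSteps
	}

	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// Done when ACTIVE; returning (false, nil) retries with backoff.
		return fakeProvisioningStatus() == "ACTIVE", nil
	})
	fmt.Println(attempts, err) // 1 <nil> with the always-ACTIVE fake
}
```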
@@ -441,7 +441,7 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter
 	createOpts := loadbalancers.CreateOpts{
 		Name:        name,
 		Description: fmt.Sprintf("Kubernetes external service %s", name),
-		VipSubnetID: lbaas.opts.SubnetId,
+		VipSubnetID: lbaas.opts.SubnetID,
 		Provider:    lbaas.opts.LBProvider,
 	}
 
@@ -457,7 +457,8 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter
 	return loadbalancer, nil
 }
 
-func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
+// GetLoadBalancer returns whether the specified load balancer exists and its status
+func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
 	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
 	loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName)
 	if err == ErrNotFound {
@@ -484,7 +485,7 @@ func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *v1.Service) (
 }
 
 // The LB needs to be configured with instance addresses on the same
-// subnet as the LB (aka opts.SubnetId). Currently we're just
+// subnet as the LB (aka opts.SubnetID). Currently we're just
 // guessing that the node's InternalIP is the right address - and that
 // should be sufficient for all "normal" cases.
 func nodeAddressForLB(node *v1.Node) (string, error) {
@@ -537,7 +538,7 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string,
 	for _, intf := range interfaces {
 		for _, fixedIP := range intf.FixedIPs {
 			if fixedIP.IPAddress == ipAddress {
-				return intf.NetID, nil
+				return fixedIP.SubnetID, nil
 			}
 		}
 	}
@@ -551,7 +552,7 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1
 
 	for _, node := range nodes {
 		nodeName := types.NodeName(node.Name)
-		srv, err := getServerByName(compute, nodeName)
+		srv, err := getServerByName(compute, nodeName, true)
 		if err != nil {
 			return nodeSecurityGroupIDs.List(), err
 		}
@@ -568,8 +569,23 @@
 	return nodeSecurityGroupIDs.List(), nil
 }
 
-// getFloatingNetworkIdForLB returns a floating-network-id for cluster.
-func getFloatingNetworkIdForLB(client *gophercloud.ServiceClient) (string, error) {
+// isSecurityGroupNotFound return true while 'err' is object of gophercloud.ErrResourceNotFound
+func isSecurityGroupNotFound(err error) bool {
+	errType := reflect.TypeOf(err).String()
+	errTypeSlice := strings.Split(errType, ".")
+	errTypeValue := ""
+	if len(errTypeSlice) != 0 {
+		errTypeValue = errTypeSlice[len(errTypeSlice)-1]
+	}
+	if errTypeValue == "ErrResourceNotFound" {
+		return true
+	}
+
+	return false
+}
+
+// getFloatingNetworkIDForLB returns a floating-network-id for cluster.
+func getFloatingNetworkIDForLB(client *gophercloud.ServiceClient) (string, error) {
 	var floatingNetworkIds []string
 
 	type NetworkWithExternalExt struct {
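isSecurityGroupNotFound compares the last dot-separated segment of the error's dynamic type name; plausibly this is done so that both the value form ("gophercloud.ErrResourceNotFound") and the pointer form ("*gophercloud.ErrResourceNotFound") match, which a single type assertion would not. A standalone sketch with a stand-in error type:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// errResourceNotFound is a stand-in for gophercloud.ErrResourceNotFound.
type errResourceNotFound struct{}

func (errResourceNotFound) Error() string { return "resource not found" }

// isNotFoundByName follows the same shape as isSecurityGroupNotFound above.
func isNotFoundByName(err error) bool {
	errType := reflect.TypeOf(err).String() // "main.errResourceNotFound" or "*main.errResourceNotFound"
	parts := strings.Split(errType, ".")
	return parts[len(parts)-1] == "errResourceNotFound"
}

func main() {
	fmt.Println(isNotFoundByName(errResourceNotFound{}))  // true: value form
	fmt.Println(isNotFoundByName(&errResourceNotFound{})) // true: pointer form
}
```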
@@ -619,23 +635,24 @@ func getFloatingNetworkIdForLB(client *gophercloud.ServiceClient) (string, error
 // a list of regions (from config) and query/create loadbalancers in
 // each region.
 
-func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one.
+func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
 	glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations)
 
 	if len(nodes) == 0 {
 		return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name)
 	}
 
-	if len(lbaas.opts.SubnetId) == 0 {
-		// Get SubnetId automatically.
-		// The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node.
+	if len(lbaas.opts.SubnetID) == 0 {
+		// Get SubnetID automatically.
+		// The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
 		subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0])
 		if err != nil {
 			glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
 			return nil, fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+
 				"and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err)
 		}
-		lbaas.opts.SubnetId = subnetID
+		lbaas.opts.SubnetID = subnetID
 	}
 
 	ports := apiService.Spec.Ports
@@ -643,10 +660,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 		return nil, fmt.Errorf("no ports provided to openstack load balancer")
 	}
 
-	floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkId, lbaas.opts.FloatingNetworkId)
+	floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkID, lbaas.opts.FloatingNetworkID)
 	if len(floatingPool) == 0 {
 		var err error
-		floatingPool, err = getFloatingNetworkIdForLB(lbaas.network)
+		floatingPool, err = getFloatingNetworkIDForLB(lbaas.network)
 		if err != nil {
 			glog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
 		}
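The floating pool lookup above resolves in three stages: a per-Service annotation, then the cloud-config default, then autodetection from Neutron. getStringFromServiceAnnotation itself is not shown in this diff; the sketch below is a hedged reconstruction of that precedence with invented IDs.

```go
package main

import "fmt"

// stringFromAnnotation is a hypothetical mirror of the helper used above:
// the annotation value wins, otherwise the configured default is returned.
func stringFromAnnotation(annotations map[string]string, key, defaultValue string) string {
	if v, ok := annotations[key]; ok {
		return v
	}
	return defaultValue
}

func main() {
	annotations := map[string]string{
		"loadbalancer.openstack.org/floating-network-id": "2f0a4b6e-0000-0000-0000-000000000000",
	}

	// The per-Service annotation overrides FloatingNetworkID from cloud.conf;
	// an empty result would then trigger getFloatingNetworkIDForLB autodetection.
	pool := stringFromAnnotation(annotations,
		"loadbalancer.openstack.org/floating-network-id", "config-default-id")
	fmt.Println(pool)
}
```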
@@ -714,7 +731,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 		glog.V(2).Infof("LoadBalancer %s already exists", name)
 	}
 
-	waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+	provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+	}
 
 	lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod)
 	if lbmethod == "" {
@@ -739,7 +759,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 			// Unknown error, retry later
 			return nil, fmt.Errorf("error creating LB listener: %v", err)
 		}
-		waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+		}
 	}
 
 	glog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID)
@@ -765,7 +788,11 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 			// Unknown error, retry later
 			return nil, fmt.Errorf("error creating pool for listener %s: %v", listener.ID, err)
 		}
-		waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+		}
+
 	}
 
 	glog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID)
@@ -790,13 +817,16 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 			_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
 				ProtocolPort: int(port.NodePort),
 				Address:      addr,
-				SubnetID:     lbaas.opts.SubnetId,
+				SubnetID:     lbaas.opts.SubnetID,
 			}).Extract()
 			if err != nil {
 				return nil, fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err)
 			}
 
-			waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			if err != nil {
+				return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+			}
 		} else {
 			// After all members have been processed, remaining members are deleted as obsolete.
 			members = popMember(members, addr, int(port.NodePort))
@@ -812,7 +842,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 			if err != nil && !isNotFound(err) {
 				return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
 			}
-			waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			if err != nil {
+				return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+			}
 		}
 
 		monitorID := pool.MonitorID
@@ -828,7 +861,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 			if err != nil {
 				return nil, fmt.Errorf("error creating LB pool healthmonitor: %v", err)
 			}
-			waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			if err != nil {
+				return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+			}
 			monitorID = monitor.ID
 		} else if lbaas.opts.CreateMonitor == false {
 			glog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID)
@@ -856,7 +892,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 		if err != nil && !isNotFound(err) {
 			return nil, fmt.Errorf("error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err)
 		}
-		waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+		}
 	}
 	// get and delete pool members
 	members, err := getMembersByPoolID(lbaas.lb, pool.ID)
@@ -870,7 +909,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 			if err != nil && !isNotFound(err) {
 				return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
 			}
-			waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+			if err != nil {
+				return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+			}
 		}
 	}
 	glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID)
@@ -879,19 +921,25 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 		if err != nil && !isNotFound(err) {
 			return nil, fmt.Errorf("error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err)
 		}
-		waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+		}
 		}
 		// delete listener
 		err = listeners.Delete(lbaas.lb, listener.ID).ExtractErr()
 		if err != nil && !isNotFound(err) {
 			return nil, fmt.Errorf("error deleteting obsolete listener: %v", err)
 		}
-		waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
+		}
 		glog.V(2).Infof("Deleted obsolete listener: %s", listener.ID)
 	}
 
 	portID := loadbalancer.VipPortID
-	floatIP, err := getFloatingIPByPortID(lbaas.lb, portID)
+	floatIP, err := getFloatingIPByPortID(lbaas.network, portID)
 	if err != nil && err != ErrNotFound {
 		return nil, fmt.Errorf("error getting floating ip for port %s: %v", portID, err)
 	}
@@ -925,17 +973,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
 		err := lbaas.ensureSecurityGroup(clusterName, apiService, nodes, loadbalancer)
 		if err != nil {
 			// cleanup what was created so far
-			_ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService)
+			_ = lbaas.EnsureLoadBalancerDeleted(ctx, clusterName, apiService)
 			return status, err
 		}
-
-		// delete the old Security Group for the service
-		// Related to #53764
-		// TODO(FengyunPan): Remove it at V1.10
-		err = lbaas.EnsureOldSecurityGroupDeleted(clusterName, apiService)
-		if err != nil {
-			return status, fmt.Errorf("Failed to delete the Security Group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
-		}
 	}
 
 	return status, nil
@@ -970,10 +1010,8 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 	lbSecGroupName := getSecurityGroupName(apiService)
 	lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName)
 	if err != nil {
-		// check whether security group does not exist
-		_, ok := err.(*gophercloud.ErrResourceNotFound)
-		if ok {
-			// create it later
+		// If the security group of LB not exist, create it later
+		if isSecurityGroupNotFound(err) {
 			lbSecGroupID = ""
 		} else {
 			return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err)
@@ -1019,7 +1057,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 			_, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract()
 
 			if err != nil {
-				return fmt.Errorf("error occured creating rule for SecGroup %s: %v", lbSecGroup.ID, err)
+				return fmt.Errorf("error occurred creating rule for SecGroup %s: %v", lbSecGroup.ID, err)
 			}
 		}
 	}
@@ -1037,7 +1075,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 	_, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract()
 
 	if err != nil {
-		return fmt.Errorf("error occured creating rule for SecGroup %s: %v", lbSecGroup.ID, err)
+		return fmt.Errorf("error occurred creating rule for SecGroup %s: %v", lbSecGroup.ID, err)
 	}
 
 	lbSecGroupRuleCreateOpts = rules.CreateOpts{
@@ -1052,7 +1090,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 
 	_, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract()
 	if err != nil {
-		return fmt.Errorf("error occured creating rule for SecGroup %s: %v", lbSecGroup.ID, err)
+		return fmt.Errorf("error occurred creating rule for SecGroup %s: %v", lbSecGroup.ID, err)
	}
 
 	// get security groups of port
@@ -1074,10 +1112,10 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 	// update loadbalancer vip port
 	if !found {
 		port.SecurityGroups = append(port.SecurityGroups, lbSecGroup.ID)
-		update_opts := neutronports.UpdateOpts{SecurityGroups: &port.SecurityGroups}
-		res := neutronports.Update(lbaas.network, portID, update_opts)
+		updateOpts := neutronports.UpdateOpts{SecurityGroups: &port.SecurityGroups}
+		res := neutronports.Update(lbaas.network, portID, updateOpts)
 		if res.Err != nil {
-			msg := fmt.Sprintf("Error occured updating port %s for loadbalancer service %s/%s: %v", portID, apiService.Namespace, apiService.Name, res.Err)
+			msg := fmt.Sprintf("Error occurred updating port %s for loadbalancer service %s/%s: %v", portID, apiService.Namespace, apiService.Name, res.Err)
 			return fmt.Errorf(msg)
 		}
 	}
@@ -1107,7 +1145,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
// Add the rules in the Node Security Group
|
||||
err = createNodeSecurityGroup(lbaas.network, nodeSecurityGroupID, int(port.NodePort), port.Protocol, lbSecGroupID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error occured creating security group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
|
||||
return fmt.Errorf("error occurred creating security group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1115,20 +1153,21 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
// UpdateLoadBalancer updates hosts under the specified load balancer.
|
||||
func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
|
||||
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes)
|
||||
|
||||
if len(lbaas.opts.SubnetId) == 0 && len(nodes) > 0 {
|
||||
// Get SubnetId automatically.
|
||||
// The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node.
|
||||
if len(lbaas.opts.SubnetID) == 0 && len(nodes) > 0 {
|
||||
// Get SubnetID automatically.
|
||||
// The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
|
||||
subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0])
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
|
||||
return fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+
|
||||
"and failed to find subnet-id from OpenStack: %v", service.Namespace, service.Name, err)
|
||||
}
|
||||
lbaas.opts.SubnetId = subnetID
|
||||
lbaas.opts.SubnetID = subnetID
|
||||
}
|
||||
|
||||
ports := service.Spec.Ports
|
||||
@ -1217,12 +1256,15 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
|
||||
_, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
|
||||
Address: addr,
|
||||
ProtocolPort: int(port.NodePort),
|
||||
SubnetID: lbaas.opts.SubnetId,
|
||||
SubnetID: lbaas.opts.SubnetID,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any old members for this port
|
||||
@ -1235,7 +1277,10 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1321,7 +1366,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser
|
||||
// Add the rules in the Node Security Group
|
||||
err = createNodeSecurityGroup(lbaas.network, nodeSecurityGroupID, int(port.NodePort), port.Protocol, lbSecGroupID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error occured creating security group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
|
||||
return fmt.Errorf("error occurred creating security group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1329,7 +1374,8 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
|
||||
// EnsureLoadBalancerDeleted deletes the specified load balancer
|
||||
func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
|
||||
glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName)
|
||||
|
||||
@ -1341,7 +1387,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
return nil
|
||||
}
|
||||
|
||||
if loadbalancer != nil && loadbalancer.VipPortID != "" {
|
||||
if loadbalancer.VipPortID != "" {
|
||||
portID := loadbalancer.VipPortID
|
||||
floatingIP, err := getFloatingIPByPortID(lbaas.network, portID)
|
||||
if err != nil && err != ErrNotFound {
|
||||
@ -1396,7 +1442,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
|
||||
}
|
||||
}
|
||||
|
||||
// delete all members and pools
|
||||
@ -1407,7 +1456,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
|
||||
}
|
||||
}
|
||||
|
||||
// delete pool
|
||||
@ -1415,7 +1467,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
|
||||
}
|
||||
}
|
||||
|
||||
// delete all listeners
|
||||
@ -1424,7 +1479,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err)
|
||||
}
|
||||
}
|
||||
|
||||
// delete loadbalancer
|
||||
@ -1432,7 +1490,10 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
if err != nil && !isNotFound(err) {
|
||||
return err
|
||||
}
|
||||
waitLoadbalancerDeleted(lbaas.lb, loadbalancer.ID)
|
||||
err = waitLoadbalancerDeleted(lbaas.lb, loadbalancer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete loadbalancer: %v", err)
|
||||
}
|
||||
|
||||
// Delete the Security Group
|
||||
if lbaas.opts.ManageSecurityGroups {
|
||||
@ -1440,14 +1501,6 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
|
||||
}
|
||||
|
||||
// delete the old Security Group for the service
|
||||
// Related to #53764
|
||||
// TODO(FengyunPan): Remove it at V1.10
|
||||
err = lbaas.EnsureOldSecurityGroupDeleted(clusterName, service)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to delete the Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -1459,77 +1512,11 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
|
||||
lbSecGroupName := getSecurityGroupName(service)
|
||||
lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName)
|
||||
if err != nil {
|
||||
// check whether security group does not exist
|
||||
_, ok := err.(*gophercloud.ErrResourceNotFound)
|
||||
if ok {
|
||||
if isSecurityGroupNotFound(err) {
|
||||
// It is OK when the security group has been deleted by others.
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID)
|
||||
if lbSecGroup.Err != nil && !isNotFound(lbSecGroup.Err) {
|
||||
return lbSecGroup.Err
|
||||
}
|
||||
|
||||
if len(lbaas.opts.NodeSecurityGroupIDs) == 0 {
|
||||
// Just happen when nodes have not Security Group, or should not happen
|
||||
// UpdateLoadBalancer and EnsureLoadBalancer can set lbaas.opts.NodeSecurityGroupIDs when it is empty
|
||||
// And service controller call UpdateLoadBalancer to set lbaas.opts.NodeSecurityGroupIDs when controller manager service is restarted.
|
||||
glog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s",
|
||||
service.Namespace, service.Name)
|
||||
} else {
|
||||
// Delete the rules in the Node Security Group
|
||||
for _, nodeSecurityGroupID := range lbaas.opts.NodeSecurityGroupIDs {
|
||||
opts := rules.ListOpts{
|
||||
SecGroupID: nodeSecurityGroupID,
|
||||
RemoteGroupID: lbSecGroupID,
|
||||
}
|
||||
secGroupRules, err := getSecurityGroupRules(lbaas.network, opts)
|
||||
|
||||
if err != nil && !isNotFound(err) {
|
||||
msg := fmt.Sprintf("Error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, nodeSecurityGroupID, err)
|
||||
return fmt.Errorf(msg)
|
||||
}
|
||||
|
||||
for _, rule := range secGroupRules {
|
||||
res := rules.Delete(lbaas.network, rule.ID)
|
||||
if res.Err != nil && !isNotFound(res.Err) {
|
||||
return fmt.Errorf("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getOldSecurityGroupName is used to get the old security group name
|
||||
// Related to #53764
|
||||
// TODO(FengyunPan): Remove it at V1.10
|
||||
func getOldSecurityGroupName(clusterName string, service *v1.Service) string {
|
||||
return fmt.Sprintf("lb-sg-%s-%v", clusterName, service.Name)
|
||||
}
|
||||
|
||||
// EnsureOldSecurityGroupDeleted deleting old security group for specific loadbalancer service.
|
||||
// Related to #53764
|
||||
// TODO(FengyunPan): Remove it at V1.10
|
||||
func (lbaas *LbaasV2) EnsureOldSecurityGroupDeleted(clusterName string, service *v1.Service) error {
|
||||
glog.V(4).Infof("EnsureOldSecurityGroupDeleted(%v, %v)", clusterName, service)
|
||||
// Generate Name
|
||||
lbSecGroupName := getOldSecurityGroupName(clusterName, service)
|
||||
lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName)
|
||||
if err != nil {
|
||||
// check whether security group does not exist
|
||||
_, ok := err.(*gophercloud.ErrResourceNotFound)
|
||||
if ok {
|
||||
// It is OK when the security group has been deleted by others.
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err)
|
||||
}
|
||||
return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err)
|
||||
}
|
||||
|
||||
lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID)
|
||||
|
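A pattern recurs throughout the load balancer hunks above: every fire-and-forget call to waitLoadbalancerActiveProvisioningStatus is replaced by one that checks the returned provisioning status and error, so a load balancer stuck outside the ACTIVE state now fails the operation instead of being silently ignored. The sketch below shows the general shape of such a polling helper, assuming a simplified getter; it is illustrative only, not the vendored implementation.

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitActiveSketch polls get() with exponential backoff until the reported
// status is ACTIVE, returning the last observed status and any error.
// Hypothetical helper for illustration; the backoff values are arbitrary.
func waitActiveSketch(get func() (string, error)) (string, error) {
	backoff := wait.Backoff{Duration: time.Second, Factor: 1.2, Steps: 13}
	var status string
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		var pollErr error
		status, pollErr = get()
		if pollErr != nil {
			return false, pollErr
		}
		return status == "ACTIVE", nil
	})
	return status, err
}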
24
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_metrics.go
generated
vendored
@ -19,32 +19,32 @@ package openstack
import "github.com/prometheus/client_golang/prometheus"

const (
OpenstackSubsystem = "openstack"
OpenstackOperationKey = "cloudprovider_openstack_api_request_duration_seconds"
OpenstackOperationErrorKey = "cloudprovider_openstack_api_request_errors"
openstackSubsystem = "openstack"
openstackOperationKey = "cloudprovider_openstack_api_request_duration_seconds"
openstackOperationErrorKey = "cloudprovider_openstack_api_request_errors"
)

var (
OpenstackOperationsLatency = prometheus.NewHistogramVec(
openstackOperationsLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: OpenstackSubsystem,
Name: OpenstackOperationKey,
Subsystem: openstackSubsystem,
Name: openstackOperationKey,
Help: "Latency of openstack api call",
},
[]string{"request"},
)

OpenstackApiRequestErrors = prometheus.NewCounterVec(
openstackAPIRequestErrors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: OpenstackSubsystem,
Name: OpenstackOperationErrorKey,
Subsystem: openstackSubsystem,
Name: openstackOperationErrorKey,
Help: "Cumulative number of openstack Api call errors",
},
[]string{"request"},
)
)

func RegisterMetrics() {
prometheus.MustRegister(OpenstackOperationsLatency)
prometheus.MustRegister(OpenstackApiRequestErrors)
func registerMetrics() {
prometheus.MustRegister(openstackOperationsLatency)
prometheus.MustRegister(openstackAPIRequestErrors)
}
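The metrics hunk above unexports every identifier in the file (OpenstackOperationsLatency becomes openstackOperationsLatency, RegisterMetrics becomes registerMetrics), keeping the Prometheus collectors package-private. A minimal sketch of the resulting shape, with placeholder metric names rather than the vendored ones:

package main

import "github.com/prometheus/client_golang/prometheus"

// Package-private collector plus a private registration hook; code inside
// the package calls registerExampleMetrics once at startup.
var exampleRequestDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Subsystem: "example",
		Name:      "api_request_duration_seconds",
		Help:      "Latency of example API calls",
	},
	[]string{"request"},
)

func registerExampleMetrics() {
	prometheus.MustRegister(exampleRequestDuration)
}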
75
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go
generated
vendored
@ -17,7 +17,9 @@ limitations under the License.
package openstack

import (
"context"
"errors"
"net"

"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
@ -29,17 +31,19 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
)

var ErrNoRouterId = errors.New("router-id not set in cloud provider config")
var errNoRouterID = errors.New("router-id not set in cloud provider config")

// Routes implements the cloudprovider.Routes for OpenStack clouds
type Routes struct {
compute *gophercloud.ServiceClient
network *gophercloud.ServiceClient
opts RouterOpts
}

// NewRoutes creates a new instance of Routes
func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, opts RouterOpts) (cloudprovider.Routes, error) {
if opts.RouterId == "" {
return nil, ErrNoRouterId
if opts.RouterID == "" {
return nil, errNoRouterID
}

return &Routes{
@ -49,11 +53,12 @@ func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceC
}, nil
}

func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
// ListRoutes lists all managed routes that belong to the specified clusterName
func (r *Routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
glog.V(4).Infof("ListRoutes(%v)", clusterName)

nodeNamesByAddr := make(map[string]types.NodeName)
err := foreachServer(r.compute, servers.ListOpts{Status: "ACTIVE"}, func(srv *servers.Server) (bool, error) {
err := foreachServer(r.compute, servers.ListOpts{}, func(srv *servers.Server) (bool, error) {
addrs, err := nodeAddresses(srv)
if err != nil {
return false, err
@ -70,22 +75,18 @@ func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error)
return nil, err
}

router, err := routers.Get(r.network, r.opts.RouterId).Extract()
router, err := routers.Get(r.network, r.opts.RouterID).Extract()
if err != nil {
return nil, err
}

var routes []*cloudprovider.Route
for _, item := range router.Routes {
nodeName, ok := nodeNamesByAddr[item.NextHop]
if !ok {
// Not one of our routes?
glog.V(4).Infof("Skipping route with unknown nexthop %v", item.NextHop)
continue
}
nodeName, foundNode := nodeNamesByAddr[item.NextHop]
route := cloudprovider.Route{
Name: item.DestinationCIDR,
TargetNode: nodeName,
TargetNode: nodeName, //empty if NextHop is unknown
Blackhole: !foundNode,
DestinationCIDR: item.DestinationCIDR,
}
routes = append(routes, &route)
@ -140,19 +141,23 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron
return unwinder, nil
}

func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
// CreateRoute creates the described managed route
func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error {
glog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route)

onFailure := NewCaller()
onFailure := newCaller()

ip, _, _ := net.ParseCIDR(route.DestinationCIDR)
isCIDRv6 := ip.To4() == nil
addr, err := getAddressByName(r.compute, route.TargetNode, isCIDRv6)

addr, err := getAddressByName(r.compute, route.TargetNode)
if err != nil {
return err
}

glog.V(4).Infof("Using nexthop %v for node %v", addr, route.TargetNode)

router, err := routers.Get(r.network, r.opts.RouterId).Extract()
router, err := routers.Get(r.network, r.opts.RouterID).Extract()
if err != nil {
return err
}
@ -175,7 +180,7 @@ func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudpr
if err != nil {
return err
}
defer onFailure.Call(unwind)
defer onFailure.call(unwind)

// get the port of addr on target node.
portID, err := getPortIDByIP(r.compute, route.TargetNode, addr)
@ -204,25 +209,29 @@ func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudpr
if err != nil {
return err
}
defer onFailure.Call(unwind)
defer onFailure.call(unwind)
}

glog.V(4).Infof("Route created: %v", route)
onFailure.Disarm()
onFailure.disarm()
return nil
}

func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
// DeleteRoute deletes the specified managed route
func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
glog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route)

onFailure := NewCaller()
onFailure := newCaller()

ip, _, _ := net.ParseCIDR(route.DestinationCIDR)
isCIDRv6 := ip.To4() == nil
addr, err := getAddressByName(r.compute, route.TargetNode, isCIDRv6)

addr, err := getAddressByName(r.compute, route.TargetNode)
if err != nil {
return err
}

router, err := routers.Get(r.network, r.opts.RouterId).Extract()
router, err := routers.Get(r.network, r.opts.RouterID).Extract()
if err != nil {
return err
}
@ -249,7 +258,7 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err
if err != nil {
return err
}
defer onFailure.Call(unwind)
defer onFailure.call(unwind)

// get the port of addr on target node.
portID, err := getPortIDByIP(r.compute, route.TargetNode, addr)
@ -261,9 +270,9 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err
return err
}

addr_pairs := port.AllowedAddressPairs
addrPairs := port.AllowedAddressPairs
index = -1
for i, item := range addr_pairs {
for i, item := range addrPairs {
if item.IPAddress == route.DestinationCIDR {
index = i
break
@ -272,23 +281,23 @@ func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) err

if index != -1 {
// Delete element `index`
addr_pairs[index] = addr_pairs[len(addr_pairs)-1]
addr_pairs = addr_pairs[:len(addr_pairs)-1]
addrPairs[index] = addrPairs[len(addrPairs)-1]
addrPairs = addrPairs[:len(addrPairs)-1]

unwind, err := updateAllowedAddressPairs(r.network, port, addr_pairs)
unwind, err := updateAllowedAddressPairs(r.network, port, addrPairs)
if err != nil {
return err
}
defer onFailure.Call(unwind)
defer onFailure.call(unwind)
}

glog.V(4).Infof("Route deleted: %v", route)
onFailure.Disarm()
onFailure.disarm()
return nil
}

func getPortIDByIP(compute *gophercloud.ServiceClient, targetNode types.NodeName, ipAddress string) (string, error) {
srv, err := getServerByName(compute, targetNode)
srv, err := getServerByName(compute, targetNode, true)
if err != nil {
return "", err
}
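Besides the Id-to-ID renames, the ListRoutes hunk changes behavior: a router route whose next hop does not match any known node is no longer skipped but reported with an empty TargetNode and Blackhole set, so the route controller can reconcile it. A simplified sketch of that mapping, using stand-in types rather than the cloudprovider structs:

package main

// routeSketch is a stand-in for cloudprovider.Route.
type routeSketch struct {
	Name            string
	TargetNode      string
	DestinationCIDR string
	Blackhole       bool
}

// toRoute marks routes with unknown next hops as blackholes instead of
// dropping them, mirroring the change in the hunk above.
func toRoute(dstCIDR, nextHop string, nodesByAddr map[string]string) routeSketch {
	node, found := nodesByAddr[nextHop]
	return routeSketch{
		Name:            dstCIDR,
		TargetNode:      node, // empty if the next hop is unknown
		DestinationCIDR: dstCIDR,
		Blackhole:       !found,
	}
}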
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package openstack

import (
"context"
"net"
"testing"

@ -39,9 +40,17 @@ func TestRoutes(t *testing.T) {
t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
}

vms := getServers(os)
_, err = os.InstanceID()
if err != nil || len(vms) == 0 {
t.Skipf("Please run this test in an OpenStack vm or create at least one VM in OpenStack before you run this test.")
}

// We know we have at least one vm.
servername := vms[0].Name

// Pick the first router and server to try a test with
os.routeOpts.RouterId = getRouters(os)[0].ID
servername := getServers(os)[0].Name
os.routeOpts.RouterID = getRouters(os)[0].ID

r, ok := os.Routes()
if !ok {
@ -52,12 +61,12 @@ func TestRoutes(t *testing.T) {
DestinationCIDR: "10.164.2.0/24",
TargetNode: types.NodeName(servername),
}
err = r.CreateRoute(clusterName, "myhint", &newroute)
err = r.CreateRoute(context.TODO(), clusterName, "myhint", &newroute)
if err != nil {
t.Fatalf("CreateRoute error: %v", err)
}

routelist, err := r.ListRoutes(clusterName)
routelist, err := r.ListRoutes(context.TODO(), clusterName)
if err != nil {
t.Fatalf("ListRoutes() error: %v", err)
}
@ -70,7 +79,7 @@ func TestRoutes(t *testing.T) {
t.Logf("%s via %s", cidr, route.TargetNode)
}

err = r.DeleteRoute(clusterName, &newroute)
err = r.DeleteRoute(context.TODO(), clusterName, &newroute)
if err != nil {
t.Fatalf("DeleteRoute error: %v", err)
}
259
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package openstack

import (
"context"
"fmt"
"os"
"reflect"
@ -30,29 +31,28 @@ import (
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
"k8s.io/api/core/v1"

"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
)

const (
volumeAvailableStatus = "available"
volumeInUseStatus = "in-use"
testClusterName = "testCluster"
testClusterName = "testCluster"

volumeStatusTimeoutSeconds = 30
// volumeStatus* is configuration of exponential backoff for
// waiting for specified volume status. Starting with 1
// seconds, multiplying by 1.2 with each step and taking 13 steps at maximum
// it will time out after 32s, which roughly corresponds to 30s
volumeStatusInitDealy = 1 * time.Second
volumeStatusInitDelay = 1 * time.Second
volumeStatusFactor = 1.2
volumeStatusSteps = 13
)

func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status string) {
backoff := wait.Backoff{
Duration: volumeStatusInitDealy,
Duration: volumeStatusInitDelay,
Factor: volumeStatusFactor,
Steps: volumeStatusSteps,
}
@ -67,9 +67,8 @@ func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status
status,
volumeStatusTimeoutSeconds)
return true, nil
} else {
return false, nil
}
return false, nil
})
if err == wait.ErrWaitTimeout {
t.Logf("Volume (%s) status did not change to %s after %v seconds\n",
@ -89,10 +88,17 @@ func TestReadConfig(t *testing.T) {
t.Errorf("Should fail when no config is provided: %s", err)
}

os.Setenv("OS_PASSWORD", "mypass")
defer os.Unsetenv("OS_PASSWORD")

os.Setenv("OS_TENANT_NAME", "admin")
defer os.Unsetenv("OS_TENANT_NAME")

cfg, err := readConfig(strings.NewReader(`
[Global]
auth-url = http://auth.url
username = user
user-id = user
tenant-name = demo
[LoadBalancer]
create-monitor = yes
monitor-delay = 1m
@ -108,8 +114,21 @@ func TestReadConfig(t *testing.T) {
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
if cfg.Global.AuthUrl != "http://auth.url" {
t.Errorf("incorrect authurl: %s", cfg.Global.AuthUrl)
if cfg.Global.AuthURL != "http://auth.url" {
t.Errorf("incorrect authurl: %s", cfg.Global.AuthURL)
}

if cfg.Global.UserID != "user" {
t.Errorf("incorrect userid: %s", cfg.Global.UserID)
}

if cfg.Global.Password != "mypass" {
t.Errorf("incorrect password: %s", cfg.Global.Password)
}

// config file wins over environment variable
if cfg.Global.TenantName != "demo" {
t.Errorf("incorrect tenant name: %s", cfg.Global.TenantName)
}

if !cfg.LoadBalancer.CreateMonitor {
@ -141,7 +160,11 @@ func TestReadConfig(t *testing.T) {
func TestToAuthOptions(t *testing.T) {
cfg := Config{}
cfg.Global.Username = "user"
// etc.
cfg.Global.Password = "pass"
cfg.Global.DomainID = "2a73b8f597c04551a0fdc8e95544be8a"
cfg.Global.DomainName = "local"
cfg.Global.AuthURL = "http://auth.url"
cfg.Global.UserID = "user"

ao := cfg.toAuthOptions()

@ -151,6 +174,24 @@ func TestToAuthOptions(t *testing.T) {
if ao.Username != cfg.Global.Username {
t.Errorf("Username %s != %s", ao.Username, cfg.Global.Username)
}
if ao.Password != cfg.Global.Password {
t.Errorf("Password %s != %s", ao.Password, cfg.Global.Password)
}
if ao.DomainID != cfg.Global.DomainID {
t.Errorf("DomainID %s != %s", ao.DomainID, cfg.Global.DomainID)
}
if ao.IdentityEndpoint != cfg.Global.AuthURL {
t.Errorf("IdentityEndpoint %s != %s", ao.IdentityEndpoint, cfg.Global.AuthURL)
}
if ao.UserID != cfg.Global.UserID {
t.Errorf("UserID %s != %s", ao.UserID, cfg.Global.UserID)
}
if ao.DomainName != cfg.Global.DomainName {
t.Errorf("DomainName %s != %s", ao.DomainName, cfg.Global.DomainName)
}
if ao.TenantID != cfg.Global.TenantID {
t.Errorf("TenantID %s != %s", ao.TenantID, cfg.Global.TenantID)
}
}

func TestCheckOpenStackOpts(t *testing.T) {
@ -167,8 +208,8 @@ func TestCheckOpenStackOpts(t *testing.T) {
provider: nil,
lbOpts: LoadBalancerOpts{
LBVersion: "v2",
SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef",
FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786",
SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef",
FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786",
LBMethod: "ROUND_ROBIN",
LBProvider: "haproxy",
CreateMonitor: true,
@ -189,7 +230,7 @@ func TestCheckOpenStackOpts(t *testing.T) {
provider: nil,
lbOpts: LoadBalancerOpts{
LBVersion: "v2",
FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786",
FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786",
LBMethod: "ROUND_ROBIN",
CreateMonitor: true,
MonitorDelay: delay,
@ -209,10 +250,12 @@ func TestCheckOpenStackOpts(t *testing.T) {
provider: nil,
lbOpts: LoadBalancerOpts{
LBVersion: "v2",
SubnetId: "6261548e-ffde-4bc7-bd22-59c83578c5ef",
FloatingNetworkId: "38b8b5f9-64dc-4424-bf86-679595714786",
SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef",
FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786",
LBMethod: "ROUND_ROBIN",
CreateMonitor: true,
MonitorTimeout: timeout,
MonitorMaxRetries: uint(3),
ManageSecurityGroups: true,
},
metadataOpts: MetadataOpts{
@ -252,6 +295,46 @@ func TestCheckOpenStackOpts(t *testing.T) {
expectedError: fmt.Errorf("invalid element %q found in section [Metadata] with key `search-order`."+
"Supported elements include %q and %q", "value1", configDriveID, metadataID),
},
{
name: "test7",
openstackOpts: &OpenStack{
provider: nil,
lbOpts: LoadBalancerOpts{
LBVersion: "v2",
SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef",
FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786",
LBMethod: "ROUND_ROBIN",
CreateMonitor: true,
MonitorDelay: delay,
MonitorTimeout: timeout,
ManageSecurityGroups: true,
},
metadataOpts: MetadataOpts{
SearchOrder: configDriveID,
},
},
expectedError: fmt.Errorf("monitor-max-retries not set in cloud provider config"),
},
{
name: "test8",
openstackOpts: &OpenStack{
provider: nil,
lbOpts: LoadBalancerOpts{
LBVersion: "v2",
SubnetID: "6261548e-ffde-4bc7-bd22-59c83578c5ef",
FloatingNetworkID: "38b8b5f9-64dc-4424-bf86-679595714786",
LBMethod: "ROUND_ROBIN",
CreateMonitor: true,
MonitorDelay: delay,
MonitorMaxRetries: uint(3),
ManageSecurityGroups: true,
},
metadataOpts: MetadataOpts{
SearchOrder: configDriveID,
},
},
expectedError: fmt.Errorf("monitor-timeout not set in cloud provider config"),
},
}

for _, testcase := range tests {
@ -271,39 +354,39 @@ func TestCaller(t *testing.T) {
called := false
myFunc := func() { called = true }

c := NewCaller()
c.Call(myFunc)
c := newCaller()
c.call(myFunc)

if !called {
t.Errorf("Caller failed to call function in default case")
t.Errorf("caller failed to call function in default case")
}

c.Disarm()
c.disarm()
called = false
c.Call(myFunc)
c.call(myFunc)

if called {
t.Error("Caller still called function when disarmed")
t.Error("caller still called function when disarmed")
}

// Confirm the "usual" deferred Caller pattern works as expected
// Confirm the "usual" deferred caller pattern works as expected

called = false
success_case := func() {
c := NewCaller()
defer c.Call(func() { called = true })
c.Disarm()
successCase := func() {
c := newCaller()
defer c.call(func() { called = true })
c.disarm()
}
if success_case(); called {
if successCase(); called {
t.Error("Deferred success case still invoked unwind")
}

called = false
failure_case := func() {
c := NewCaller()
defer c.Call(func() { called = true })
failureCase := func() {
c := newCaller()
defer c.call(func() { called = true })
}
if failure_case(); !called {
if failureCase(); !called {
t.Error("Deferred failure case failed to invoke unwind")
}
}
@ -376,60 +459,10 @@ func TestNodeAddresses(t *testing.T) {
}
}

// This allows acceptance testing against an existing OpenStack
// install, using the standard OS_* OpenStack client environment
// variables.
// FIXME: it would be better to hermetically test against canned JSON
// requests/responses.
func configFromEnv() (cfg Config, ok bool) {
cfg.Global.AuthUrl = os.Getenv("OS_AUTH_URL")

cfg.Global.TenantId = os.Getenv("OS_TENANT_ID")
// Rax/nova _insists_ that we don't specify both tenant ID and name
if cfg.Global.TenantId == "" {
cfg.Global.TenantName = os.Getenv("OS_TENANT_NAME")
}

cfg.Global.Username = os.Getenv("OS_USERNAME")
cfg.Global.Password = os.Getenv("OS_PASSWORD")
cfg.Global.Region = os.Getenv("OS_REGION_NAME")

cfg.Global.TenantName = os.Getenv("OS_TENANT_NAME")
if cfg.Global.TenantName == "" {
cfg.Global.TenantName = os.Getenv("OS_PROJECT_NAME")
}

cfg.Global.TenantId = os.Getenv("OS_TENANT_ID")
if cfg.Global.TenantId == "" {
cfg.Global.TenantId = os.Getenv("OS_PROJECT_ID")
}

cfg.Global.DomainId = os.Getenv("OS_DOMAIN_ID")
if cfg.Global.DomainId == "" {
cfg.Global.DomainId = os.Getenv("OS_USER_DOMAIN_ID")
}

cfg.Global.DomainName = os.Getenv("OS_DOMAIN_NAME")
if cfg.Global.DomainName == "" {
cfg.Global.DomainName = os.Getenv("OS_USER_DOMAIN_NAME")
}

ok = (cfg.Global.AuthUrl != "" &&
cfg.Global.Username != "" &&
cfg.Global.Password != "" &&
(cfg.Global.TenantId != "" || cfg.Global.TenantName != "" ||
cfg.Global.DomainId != "" || cfg.Global.DomainName != ""))

cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", configDriveID, metadataID)
cfg.BlockStorage.BSVersion = "auto"

return
}

func TestNewOpenStack(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
t.Skip("No config found in environment")
}

_, err := newOpenStack(cfg)
@ -441,7 +474,7 @@ func TestNewOpenStack(t *testing.T) {
func TestLoadBalancer(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
t.Skip("No config found in environment")
}

versions := []string{"v2", ""}
@ -460,7 +493,7 @@ func TestLoadBalancer(t *testing.T) {
t.Fatalf("LoadBalancer() returned false - perhaps your stack doesn't support Neutron?")
}

_, exists, err := lb.GetLoadBalancer(testClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "noexist"}})
_, exists, err := lb.GetLoadBalancer(context.TODO(), testClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "noexist"}})
if err != nil {
t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %s", err)
}
@ -486,7 +519,7 @@ func TestZones(t *testing.T) {
t.Fatalf("Zones() returned false")
}

zone, err := z.GetZone()
zone, err := z.GetZone(context.TODO())
if err != nil {
t.Fatalf("GetZone() returned error: %s", err)
}
@ -505,7 +538,7 @@ var diskPathRegexp = regexp.MustCompile("/dev/disk/(?:by-id|by-path)/")
func TestVolumes(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
t.Skip("No config found in environment")
}

os, err := newOpenStack(cfg)
@ -528,15 +561,15 @@ func TestVolumes(t *testing.T) {
if err != nil {
t.Logf("Cannot find instance id: %v - perhaps you are running this test outside a VM launched by OpenStack", err)
} else {
diskId, err := os.AttachDisk(id, vol)
diskID, err := os.AttachDisk(id, vol)
if err != nil {
t.Fatalf("Cannot AttachDisk Cinder volume %s: %v", vol, err)
}
t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskId)
t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskID)

WaitForVolumeStatus(t, os, vol, volumeInUseStatus)

devicePath := os.GetDevicePath(diskId)
devicePath := os.GetDevicePath(diskID)
if diskPathRegexp.FindString(devicePath) == "" {
t.Fatalf("GetDevicePath returned and unexpected path for Cinder volume %s, returned %s", vol, devicePath)
}
@ -551,6 +584,18 @@ func TestVolumes(t *testing.T) {
WaitForVolumeStatus(t, os, vol, volumeAvailableStatus)
}

expectedVolSize := resource.MustParse("2Gi")
newVolSize, err := os.ExpandVolume(vol, resource.MustParse("1Gi"), expectedVolSize)
if err != nil {
t.Fatalf("Cannot expand a Cinder volume: %v", err)
}
if newVolSize != expectedVolSize {
t.Logf("Expected: %v but got: %v ", expectedVolSize, newVolSize)
}
t.Logf("Volume expanded to (%v) \n", newVolSize)

WaitForVolumeStatus(t, os, vol, volumeAvailableStatus)

err = os.DeleteVolume(vol)
if err != nil {
t.Fatalf("Cannot delete Cinder volume %s: %v", vol, err)
@ -602,3 +647,37 @@ func TestInstanceIDFromProviderID(t *testing.T) {
}
}
}

func TestToAuth3Options(t *testing.T) {
cfg := Config{}
cfg.Global.Username = "user"
cfg.Global.Password = "pass"
cfg.Global.DomainID = "2a73b8f597c04551a0fdc8e95544be8a"
cfg.Global.DomainName = "local"
cfg.Global.AuthURL = "http://auth.url"
cfg.Global.UserID = "user"

ao := cfg.toAuth3Options()

if !ao.AllowReauth {
t.Errorf("Will need to be able to reauthenticate")
}
if ao.Username != cfg.Global.Username {
t.Errorf("Username %s != %s", ao.Username, cfg.Global.Username)
}
if ao.Password != cfg.Global.Password {
t.Errorf("Password %s != %s", ao.Password, cfg.Global.Password)
}
if ao.DomainID != cfg.Global.DomainID {
t.Errorf("DomainID %s != %s", ao.DomainID, cfg.Global.DomainID)
}
if ao.IdentityEndpoint != cfg.Global.AuthURL {
t.Errorf("IdentityEndpoint %s != %s", ao.IdentityEndpoint, cfg.Global.AuthURL)
}
if ao.UserID != cfg.Global.UserID {
t.Errorf("UserID %s != %s", ao.UserID, cfg.Global.UserID)
}
if ao.DomainName != cfg.Global.DomainName {
t.Errorf("DomainName %s != %s", ao.DomainName, cfg.Global.DomainName)
}
}
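The TestReadConfig and configFromEnv hunks above establish a precedence rule: values from the config file win over OS_* environment variables, and the newer OS_PROJECT_*/OS_USER_DOMAIN_* names act as fallbacks for the legacy OS_TENANT_*/OS_DOMAIN_* ones. A small sketch of that fallback lookup, using a hypothetical helper name:

package main

import "os"

// getenvFirst returns the first non-empty value among the given environment
// variables, e.g. getenvFirst("OS_TENANT_NAME", "OS_PROJECT_NAME").
func getenvFirst(keys ...string) string {
	for _, k := range keys {
		if v := os.Getenv(k); v != "" {
			return v
		}
	}
	return ""
}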
259
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go
generated
vendored
@ -17,6 +17,8 @@ limitations under the License.
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
@ -24,8 +26,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
k8s_volume "k8s.io/kubernetes/pkg/volume"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
|
||||
@ -39,35 +46,38 @@ import (
|
||||
)
|
||||
|
||||
type volumeService interface {
|
||||
createVolume(opts VolumeCreateOpts) (string, string, error)
|
||||
createVolume(opts volumeCreateOpts) (string, string, error)
|
||||
getVolume(volumeID string) (Volume, error)
|
||||
deleteVolume(volumeName string) error
|
||||
expandVolume(volumeID string, newSize int) error
|
||||
}
|
||||
|
||||
// Volumes implementation for v1
|
||||
// VolumesV1 is a Volumes implementation for cinder v1
|
||||
type VolumesV1 struct {
|
||||
blockstorage *gophercloud.ServiceClient
|
||||
opts BlockStorageOpts
|
||||
}
|
||||
|
||||
// Volumes implementation for v2
|
||||
// VolumesV2 is a Volumes implementation for cinder v2
|
||||
type VolumesV2 struct {
|
||||
blockstorage *gophercloud.ServiceClient
|
||||
opts BlockStorageOpts
|
||||
}
|
||||
|
||||
// Volumes implementation for v3
|
||||
// VolumesV3 is a Volumes implementation for cinder v3
|
||||
type VolumesV3 struct {
|
||||
blockstorage *gophercloud.ServiceClient
|
||||
opts BlockStorageOpts
|
||||
}
|
||||
|
||||
// Volume stores information about a single volume
|
||||
type Volume struct {
|
||||
// ID of the instance, to which this volume is attached. "" if not attached
|
||||
AttachedServerId string
|
||||
AttachedServerID string
|
||||
// Device file path
|
||||
AttachedDevice string
|
||||
// availabilityZone is which availability zone the volume is in
|
||||
AvailabilityZone string
|
||||
// Unique identifier for the volume.
|
||||
ID string
|
||||
// Human-readable display name for the volume.
|
||||
@ -78,7 +88,7 @@ type Volume struct {
|
||||
Size int
|
||||
}
|
||||
|
||||
type VolumeCreateOpts struct {
|
||||
type volumeCreateOpts struct {
|
||||
Size int
|
||||
Availability string
|
||||
Name string
|
||||
@ -86,22 +96,25 @@ type VolumeCreateOpts struct {
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
// implements PVLabeler.
|
||||
var _ cloudprovider.PVLabeler = (*OpenStack)(nil)
|
||||
|
||||
const (
|
||||
VolumeAvailableStatus = "available"
|
||||
VolumeInUseStatus = "in-use"
|
||||
VolumeDeletedStatus = "deleted"
|
||||
VolumeErrorStatus = "error"
|
||||
volumeAvailableStatus = "available"
|
||||
volumeInUseStatus = "in-use"
|
||||
volumeDeletedStatus = "deleted"
|
||||
volumeErrorStatus = "error"
|
||||
|
||||
// On some environments, we need to query the metadata service in order
|
||||
// to locate disks. We'll use the Newton version, which includes device
|
||||
// metadata.
|
||||
NewtonMetadataVersion = "2016-06-30"
|
||||
newtonMetadataVersion = "2016-06-30"
|
||||
)
|
||||
|
||||
func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, error) {
|
||||
func (volumes *VolumesV1) createVolume(opts volumeCreateOpts) (string, string, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
create_opts := volumes_v1.CreateOpts{
|
||||
createOpts := volumes_v1.CreateOpts{
|
||||
Name: opts.Name,
|
||||
Size: opts.Size,
|
||||
VolumeType: opts.VolumeType,
|
||||
@ -109,7 +122,7 @@ func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, e
|
||||
Metadata: opts.Metadata,
|
||||
}
|
||||
|
||||
vol, err := volumes_v1.Create(volumes.blockstorage, create_opts).Extract()
|
||||
vol, err := volumes_v1.Create(volumes.blockstorage, createOpts).Extract()
|
||||
timeTaken := time.Since(startTime).Seconds()
|
||||
recordOpenstackOperationMetric("create_v1_volume", timeTaken, err)
|
||||
if err != nil {
|
||||
@ -118,10 +131,10 @@ func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, e
|
||||
return vol.ID, vol.AvailabilityZone, nil
|
||||
}
|
||||
|
||||
func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, error) {
|
||||
func (volumes *VolumesV2) createVolume(opts volumeCreateOpts) (string, string, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
create_opts := volumes_v2.CreateOpts{
|
||||
createOpts := volumes_v2.CreateOpts{
|
||||
Name: opts.Name,
|
||||
Size: opts.Size,
|
||||
VolumeType: opts.VolumeType,
|
||||
@ -129,7 +142,7 @@ func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, e
|
||||
Metadata: opts.Metadata,
|
||||
}
|
||||
|
||||
vol, err := volumes_v2.Create(volumes.blockstorage, create_opts).Extract()
|
||||
vol, err := volumes_v2.Create(volumes.blockstorage, createOpts).Extract()
|
||||
timeTaken := time.Since(startTime).Seconds()
|
||||
recordOpenstackOperationMetric("create_v2_volume", timeTaken, err)
|
||||
if err != nil {
|
||||
@ -138,10 +151,10 @@ func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, e
|
||||
return vol.ID, vol.AvailabilityZone, nil
|
||||
}
|
||||
|
||||
func (volumes *VolumesV3) createVolume(opts VolumeCreateOpts) (string, string, error) {
|
||||
func (volumes *VolumesV3) createVolume(opts volumeCreateOpts) (string, string, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
create_opts := volumes_v3.CreateOpts{
|
||||
createOpts := volumes_v3.CreateOpts{
|
||||
Name: opts.Name,
|
||||
Size: opts.Size,
|
||||
VolumeType: opts.VolumeType,
|
||||
@ -149,7 +162,7 @@ func (volumes *VolumesV3) createVolume(opts VolumeCreateOpts) (string, string, e
|
||||
Metadata: opts.Metadata,
|
||||
}
|
||||
|
||||
vol, err := volumes_v3.Create(volumes.blockstorage, create_opts).Extract()
|
||||
vol, err := volumes_v3.Create(volumes.blockstorage, createOpts).Extract()
|
||||
timeTaken := time.Since(startTime).Seconds()
|
||||
recordOpenstackOperationMetric("create_v3_volume", timeTaken, err)
|
||||
if err != nil {
|
||||
@ -168,14 +181,15 @@ func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) {
|
||||
}
|
||||
|
||||
volume := Volume{
|
||||
ID: volumeV1.ID,
|
||||
Name: volumeV1.Name,
|
||||
Status: volumeV1.Status,
|
||||
Size: volumeV1.Size,
|
||||
AvailabilityZone: volumeV1.AvailabilityZone,
|
||||
ID: volumeV1.ID,
|
||||
Name: volumeV1.Name,
|
||||
Status: volumeV1.Status,
|
||||
Size: volumeV1.Size,
|
||||
}
|
||||
|
||||
if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil {
|
||||
volume.AttachedServerId = volumeV1.Attachments[0]["server_id"].(string)
|
||||
volume.AttachedServerID = volumeV1.Attachments[0]["server_id"].(string)
|
||||
volume.AttachedDevice = volumeV1.Attachments[0]["device"].(string)
|
||||
}
|
||||
|
||||
@ -192,14 +206,15 @@ func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) {
|
||||
}
|
||||
|
||||
volume := Volume{
|
||||
ID: volumeV2.ID,
|
||||
Name: volumeV2.Name,
|
||||
Status: volumeV2.Status,
|
||||
Size: volumeV2.Size,
|
||||
AvailabilityZone: volumeV2.AvailabilityZone,
|
||||
ID: volumeV2.ID,
|
||||
Name: volumeV2.Name,
|
||||
Status: volumeV2.Status,
|
||||
Size: volumeV2.Size,
|
||||
}
|
||||
|
||||
if len(volumeV2.Attachments) > 0 {
|
||||
volume.AttachedServerId = volumeV2.Attachments[0].ServerID
|
||||
volume.AttachedServerID = volumeV2.Attachments[0].ServerID
|
||||
volume.AttachedDevice = volumeV2.Attachments[0].Device
|
||||
}
|
||||
|
||||
@ -216,13 +231,14 @@ func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) {
|
||||
}
|
||||
|
||||
volume := Volume{
|
||||
ID: volumeV3.ID,
|
||||
Name: volumeV3.Name,
|
||||
Status: volumeV3.Status,
|
||||
AvailabilityZone: volumeV3.AvailabilityZone,
|
||||
ID: volumeV3.ID,
|
||||
Name: volumeV3.Name,
|
||||
Status: volumeV3.Status,
|
||||
}
|
||||
|
||||
if len(volumeV3.Attachments) > 0 {
|
||||
volume.AttachedServerId = volumeV3.Attachments[0].ServerID
|
||||
volume.AttachedServerID = volumeV3.Attachments[0].ServerID
|
||||
volume.AttachedDevice = volumeV3.Attachments[0].Device
|
||||
}
|
||||
|
||||
@ -255,10 +271,10 @@ func (volumes *VolumesV3) deleteVolume(volumeID string) error {
|
||||
|
||||
func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error {
|
||||
startTime := time.Now()
|
||||
create_opts := volumeexpand.ExtendSizeOpts{
|
||||
createOpts := volumeexpand.ExtendSizeOpts{
|
||||
NewSize: newSize,
|
||||
}
|
||||
err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr()
|
||||
err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr()
|
||||
timeTaken := time.Since(startTime).Seconds()
|
||||
recordOpenstackOperationMetric("expand_volume", timeTaken, err)
|
||||
return err
|
||||
@ -266,10 +282,10 @@ func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error {
|
||||
|
||||
func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error {
|
||||
startTime := time.Now()
|
||||
create_opts := volumeexpand.ExtendSizeOpts{
|
||||
createOpts := volumeexpand.ExtendSizeOpts{
|
||||
NewSize: newSize,
|
||||
}
|
||||
err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr()
|
||||
err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr()
|
||||
timeTaken := time.Since(startTime).Seconds()
|
||||
recordOpenstackOperationMetric("expand_volume", timeTaken, err)
|
||||
return err
|
||||
@ -277,25 +293,26 @@ func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error {
|
||||
|
||||
func (volumes *VolumesV3) expandVolume(volumeID string, newSize int) error {
|
||||
startTime := time.Now()
|
||||
create_opts := volumeexpand.ExtendSizeOpts{
|
||||
createOpts := volumeexpand.ExtendSizeOpts{
|
||||
NewSize: newSize,
|
||||
}
|
||||
err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr()
|
||||
err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr()
|
||||
timeTaken := time.Since(startTime).Seconds()
|
||||
recordOpenstackOperationMetric("expand_volume", timeTaken, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// OperationPending checks if there is an operation pending on a volume
|
||||
func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
|
||||
volume, err := os.getVolume(diskName)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
volumeStatus := volume.Status
|
||||
if volumeStatus == VolumeErrorStatus {
|
||||
if volumeStatus == volumeErrorStatus {
|
||||
return false, volumeStatus, nil
|
||||
}
|
||||
if volumeStatus == VolumeAvailableStatus || volumeStatus == VolumeInUseStatus || volumeStatus == VolumeDeletedStatus {
|
||||
if volumeStatus == volumeAvailableStatus || volumeStatus == volumeInUseStatus || volumeStatus == volumeDeletedStatus {
|
||||
return false, volume.Status, nil
|
||||
}
|
||||
return true, volumeStatus, nil
|
||||
@ -313,12 +330,22 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if volume.AttachedServerId != "" {
|
||||
if instanceID == volume.AttachedServerId {
|
||||
if volume.AttachedServerID != "" {
|
||||
if instanceID == volume.AttachedServerID {
|
||||
glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
|
||||
return volume.ID, nil
|
||||
}
|
||||
return "", fmt.Errorf("disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId)
|
||||
nodeName, err := os.GetNodeNameByID(volume.AttachedServerID)
|
||||
attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID)
|
||||
if err != nil {
|
||||
glog.Error(attachErr)
|
||||
return "", errors.New(attachErr)
|
||||
}
|
||||
// using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128
|
||||
devicePath := volume.AttachedDevice
|
||||
danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath)
|
||||
glog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName)
|
||||
return "", danglingErr
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
@ -341,34 +368,34 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
if err != nil {
return err
}
if volume.Status == VolumeAvailableStatus {
if volume.Status == volumeAvailableStatus {
// "available" is fine since that means the volume is detached from instance already.
glog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
return nil
}

if volume.Status != VolumeInUseStatus {
if volume.Status != volumeInUseStatus {
return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status)
}
cClient, err := os.NewComputeV2()
if err != nil {
return err
}
if volume.AttachedServerId != instanceID {
if volume.AttachedServerID != instanceID {
return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
} else {
startTime := time.Now()
// This is a blocking call and affects kubelet's performance directly.
// We should consider kicking it out into a separate routine, if it is bad.
err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("detach_disk", timeTaken, err)
if err != nil {
return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
}
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
}

startTime := time.Now()
// This is a blocking call and affects kubelet's performance directly.
// We should consider kicking it out into a separate routine, if it is bad.
err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
timeTaken := time.Since(startTime).Seconds()
recordOpenstackOperationMetric("detach_disk", timeTaken, err)
if err != nil {
return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
}
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)

return nil
}

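The detach path is a single Nova call wrapped in the same timing pattern as the other operations. A sketch of that call in isolation, assuming an initialized compute-v2 client; the helper name and the injected record hook are illustrative:

package openstacksketch

import (
	"time"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
)

// detachSketch removes the attachment of volumeID from serverID and records
// the call latency via the supplied hook (recordOpenstackOperationMetric in
// this package).
func detachSketch(client *gophercloud.ServiceClient, serverID, volumeID string, record func(string, float64, error)) error {
	startTime := time.Now()
	err := volumeattach.Delete(client, serverID, volumeID).ExtractErr()
	record("detach_disk", time.Since(startTime).Seconds(), err)
	return err
}
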
@ -378,14 +405,14 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne
if err != nil {
return oldSize, err
}
if volume.Status != VolumeAvailableStatus {
if volume.Status != volumeAvailableStatus {
// a Cinder volume cannot be expanded if its status is not available
return oldSize, fmt.Errorf("volume status is not available")
}

volSizeBytes := newSize.Value()
// Cinder works in whole gigabytes; convert to GiB, rounding up
volSizeGB := int(k8s_volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB))

// if the volume size is equal to or greater than newSize, return nil
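
RoundUpSize is a ceiling division: a request for 1.5Gi must become 2 GiB because Cinder only accepts whole gigabytes. The arithmetic, as a self-contained sketch:

// roundUpToGiB mirrors the intent of volumeutil.RoundUpSize(bytes, 1024*1024*1024):
// integer ceiling division from bytes to GiB.
func roundUpToGiB(sizeBytes int64) int64 {
	const gib int64 = 1024 * 1024 * 1024
	return (sizeBytes + gib - 1) / gib
}

For example, roundUpToGiB(1610612736) (that is, 1.5Gi) returns 2, and resource.MustParse then turns that into the "2Gi" quantity compared against the requested size.
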
@ -421,7 +448,7 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
}

opts := VolumeCreateOpts{
opts := volumeCreateOpts{
Name: name,
Size: size,
VolumeType: vtype,
@ -441,8 +468,8 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
return volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ, nil
}

// GetDevicePath returns the path of an attached block storage volume, specified by its id.
func (os *OpenStack) GetDevicePathBySerialId(volumeID string) string {
// GetDevicePathBySerialID returns the path of an attached block storage volume, specified by its id.
func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string {
// Build a list of candidate device paths.
// Certain Nova drivers will set the disk serial ID, including the Cinder volume id.
candidateDeviceNodes := []string{
@ -469,7 +496,7 @@ func (os *OpenStack) GetDevicePathBySerialId(volumeID string) string {
return ""
}

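The elided body walks a list of /dev/disk/by-id names derived from the volume ID, since KVM-based Nova drivers expose a truncated volume UUID as the disk serial. A sketch of that probing; the two candidate formats shown are assumptions modeled on common virtio and virtio-scsi naming, and the truncation assumes the canonical 36-character UUID:

import (
	"os"
	"path/filepath"
)

// findDeviceBySerial looks for a by-id symlink whose name embeds the
// (truncated) Cinder volume ID and returns the first match, or "".
func findDeviceBySerial(volumeID string) string {
	candidates := []string{
		"virtio-" + volumeID[:20],
		"scsi-0QEMU_QEMU_HARDDISK_" + volumeID[:20],
	}
	for _, name := range candidates {
		path := filepath.Join("/dev/disk/by-id", name)
		if _, err := os.Stat(path); err == nil {
			return path
		}
	}
	return ""
}
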
func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string {
func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string {
// Nova Hyper-V hosts cannot override disk SCSI IDs. In order to locate
// volumes, we're querying the metadata service. Note that the Hyper-V
// driver will include device metadata for untagged volumes as well.
@ -477,7 +504,7 @@ func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string {
// We're avoiding using cached metadata (or the configdrive),
// relying on the metadata service.
instanceMetadata, err := getMetadataFromMetadataService(
NewtonMetadataVersion)
newtonMetadataVersion)

if err != nil {
glog.V(4).Infof(
@ -520,10 +547,10 @@ func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string {

// GetDevicePath returns the path of an attached block storage volume, specified by its id.
func (os *OpenStack) GetDevicePath(volumeID string) string {
devicePath := os.GetDevicePathBySerialId(volumeID)
devicePath := os.GetDevicePathBySerialID(volumeID)

if devicePath == "" {
devicePath = os.GetDevicePathFromInstanceMetadata(volumeID)
devicePath = os.getDevicePathFromInstanceMetadata(volumeID)
}

if devicePath == "" {
@ -533,6 +560,7 @@ func (os *OpenStack) GetDevicePath(volumeID string) string {
return devicePath
}

// DeleteVolume deletes a volume given its volume ID.
func (os *OpenStack) DeleteVolume(volumeID string) error {
used, err := os.diskIsUsed(volumeID)
if err != nil {
@ -561,48 +589,101 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string,
if err != nil {
return "", err
}
if volume.Status != VolumeInUseStatus {
if volume.Status != volumeInUseStatus {
return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status)
}
if volume.AttachedServerId != "" {
if instanceID == volume.AttachedServerId {
if volume.AttachedServerID != "" {
if instanceID == volume.AttachedServerID {
// Attachment[0]["device"] points to the device path
// see http://developer.openstack.org/api-ref-blockstorage-v1.html
return volume.AttachedDevice, nil
} else {
return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerId)
}
return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerID)
}
return "", fmt.Errorf("volume %s has no ServerId", volumeID)
}

// DiskIsAttached queries if a volume is attached to a compute instance
func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) {
if instanceID == "" {
glog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID)
}
volume, err := os.getVolume(volumeID)
if err != nil {
return false, err
}

return instanceID == volume.AttachedServerId, nil
return instanceID == volume.AttachedServerID, nil
}

// DiskIsAttachedByName queries if a volume is attached to a compute instance by name
func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
cClient, err := os.NewComputeV2()
if err != nil {
return false, "", err
}
srv, err := getServerByName(cClient, nodeName, false)
if err != nil {
if err == ErrNotFound {
// instance not found anymore in cloudprovider, assume that cinder is detached
return false, "", nil
}
return false, "", err
}
instanceID := "/" + srv.ID
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
attached, err := os.DiskIsAttached(instanceID, volumeID)
return attached, instanceID, err
}

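The two lines that massage srv.ID are an idiom for tolerating path-style IDs: prepending "/" and keeping everything after the last "/" yields the bare UUID whether or not the input carried a prefix. Isolated as a sketch:

import "strings"

// bareInstanceID strips any path-style prefix from a server ID, e.g.
// "region/az/abc-123" and "abc-123" both yield "abc-123".
func bareInstanceID(id string) string {
	s := "/" + id
	if ind := strings.LastIndex(s, "/"); ind >= 0 {
		s = s[ind+1:]
	}
	return s
}
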
// DisksAreAttached queries if a list of volumes are attached to a compute instance
func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
attached := make(map[string]bool)
for _, volumeID := range volumeIDs {
isAttached, _ := os.DiskIsAttached(instanceID, volumeID)
isAttached, err := os.DiskIsAttached(instanceID, volumeID)
if err != nil && err != ErrNotFound {
attached[volumeID] = true
continue
}
attached[volumeID] = isAttached
}
return attached, nil
}

// DisksAreAttachedByName queries if a list of volumes are attached to a compute instance by name
func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
attached := make(map[string]bool)
cClient, err := os.NewComputeV2()
if err != nil {
return attached, err
}
srv, err := getServerByName(cClient, nodeName, false)
if err != nil {
if err == ErrNotFound {
// instance not found anymore, mark all volumes as detached
for _, volumeID := range volumeIDs {
attached[volumeID] = false
}
return attached, nil
}
return attached, err
}
instanceID := "/" + srv.ID
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
return os.DisksAreAttached(instanceID, volumeIDs)
}

// diskIsUsed returns true if a disk is attached to any node.
func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
volume, err := os.getVolume(volumeID)
if err != nil {
return false, err
}
return volume.AttachedServerId != "", nil
return volume.AttachedServerID != "", nil
}

// ShouldTrustDevicePath queries if we should trust the Cinder-provided deviceName, see issue #33128
@ -610,11 +691,33 @@ func (os *OpenStack) ShouldTrustDevicePath() bool {
return os.bsOpts.TrustDevicePath
}

// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore any volumes that are being provisioned
if pv.Spec.Cinder.VolumeID == k8s_volume.ProvisionedVolumeName {
return nil, nil
}

// Get Volume
volume, err := os.getVolume(pv.Spec.Cinder.VolumeID)
if err != nil {
return nil, err
}

// Construct Volume Labels
labels := make(map[string]string)
labels[kubeletapis.LabelZoneFailureDomain] = volume.AvailabilityZone
labels[kubeletapis.LabelZoneRegion] = os.region
glog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels)

return labels, nil
}

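The two kubeletapis constants used above resolve, in this Kubernetes generation, to the beta failure-domain topology keys, so the resulting map has the shape sketched below. The literal key strings are spelled out only for illustration; real code would keep referencing the constants:

// cinderPVLabels shows the label map GetLabelsForVolume builds for a volume
// in availability zone az and region region.
func cinderPVLabels(az, region string) map[string]string {
	return map[string]string{
		"failure-domain.beta.kubernetes.io/zone":   az,     // kubeletapis.LabelZoneFailureDomain
		"failure-domain.beta.kubernetes.io/region": region, // kubeletapis.LabelZoneRegion
	}
}
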
// recordOpenstackOperationMetric records openstack operation metrics
func recordOpenstackOperationMetric(operation string, timeTaken float64, err error) {
if err != nil {
OpenstackApiRequestErrors.With(prometheus.Labels{"request": operation}).Inc()
openstackAPIRequestErrors.With(prometheus.Labels{"request": operation}).Inc()
} else {
OpenstackOperationsLatency.With(prometheus.Labels{"request": operation}).Observe(timeTaken)
openstackOperationsLatency.With(prometheus.Labels{"request": operation}).Observe(timeTaken)
}
}

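For readers unfamiliar with the two metric vectors consumed here, this is the general shape in which such Prometheus vectors are declared and registered. The metric names below are illustrative, not the provider's actual ones:

import "github.com/prometheus/client_golang/prometheus"

// Illustrative declarations of a counter vector and a histogram vector
// labeled by request type, matching how recordOpenstackOperationMetric
// consumes them (.With(labels).Inc() / .With(labels).Observe(seconds)).
var (
	apiRequestErrors = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "openstack_api_request_errors",
			Help: "Failed OpenStack API calls, partitioned by request type.",
		},
		[]string{"request"},
	)
	operationLatency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "openstack_api_request_duration_seconds",
			Help: "Latency of OpenStack API calls, partitioned by request type.",
		},
		[]string{"request"},
	)
)

func init() {
	// Register once at startup so scrapes can see both vectors.
	prometheus.MustRegister(apiRequestErrors, operationLatency)
}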