vendor update for CSI 0.3.0

Author: gman
Date:   2018-07-18 16:47:22 +02:00
Parent: 6f484f92fc
Commit: 8ea659f0d5

6810 changed files with 438061 additions and 193861 deletions

View File

@@ -70,7 +70,7 @@ type DeviceMetadata struct {
 // See http://docs.openstack.org/user-guide/cli_config_drive.html
 type Metadata struct {
     UUID             string `json:"uuid"`
-    Hostname         string `json:"hostname"`
+    Name             string `json:"name"`
     AvailabilityZone string `json:"availability_zone"`
     Devices          []DeviceMetadata `json:"devices,omitempty"`
     // .. and other fields we don't care about. Expand as necessary.
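For orientation, a minimal sketch of how this struct consumes config-drive JSON after the rename from "hostname" to "name"; the payload below is illustrative and shows only the fields visible in this hunk (DeviceMetadata is elided):

package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the vendored struct above, trimmed to the fields shown in the diff.
type Metadata struct {
    UUID             string `json:"uuid"`
    Name             string `json:"name"`
    AvailabilityZone string `json:"availability_zone"`
}

func main() {
    // Illustrative config-drive payload; real payloads carry more fields.
    payload := []byte(`{"uuid":"83679162-1378-4288-a2d4-70e13ec132aa","name":"test","availability_zone":"nova"}`)
    var md Metadata
    if err := json.Unmarshal(payload, &md); err != nil {
        panic(err)
    }
    fmt.Println(md.Name, md.AvailabilityZone) // test nova
}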

View File

@@ -23,7 +23,7 @@ import (
 var FakeMetadata = Metadata{
     UUID:             "83679162-1378-4288-a2d4-70e13ec132aa",
-    Hostname:         "test",
+    Name:             "test",
     AvailabilityZone: "nova",
 }
@@ -81,8 +81,8 @@ func TestParseMetadata(t *testing.T) {
         t.Fatalf("Should succeed when provided with valid data: %s", err)
     }
-    if md.Hostname != "test.novalocal" {
-        t.Errorf("incorrect hostname: %s", md.Hostname)
+    if md.Name != "test" {
+        t.Errorf("incorrect name: %s", md.Name)
     }
     if md.UUID != "83679162-1378-4288-a2d4-70e13ec132aa" {

View File

@@ -415,13 +415,10 @@ func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuilder, handler func(*servers.Server) (bool, error)) error {
     return err
 }

-func getServerByName(client *gophercloud.ServiceClient, name types.NodeName, showOnlyActive bool) (*servers.Server, error) {
+func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
     opts := servers.ListOpts{
         Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
     }
-    if showOnlyActive {
-        opts.Status = "ACTIVE"
-    }

     pager := servers.List(client, opts)
@@ -504,7 +501,7 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
 }

 func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) {
-    srv, err := getServerByName(client, name, true)
+    srv, err := getServerByName(client, name)
     if err != nil {
         return nil, err
     }
@@ -605,8 +602,17 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
 }

 func isNotFound(err error) bool {
-    e, ok := err.(*gophercloud.ErrUnexpectedResponseCode)
-    return ok && e.Actual == http.StatusNotFound
+    if _, ok := err.(gophercloud.ErrDefault404); ok {
+        return true
+    }
+
+    if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
+        if errCode.Actual == http.StatusNotFound {
+            return true
+        }
+    }
+
+    return false
 }

 // Zones indicates that we support zones
@@ -666,7 +672,7 @@ func (os *OpenStack) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
         return cloudprovider.Zone{}, err
     }

-    srv, err := getServerByName(compute, nodeName, true)
+    srv, err := getServerByName(compute, nodeName)
     if err != nil {
         if err == ErrNotFound {
             return cloudprovider.Zone{}, cloudprovider.InstanceNotFound
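The rewritten isNotFound above matters because gophercloud reports a 404 either as the typed ErrDefault404 (what Extract() returns) or as a bare ErrUnexpectedResponseCode; the old pointer-type assertion matched neither value type. A minimal sketch of the behaviour, assuming the vendored github.com/gophercloud/gophercloud where ErrDefault404 embeds ErrUnexpectedResponseCode:

package main

import (
    "fmt"
    "net/http"

    "github.com/gophercloud/gophercloud"
)

// Copy of the patched helper, for illustration outside the provider package.
func isNotFound(err error) bool {
    if _, ok := err.(gophercloud.ErrDefault404); ok {
        return true
    }
    if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
        if errCode.Actual == http.StatusNotFound {
            return true
        }
    }
    return false
}

func main() {
    var e404 gophercloud.ErrDefault404
    e404.Actual = http.StatusNotFound
    fmt.Println(isNotFound(e404)) // true: typed 404 returned by Extract()

    raw := gophercloud.ErrUnexpectedResponseCode{Actual: http.StatusNotFound}
    fmt.Println(isNotFound(raw)) // true: untyped response code

    // The old assertion err.(*gophercloud.ErrUnexpectedResponseCode) matched
    // neither of these value types, so 404s leaked through as hard errors.
}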

View File

@@ -42,6 +42,7 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
     compute, err := os.NewComputeV2()
     if err != nil {
         glog.Errorf("unable to access compute v2 API : %v", err)
+        return nil, false
     }
@@ -60,7 +61,7 @@ func (i *Instances) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
     if err != nil {
         return "", err
     }
-    return types.NodeName(md.Hostname), nil
+    return types.NodeName(md.Name), nil
 }

 // AddSSHKeyToAllInstances is not implemented for OpenStack
@@ -105,18 +106,6 @@ func (i *Instances) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
     return addresses, nil
 }

-// ExternalID returns the cloud provider ID of the specified instance (deprecated).
-func (i *Instances) ExternalID(ctx context.Context, name types.NodeName) (string, error) {
-    srv, err := getServerByName(i.compute, name, true)
-    if err != nil {
-        if err == ErrNotFound {
-            return "", cloudprovider.InstanceNotFound
-        }
-        return "", err
-    }
-    return srv.ID, nil
-}
-
 // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
 // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
 func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
@@ -141,6 +130,11 @@ func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
     return true, nil
 }

+// InstanceShutdownByProviderID returns true if the instances is in safe state to detach volumes
+func (i *Instances) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
+    return false, cloudprovider.NotImplemented
+}
+
 // InstanceID returns the kubelet's cloud provider ID.
 func (os *OpenStack) InstanceID() (string, error) {
     if len(os.localInstanceID) == 0 {
@@ -155,7 +149,7 @@ func (os *OpenStack) InstanceID() (string, error) {

 // InstanceID returns the cloud provider ID of the specified instance.
 func (i *Instances) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
-    srv, err := getServerByName(i.compute, name, true)
+    srv, err := getServerByName(i.compute, name)
     if err != nil {
         if err == ErrNotFound {
             return "", cloudprovider.InstanceNotFound
@@ -188,7 +182,7 @@ func (i *Instances) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {

 // InstanceType returns the type of the specified instance.
 func (i *Instances) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
-    srv, err := getServerByName(i.compute, name, true)
+    srv, err := getServerByName(i.compute, name)
     if err != nil {
         return "", err
View File

@@ -70,6 +70,7 @@ const (
     errorStatus = "ERROR"

     ServiceAnnotationLoadBalancerFloatingNetworkID = "loadbalancer.openstack.org/floating-network-id"
+    ServiceAnnotationLoadBalancerSubnetID          = "loadbalancer.openstack.org/subnet-id"

     // ServiceAnnotationLoadBalancerInternal is the annotation used on the service
     // to indicate that we want an internal loadbalancer service.
@@ -367,7 +368,7 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID string) error {
     err := wait.ExponentialBackoff(backoff, func() (bool, error) {
         _, err := loadbalancers.Get(client, loadbalancerID).Extract()
         if err != nil {
-            if err == ErrNotFound {
+            if isNotFound(err) {
                 return true, nil
             }
             return false, err
@@ -486,21 +487,27 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
 // The LB needs to be configured with instance addresses on the same
 // subnet as the LB (aka opts.SubnetID). Currently we're just
-// guessing that the node's InternalIP is the right address - and that
-// should be sufficient for all "normal" cases.
+// guessing that the node's InternalIP is the right address.
+// In case no InternalIP can be found, ExternalIP is tried.
+// If neither InternalIP nor ExternalIP can be found an error is
+// returned.
 func nodeAddressForLB(node *v1.Node) (string, error) {
     addrs := node.Status.Addresses
     if len(addrs) == 0 {
         return "", ErrNoAddressFound
     }

-    for _, addr := range addrs {
-        if addr.Type == v1.NodeInternalIP {
-            return addr.Address, nil
+    allowedAddrTypes := []v1.NodeAddressType{v1.NodeInternalIP, v1.NodeExternalIP}
+
+    for _, allowedAddrType := range allowedAddrTypes {
+        for _, addr := range addrs {
+            if addr.Type == allowedAddrType {
+                return addr.Address, nil
+            }
         }
     }

-    return addrs[0].Address, nil
+    return "", ErrNoAddressFound
 }

 //getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting
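To make the new address-selection order concrete, here is a self-contained sketch around a copy of the patched nodeAddressForLB; it assumes the k8s.io/api/core/v1 types and redefines ErrNoAddressFound locally:

package main

import (
    "errors"
    "fmt"

    v1 "k8s.io/api/core/v1"
)

var ErrNoAddressFound = errors.New("no address found for host")

// Copy of the patched helper, for illustration outside the provider package.
func nodeAddressForLB(node *v1.Node) (string, error) {
    addrs := node.Status.Addresses
    if len(addrs) == 0 {
        return "", ErrNoAddressFound
    }
    allowedAddrTypes := []v1.NodeAddressType{v1.NodeInternalIP, v1.NodeExternalIP}
    for _, allowedAddrType := range allowedAddrTypes {
        for _, addr := range addrs {
            if addr.Type == allowedAddrType {
                return addr.Address, nil
            }
        }
    }
    return "", ErrNoAddressFound
}

func main() {
    node := &v1.Node{}
    node.Status.Addresses = []v1.NodeAddress{
        {Type: v1.NodeExternalIP, Address: "203.0.113.10"},
        {Type: v1.NodeInternalIP, Address: "10.0.0.5"},
    }
    addr, _ := nodeAddressForLB(node)
    // InternalIP wins even though ExternalIP is listed first; ExternalIP is
    // only used as a fallback when no InternalIP exists.
    fmt.Println(addr) // 10.0.0.5
}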
@@ -547,14 +554,14 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string, error) {
 }

 // getNodeSecurityGroupIDForLB lists node-security-groups for specific nodes
-func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
-    nodeSecurityGroupIDs := sets.NewString()
+func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
+    secGroupNames := sets.NewString()

     for _, node := range nodes {
         nodeName := types.NodeName(node.Name)
-        srv, err := getServerByName(compute, nodeName, true)
+        srv, err := getServerByName(compute, nodeName)
         if err != nil {
-            return nodeSecurityGroupIDs.List(), err
+            return []string{}, err
         }

         // use the first node-security-groups
@@ -562,11 +569,19 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
         // case 1: node1:SG1  node2:SG2  return SG1,SG2
         // case 2: node1:SG1,SG2  node2:SG3,SG4  return SG1,SG3
         // case 3: node1:SG1,SG2  node2:SG2,SG3  return SG1,SG2
-        securityGroupName := srv.SecurityGroups[0]["name"]
-        nodeSecurityGroupIDs.Insert(securityGroupName.(string))
+        secGroupNames.Insert(srv.SecurityGroups[0]["name"].(string))
     }

-    return nodeSecurityGroupIDs.List(), nil
+    secGroupIDs := make([]string, secGroupNames.Len())
+    for i, name := range secGroupNames.List() {
+        secGroupID, err := groups.IDFromName(network, name)
+        if err != nil {
+            return []string{}, err
+        }
+        secGroupIDs[i] = secGroupID
+    }
+
+    return secGroupIDs, nil
 }

 // isSecurityGroupNotFound return true while 'err' is object of gophercloud.ErrResourceNotFound
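The second loop above is why the function now also takes a network client: Nova only reports security-group names, and groups.IDFromName resolves each name to a Neutron ID. A hedged sketch of that call in isolation, assuming the vendored gophercloud client; the endpoint and credentials below are placeholders:

package main

import (
    "fmt"

    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
)

func main() {
    // Placeholder credentials; in the provider these clients are built from
    // the cloud config instead.
    provider, err := openstack.AuthenticatedClient(gophercloud.AuthOptions{
        IdentityEndpoint: "http://keystone.example:5000/v3",
        Username:         "demo",
        Password:         "secret",
        DomainName:       "Default",
        TenantName:       "demo",
    })
    if err != nil {
        panic(err)
    }
    network, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{})
    if err != nil {
        panic(err)
    }

    // The call this patch introduces: security group name -> Neutron ID.
    id, err := groups.IDFromName(network, "default")
    if err != nil {
        panic(err)
    }
    fmt.Println(id)
}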
@@ -643,6 +658,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
         return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name)
     }

+    lbaas.opts.SubnetID = getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID)
     if len(lbaas.opts.SubnetID) == 0 {
         // Get SubnetID automatically.
         // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
@@ -815,6 +831,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
             if !memberExists(members, addr, int(port.NodePort)) {
                 glog.V(4).Infof("Creating member for pool %s", pool.ID)
                 _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
+                    Name:         fmt.Sprintf("member_%s_%d_%s", name, portIndex, node.Name),
                     ProtocolPort: int(port.NodePort),
                     Address:      addr,
                     SubnetID:     lbaas.opts.SubnetID,
@@ -852,6 +869,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
         if monitorID == "" && lbaas.opts.CreateMonitor {
             glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
             monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{
+                Name:   fmt.Sprintf("monitor_%s_%d", name, portIndex),
                 PoolID: pool.ID,
                 Type:   string(port.Protocol),
                 Delay:  int(lbaas.opts.MonitorDelay.Duration.Seconds()),
@@ -987,7 +1005,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Service, nodes []*v1.Node, loadbalancer *loadbalancers.LoadBalancer) error {
     // find node-security-group for service
     var err error
     if len(lbaas.opts.NodeSecurityGroupIDs) == 0 {
-        lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes)
+        lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes)
         if err != nil {
             return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
         }
@@ -1158,6 +1176,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
     loadBalancerName := cloudprovider.GetLoadBalancerName(service)
     glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes)

+    lbaas.opts.SubnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID)
     if len(lbaas.opts.SubnetID) == 0 && len(nodes) > 0 {
         // Get SubnetID automatically.
         // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
@@ -1211,17 +1230,17 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
     }

     // Compose Set of member (addresses) that _should_ exist
-    addrs := map[string]empty{}
+    addrs := make(map[string]*v1.Node)
     for _, node := range nodes {
         addr, err := nodeAddressForLB(node)
         if err != nil {
             return err
         }
-        addrs[addr] = empty{}
+        addrs[addr] = node
     }

     // Check for adding/removing members associated with each port
-    for _, port := range ports {
+    for portIndex, port := range ports {
         // Get listener associated with this port
         listener, ok := lbListeners[portKey{
             Protocol: toListenersProtocol(port.Protocol),
@@ -1248,12 +1267,13 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
         }

         // Add any new members for this port
-        for addr := range addrs {
+        for addr, node := range addrs {
             if _, ok := members[addr]; ok && members[addr].ProtocolPort == int(port.NodePort) {
                 // Already exists, do not create member
                 continue
             }
             _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{
+                Name:         fmt.Sprintf("member_%s_%d_%s", loadbalancer.Name, portIndex, node.Name),
                 Address:      addr,
                 ProtocolPort: int(port.NodePort),
                 SubnetID:     lbaas.opts.SubnetID,
@@ -1299,7 +1319,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Service, nodes []*v1.Node, loadbalancer *loadbalancers.LoadBalancer) error {
     originalNodeSecurityGroupIDs := lbaas.opts.NodeSecurityGroupIDs

     var err error
-    lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes)
+    lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes)
     if err != nil {
         return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
     }
@@ -1424,18 +1444,6 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
         }
     }

-    // get all members associated with each poolIDs
-    var memberIDs []string
-    for _, pool := range poolIDs {
-        membersList, err := getMembersByPoolID(lbaas.lb, pool)
-        if err != nil && !isNotFound(err) {
-            return fmt.Errorf("error getting pool members %s: %v", pool, err)
-        }
-        for _, member := range membersList {
-            memberIDs = append(memberIDs, member.ID)
-        }
-    }
-
     // delete all monitors
     for _, monitorID := range monitorIDs {
         err := v2monitors.Delete(lbaas.lb, monitorID).ExtractErr()
@@ -1450,9 +1458,14 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
     // delete all members and pools
     for _, poolID := range poolIDs {
+        // get members for current pool
+        membersList, err := getMembersByPoolID(lbaas.lb, poolID)
+        if err != nil && !isNotFound(err) {
+            return fmt.Errorf("error getting pool members %s: %v", poolID, err)
+        }
         // delete all members for this pool
-        for _, memberID := range memberIDs {
-            err := v2pools.DeleteMember(lbaas.lb, poolID, memberID).ExtractErr()
+        for _, member := range membersList {
+            err := v2pools.DeleteMember(lbaas.lb, poolID, member.ID).ExtractErr()
             if err != nil && !isNotFound(err) {
                 return err
             }
@@ -1463,7 +1476,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
         }

         // delete pool
-        err := v2pools.Delete(lbaas.lb, poolID).ExtractErr()
+        err = v2pools.Delete(lbaas.lb, poolID).ExtractErr()
         if err != nil && !isNotFound(err) {
             return err
         }

View File

@@ -83,9 +83,12 @@ func (r *Routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
     var routes []*cloudprovider.Route
     for _, item := range router.Routes {
         nodeName, foundNode := nodeNamesByAddr[item.NextHop]
+        if !foundNode {
+            nodeName = types.NodeName(item.NextHop)
+        }
         route := cloudprovider.Route{
             Name:            item.DestinationCIDR,
-            TargetNode:      nodeName, //empty if NextHop is unknown
+            TargetNode:      nodeName, //contains the nexthop address if node was not found
             Blackhole:       !foundNode,
             DestinationCIDR: item.DestinationCIDR,
         }
@@ -225,10 +228,16 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
     ip, _, _ := net.ParseCIDR(route.DestinationCIDR)
     isCIDRv6 := ip.To4() == nil

-    addr, err := getAddressByName(r.compute, route.TargetNode, isCIDRv6)
-    if err != nil {
-        return err
+    var addr string
+
+    // Blackhole routes are orphaned and have no counterpart in OpenStack
+    if !route.Blackhole {
+        var err error
+        addr, err = getAddressByName(r.compute, route.TargetNode, isCIDRv6)
+        if err != nil {
+            return err
+        }
     }

     router, err := routers.Get(r.network, r.opts.RouterID).Extract()
@@ -239,7 +248,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
     routes := router.Routes
     index := -1
     for i, item := range routes {
-        if item.DestinationCIDR == route.DestinationCIDR && item.NextHop == addr {
+        if item.DestinationCIDR == route.DestinationCIDR && (item.NextHop == addr || route.Blackhole && item.NextHop == string(route.TargetNode)) {
             index = i
             break
         }
@@ -255,7 +264,8 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
     routes = routes[:len(routes)-1]

     unwind, err := updateRoutes(r.network, router, routes)
-    if err != nil {
+    // If this was a blackhole route we are done, there are no ports to update
+    if err != nil || route.Blackhole {
         return err
     }
     defer onFailure.call(unwind)
@@ -297,7 +307,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error {
 }

 func getPortIDByIP(compute *gophercloud.ServiceClient, targetNode types.NodeName, ipAddress string) (string, error) {
-    srv, err := getServerByName(compute, targetNode, true)
+    srv, err := getServerByName(compute, targetNode)
     if err != nil {
         return "", err
     }
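The round trip for orphaned routes across these hunks: ListRoutes stores the raw next-hop address in TargetNode and sets Blackhole, and DeleteRoute then matches the router entry on that raw address instead of resolving a server. A self-contained sketch of the matching predicate; Route and RouterRoute are local stand-ins for the cloudprovider and Neutron types:

package main

import "fmt"

// Local stand-ins for cloudprovider.Route and a Neutron router route entry.
type Route struct {
    TargetNode      string // node name, or the raw next-hop address for blackhole routes
    Blackhole       bool
    DestinationCIDR string
}

type RouterRoute struct {
    DestinationCIDR string
    NextHop         string
}

// matches mirrors the patched comparison in DeleteRoute: for blackhole routes
// addr stays empty, so the entry is found via the stored next-hop address.
func matches(item RouterRoute, route Route, addr string) bool {
    return item.DestinationCIDR == route.DestinationCIDR &&
        (item.NextHop == addr || route.Blackhole && item.NextHop == route.TargetNode)
}

func main() {
    orphan := Route{TargetNode: "10.0.0.99", Blackhole: true, DestinationCIDR: "192.168.1.0/24"}
    entry := RouterRoute{DestinationCIDR: "192.168.1.0/24", NextHop: "10.0.0.99"}
    fmt.Println(matches(entry, orphan, "")) // true: deletable without resolving a server
}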

View File

@@ -88,6 +88,11 @@ func TestReadConfig(t *testing.T) {
         t.Errorf("Should fail when no config is provided: %s", err)
     }

+    // Since we are setting env vars, we need to reset old
+    // values for other tests to succeed.
+    env := clearEnviron(t)
+    defer resetEnviron(t, env)
+
     os.Setenv("OS_PASSWORD", "mypass")
     defer os.Unsetenv("OS_PASSWORD")
@@ -681,3 +686,24 @@ func TestToAuth3Options(t *testing.T) {
         t.Errorf("DomainName %s != %s", ao.DomainName, cfg.Global.DomainName)
     }
 }
+
+func clearEnviron(t *testing.T) []string {
+    env := os.Environ()
+    for _, pair := range env {
+        if strings.HasPrefix(pair, "OS_") {
+            i := strings.Index(pair, "=") + 1
+            os.Unsetenv(pair[:i-1])
+        }
+    }
+    return env
+}
+
+func resetEnviron(t *testing.T, items []string) {
+    for _, pair := range items {
+        if strings.HasPrefix(pair, "OS_") {
+            i := strings.Index(pair, "=") + 1
+            if err := os.Setenv(pair[:i-1], pair[i:]); err != nil {
+                t.Errorf("Setenv(%q, %q) failed during reset: %v", pair[:i-1], pair[i:], err)
+            }
+        }
+    }
+}
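A sketch of how a test is expected to use these helpers (the same pattern applied in TestReadConfig above): snapshot the environment, clear the OS_* namespace, and restore the snapshot on exit. The test name and value are hypothetical, and the snippet assumes it sits next to clearEnviron/resetEnviron in the same package:

package openstack

import (
    "os"
    "testing"
)

// TestWithCleanOSEnv is a hypothetical test showing the intended usage of the
// helpers above: snapshot the environment, drop every OS_* variable, and
// restore the snapshot when the test finishes so later tests are unaffected.
func TestWithCleanOSEnv(t *testing.T) {
    env := clearEnviron(t)
    defer resetEnviron(t, env)

    // Illustrative value; code under test now sees a clean OS_* namespace.
    os.Setenv("OS_AUTH_URL", "http://keystone.example:5000/v3")
    defer os.Unsetenv("OS_AUTH_URL")
    // ... exercise config-reading code here ...
}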

View File

@@ -310,7 +310,8 @@ func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
     }
     volumeStatus := volume.Status
     if volumeStatus == volumeErrorStatus {
-        return false, volumeStatus, nil
+        err = fmt.Errorf("status of volume %s is %s", diskName, volumeStatus)
+        return false, volumeStatus, err
     }
     if volumeStatus == volumeAvailableStatus || volumeStatus == volumeInUseStatus || volumeStatus == volumeDeletedStatus {
         return false, volume.Status, nil
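With this change a volume stuck in ERROR no longer looks like a completed operation: the caller now receives the status and an explanatory error together. A hedged sketch of a caller under the new contract; checkVolumeSettled is our illustrative name, written as if it lived in the same openstack package:

package openstack

import "fmt"

// checkVolumeSettled is a hypothetical helper (not part of this commit)
// illustrating the new contract: pending == false with a non-nil error
// means the volume landed in the ERROR state rather than finishing cleanly.
func checkVolumeSettled(os *OpenStack, diskName string) error {
    pending, status, err := os.OperationPending(diskName)
    if err != nil {
        return fmt.Errorf("volume %s failed (status %q): %v", diskName, status, err)
    }
    if pending {
        return fmt.Errorf("volume %s is still transitioning (status %q)", diskName, status)
    }
    return nil // available, in-use, or deleted
}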
@@ -622,7 +623,7 @@ func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
     if err != nil {
         return false, "", err
     }
-    srv, err := getServerByName(cClient, nodeName, false)
+    srv, err := getServerByName(cClient, nodeName)
     if err != nil {
         if err == ErrNotFound {
             // instance not found anymore in cloudprovider, assume that cinder is detached
@@ -659,7 +660,7 @@ func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
     if err != nil {
         return attached, err
     }
-    srv, err := getServerByName(cClient, nodeName, false)
+    srv, err := getServerByName(cClient, nodeName)
     if err != nil {
         if err == ErrNotFound {
             // instance not found anymore, mark all volumes as detached