mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-14 10:53:34 +00:00
@@ -68,6 +68,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
    newVM := compute.VirtualMachine{
        Location: vm.Location,
        VirtualMachineProperties: &compute.VirtualMachineProperties{
            HardwareProfile: vm.HardwareProfile,
            StorageProfile: &compute.StorageProfile{
                DataDisks: &disks,
            },
@@ -132,6 +133,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
    newVM := compute.VirtualMachine{
        Location: vm.Location,
        VirtualMachineProperties: &compute.VirtualMachineProperties{
            HardwareProfile: vm.HardwareProfile,
            StorageProfile: &compute.StorageProfile{
                DataDisks: &disks,
            },
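Note: the two hunks above follow the same pattern: rather than PUTting the whole cached VM back, they build a fresh compute.VirtualMachine that carries only the location, the existing hardware profile, and the rebuilt data-disk list. A minimal sketch of that pattern (the helper name buildUpdateVM is hypothetical; vm and disks stand in for the variables used in the diff):

package azure

import (
    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// buildUpdateVM sketches the shape of the update object used above: only the
// fields the disk attach/detach needs are copied from the cached VM, and the
// data-disk list is replaced with the rebuilt slice.
func buildUpdateVM(vm compute.VirtualMachine, disks []compute.DataDisk) compute.VirtualMachine {
    return compute.VirtualMachine{
        Location: vm.Location,
        VirtualMachineProperties: &compute.VirtualMachineProperties{
            HardwareProfile: vm.HardwareProfile,
            StorageProfile: &compute.StorageProfile{
                DataDisks: &disks,
            },
        },
    }
}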
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go (generated, vendored): 29 changed lines
@@ -67,7 +67,17 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
            CreateOption: "attach",
        })
    }
    vm.StorageProfile.DataDisks = &disks
    newVM := compute.VirtualMachineScaleSetVM{
        Sku: vm.Sku,
        Location: vm.Location,
        VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
            HardwareProfile: vm.HardwareProfile,
            StorageProfile: &compute.StorageProfile{
                OsDisk: vm.StorageProfile.OsDisk,
                DataDisks: &disks,
            },
        },
    }

    ctx, cancel := getContextWithCancel()
    defer cancel()
@@ -77,7 +87,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
    defer ss.vmssVMCache.Delete(key)

    klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName)
    _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
    _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
    if err != nil {
        detail := err.Error()
        if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
@@ -126,7 +136,18 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
        return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
    }

    vm.StorageProfile.DataDisks = &disks
    newVM := compute.VirtualMachineScaleSetVM{
        Sku: vm.Sku,
        Location: vm.Location,
        VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
            HardwareProfile: vm.HardwareProfile,
            StorageProfile: &compute.StorageProfile{
                OsDisk: vm.StorageProfile.OsDisk,
                DataDisks: &disks,
            },
        },
    }

    ctx, cancel := getContextWithCancel()
    defer cancel()
@@ -135,7 +156,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
    defer ss.vmssVMCache.Delete(key)

    klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName)
    _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
    _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
    if err != nil {
        klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err)
    } else {
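Note: the scale-set variant does the same thing through VirtualMachineScaleSetVMsClient, but the replacement object also carries the Sku and the existing OsDisk, and the node's vmssVMCache entry is deleted (via defer) so the next lookup re-reads fresh state. A rough sketch under those assumptions (buildUpdateVMSSVM is a hypothetical helper, not part of the vendored code):

package azure

import (
    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// buildUpdateVMSSVM mirrors the shape of the diff above: vm is the cached
// scale-set VM for the node, disks is the updated data-disk list, and only
// the fields needed for the disk update are copied into the new object.
func buildUpdateVMSSVM(vm compute.VirtualMachineScaleSetVM, disks []compute.DataDisk) compute.VirtualMachineScaleSetVM {
    return compute.VirtualMachineScaleSetVM{
        Sku:      vm.Sku,
        Location: vm.Location,
        VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
            HardwareProfile: vm.HardwareProfile,
            StorageProfile: &compute.StorageProfile{
                OsDisk:    vm.StorageProfile.OsDisk, // keep the existing OS disk untouched
                DataDisks: &disks,                   // replace only the data-disk list
            },
        },
    }
}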
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go (generated, vendored): 13 changed lines
@@ -96,7 +96,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
    addresses := []v1.NodeAddress{
        {Type: v1.NodeHostName, Address: string(name)},
    }
    for _, address := range ipAddress.IPV4.IPAddress {
    if len(ipAddress.IPV4.IPAddress) > 0 && len(ipAddress.IPV4.IPAddress[0].PrivateIP) > 0 {
        address := ipAddress.IPV4.IPAddress[0]
        addresses = append(addresses, v1.NodeAddress{
            Type: v1.NodeInternalIP,
            Address: address.PrivateIP,
@@ -108,7 +109,8 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
            })
        }
    }
    for _, address := range ipAddress.IPV6.IPAddress {
    if len(ipAddress.IPV6.IPAddress) > 0 && len(ipAddress.IPV6.IPAddress[0].PrivateIP) > 0 {
        address := ipAddress.IPV6.IPAddress[0]
        addresses = append(addresses, v1.NodeAddress{
            Type: v1.NodeInternalIP,
            Address: address.PrivateIP,
@@ -120,6 +122,13 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
            })
        }
    }

    if len(addresses) == 1 {
        // No IP addresses is got from instance metadata service, clean up cache and report errors.
        az.metadata.imsCache.Delete(metadataCacheKey)
        return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
    }

    return addresses, nil
}
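Note: after these hunks, NodeAddresses always starts from the hostname entry, appends whatever private and public IPs the instance metadata reports, and treats a slice that still contains only the hostname as "no IPs returned" (clearing the metadata cache and returning an error). A simplified sketch of that shape, with hypothetical privateIPs/publicIPs slices standing in for the parsed metadata:

package azure

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// buildNodeAddresses is illustrative only: it shows the hostname-first slice
// and the "only the hostname entry" error check used in the diff above.
func buildNodeAddresses(name string, privateIPs, publicIPs []string) ([]v1.NodeAddress, error) {
    addresses := []v1.NodeAddress{
        {Type: v1.NodeHostName, Address: name},
    }
    for _, ip := range privateIPs {
        addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalIP, Address: ip})
    }
    for _, ip := range publicIPs {
        addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: ip})
    }
    // Only the hostname entry means the metadata service returned no IPs at all.
    if len(addresses) == 1 {
        return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
    }
    return addresses, nil
}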
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go (generated, vendored): 121 changed lines
@@ -21,10 +21,12 @@ import (
    "fmt"
    "net"
    "net/http"
    "reflect"
    "testing"

    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
    "github.com/Azure/go-autorest/autorest/to"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
)
@@ -216,3 +218,122 @@ func TestInstanceShutdownByProviderID(t *testing.T) {
        }
    }
}

func TestNodeAddresses(t *testing.T) {
    cloud := getTestCloud()
    cloud.Config.UseInstanceMetadata = true
    metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}`

    testcases := []struct {
        name        string
        nodeName    string
        ipV4        string
        ipV6        string
        ipV4Public  string
        ipV6Public  string
        expected    []v1.NodeAddress
        expectError bool
    }{
        {
            name:     "NodeAddresses should get both ipV4 and ipV6 private addresses",
            nodeName: "vm1",
            ipV4:     "10.240.0.1",
            ipV6:     "1111:11111:00:00:1111:1111:000:111",
            expected: []v1.NodeAddress{
                {
                    Type:    v1.NodeHostName,
                    Address: "vm1",
                },
                {
                    Type:    v1.NodeInternalIP,
                    Address: "10.240.0.1",
                },
                {
                    Type:    v1.NodeInternalIP,
                    Address: "1111:11111:00:00:1111:1111:000:111",
                },
            },
        },
        {
            name:        "NodeAddresses should report error when IPs are empty",
            nodeName:    "vm1",
            expectError: true,
        },
        {
            name:       "NodeAddresses should get ipV4 private and public addresses",
            nodeName:   "vm1",
            ipV4:       "10.240.0.1",
            ipV4Public: "9.9.9.9",
            expected: []v1.NodeAddress{
                {
                    Type:    v1.NodeHostName,
                    Address: "vm1",
                },
                {
                    Type:    v1.NodeInternalIP,
                    Address: "10.240.0.1",
                },
                {
                    Type:    v1.NodeExternalIP,
                    Address: "9.9.9.9",
                },
            },
        },
        {
            name:       "NodeAddresses should get ipV6 private and public addresses",
            nodeName:   "vm1",
            ipV6:       "1111:11111:00:00:1111:1111:000:111",
            ipV6Public: "2222:22221:00:00:2222:2222:000:111",
            expected: []v1.NodeAddress{
                {
                    Type:    v1.NodeHostName,
                    Address: "vm1",
                },
                {
                    Type:    v1.NodeInternalIP,
                    Address: "1111:11111:00:00:1111:1111:000:111",
                },
                {
                    Type:    v1.NodeExternalIP,
                    Address: "2222:22221:00:00:2222:2222:000:111",
                },
            },
        },
    }

    for _, test := range testcases {
        listener, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            t.Errorf("Test [%s] unexpected error: %v", test.name, err)
        }

        mux := http.NewServeMux()
        mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintf(w, fmt.Sprintf(metadataTemplate, test.nodeName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public))
        }))
        go func() {
            http.Serve(listener, mux)
        }()
        defer listener.Close()

        cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
        if err != nil {
            t.Errorf("Test [%s] unexpected error: %v", test.name, err)
        }

        ipAddresses, err := cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName))
        if test.expectError {
            if err == nil {
                t.Errorf("Test [%s] unexpected nil err", test.name)
            }
        } else {
            if err != nil {
                t.Errorf("Test [%s] unexpected error: %v", test.name, err)
            }
        }

        if !reflect.DeepEqual(ipAddresses, test.expected) {
            t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, test.expected)
        }
    }
}
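Note: TestNodeAddresses above mocks the instance metadata endpoint with a hand-rolled net.Listen plus http.Serve pair. The standard library's net/http/httptest package gives an equivalent fake server with less plumbing; a sketch, assuming the filled-in metadataTemplate JSON is passed in as metadataJSON:

package azure

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// newFakeMetadataServer is a hypothetical helper: it serves a canned
// instance-metadata payload the way the test's hand-rolled listener does.
func newFakeMetadataServer(metadataJSON string) *httptest.Server {
    return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, metadataJSON)
    }))
}

The returned server exposes a URL field that could be handed to NewInstanceMetadataService, and a Close method to call at the end of each test case.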
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go (generated, vendored): 2 changed lines
@@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go (generated, vendored): 2 changed lines
@@ -1,5 +1,5 @@
/*
Copyright 2018 The Kubernetes Authors.
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.