vendor update for CSI 0.3.0
1   vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD (generated, vendored)
@@ -38,6 +38,7 @@ go_test(
     srcs = ["reconciler_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/features:go_default_library",
         "//pkg/kubelet/volumemanager/cache:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
55  vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go (generated, vendored)
@@ -169,7 +169,7 @@ func (rc *reconciler) reconcile() {
 			// Volume is mounted, unmount it
 			glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
 			err := rc.operationExecutor.UnmountVolume(
-				mountedVolume.MountedVolume, rc.actualStateOfWorld)
+				mountedVolume.MountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
 			if err != nil &&
 				!nestedpendingoperations.IsAlreadyExists(err) &&
 				!exponentialbackoff.IsExponentialBackoff(err) {
@@ -254,6 +254,22 @@ func (rc *reconciler) reconcile() {
 				glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr))
 			}
 		}
+	} else if cache.IsFSResizeRequiredError(err) &&
+		utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) {
+		glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", ""))
+		err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting(
+			volumeToMount.VolumeToMount,
+			rc.actualStateOfWorld)
+		if err != nil &&
+			!nestedpendingoperations.IsAlreadyExists(err) &&
+			!exponentialbackoff.IsExponentialBackoff(err) {
+			// Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected.
+			// Log all other errors.
+			glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error())
+		}
+		if err == nil {
+			glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", ""))
+		}
+	}
 	}
 }
 
@@ -357,21 +373,39 @@ func (rc *reconciler) syncStates() {
 		return
 	}
 	volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
+	volumeNeedReport := []v1.UniqueVolumeName{}
 	for _, volume := range podVolumes {
-		if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
-			glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
-			continue
-		}
 		if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
 			glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
+			// There is nothing to reconstruct
 			continue
 		}
+		volumeInDSW := rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName)
 
 		reconstructedVolume, err := rc.reconstructVolume(volume)
 		if err != nil {
+			if volumeInDSW {
+				// Some pod needs the volume, don't clean it up and hope that
+				// reconcile() calls SetUp and reconstructs the volume in ASW.
+				glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
+				continue
+			}
+			// No pod needs the volume.
 			glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
 			rc.cleanupMounts(volume)
 			continue
 		}
+		if volumeInDSW {
+			// Some pod needs the volume. And it exists on disk. Some previous
+			// kubelet must have created the directory, therefore it must have
+			// reported the volume as in use. Mark the volume as in use also in
+			// this new kubelet so reconcile() calls SetUp and re-mounts the
+			// volume if it's necessary.
+			volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
+			glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName)
+			continue
+		}
+		// There is no pod that uses the volume.
+		if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
+			glog.Warning("Volume is in pending operation, skip cleaning up mounts")
+		}
@@ -386,7 +420,9 @@ func (rc *reconciler) syncStates() {
glog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(volumeNeedReport) > 0 {
|
||||
rc.desiredStateOfWorld.MarkVolumesReportedInUse(volumeNeedReport)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *reconciler) cleanupMounts(volume podVolume) {
|
||||
@@ -401,7 +437,7 @@ func (rc *reconciler) cleanupMounts(volume podVolume) {
 	}
 	// TODO: Currently cleanupMounts only includes UnmountVolume operation. In the next PR, we will add
 	// to unmount both volume and device in the same routine.
-	err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld)
+	err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
 	if err != nil {
 		glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error())
 		return
@@ -455,7 +491,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
 	// Check existence of mount point for filesystem volume or symbolic link for block volume
 	isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
 	if checkErr != nil {
-		return nil, err
+		return nil, checkErr
 	}
 	// If mount or symlink doesn't exist, volume reconstruction should be failed
 	if !isExist {
@@ -573,7 +609,8 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re
 			volume.mounter,
 			volume.blockVolumeMapper,
 			volume.outerVolumeSpecName,
-			volume.volumeGidValue)
+			volume.volumeGidValue,
+			volume.volumeSpec)
 		if err != nil {
 			glog.Errorf("Could not add pod to volume information to actual state of world: %v", err)
 			continue
187 vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go (generated, vendored)
@@ -32,6 +32,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
@@ -479,7 +480,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
 		Spec: v1.PersistentVolumeSpec{
 			Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
+			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
 			AccessModes: []v1.PersistentVolumeAccessMode{
 				v1.ReadWriteOnce,
 				v1.ReadOnlyMany,
@@ -508,12 +509,8 @@
 		1 /* expectedAttachCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
 		1 /* expectedWaitForAttachCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
-		1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
-		1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
-		1 /* expectedSetUpDeviceCallCount */, fakePlugin))
+	assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
+		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
 
@@ -570,7 +567,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
 		Spec: v1.PersistentVolumeSpec{
 			Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
+			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
 			AccessModes: []v1.PersistentVolumeAccessMode{
 				v1.ReadWriteOnce,
 				v1.ReadOnlyMany,
@@ -600,12 +597,8 @@
 	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
 		1 /* expectedWaitForAttachCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
-		1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
-		1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
-		1 /* expectedSetUpCallCount */, fakePlugin))
+	assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
+		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
 
@@ -662,7 +655,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
 		Spec: v1.PersistentVolumeSpec{
 			Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
+			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
 			AccessModes: []v1.PersistentVolumeAccessMode{
 				v1.ReadWriteOnce,
 				v1.ReadOnlyMany,
@@ -691,12 +684,8 @@
 		1 /* expectedAttachCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
 		1 /* expectedWaitForAttachCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
-		1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
-		1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
-		1 /* expectedSetUpCallCount */, fakePlugin))
+	assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
+		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
 
@@ -764,7 +753,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
 		Spec: v1.PersistentVolumeSpec{
 			Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}},
+			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
 			AccessModes: []v1.PersistentVolumeAccessMode{
 				v1.ReadWriteOnce,
 				v1.ReadOnlyMany,
@@ -795,12 +784,8 @@
 	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
 		1 /* expectedWaitForAttachCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetGlobalMapPathCallCount(
-		1 /* expectedGetGlobalMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifyGetPodDeviceMapPathCallCount(
-		1 /* expectedPodDeviceMapPathCallCount */, fakePlugin))
-	assert.NoError(t, volumetesting.VerifySetUpDeviceCallCount(
-		1 /* expectedSetUpCallCount */, fakePlugin))
+	assert.NoError(t, volumetesting.VerifyGetMapDeviceCallCount(
+		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
 	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
 
@@ -908,7 +893,7 @@ func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
 			volumeToUnmount := operationexecutor.MountedVolume{
 				PluginName: "fake-file-plugin",
 				VolumeSpec: tmpSpec}
-			err := oex.UnmountVolume(volumeToUnmount, asw)
+			err := oex.UnmountVolume(volumeToUnmount, asw, "" /* podsDir */)
 			// Assert
 			if assert.Error(t, err) {
 				assert.Contains(t, err.Error(), tc.expectedErrMsg)
@@ -965,6 +950,120 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
|
||||
}
|
||||
|
||||
// Populates desiredStateOfWorld cache with one volume/pod.
|
||||
// Enables controllerAttachDetachEnabled.
|
||||
// Calls Run()
|
||||
// Wait for volume mounted.
|
||||
// Mark volume as fsResizeRequired in ASW.
|
||||
// Verifies volume's fsResizeRequired flag is cleared later.
|
||||
func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandInUsePersistentVolumes))
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv",
|
||||
UID: "pvuid",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
ClaimRef: &v1.ObjectReference{Name: "pvc"},
|
||||
},
|
||||
}
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc",
|
||||
UID: "pvcuid",
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
VolumeName: "pv",
|
||||
},
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
UID: "pod1uid",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "volume-name",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: pvc.Name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
|
||||
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
|
||||
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
|
||||
kubeClient := createtestClientWithPVPVC(pv, pvc)
|
||||
fakeRecorder := &record.FakeRecorder{}
|
||||
fakeHandler := volumetesting.NewBlockVolumePathHandler()
|
||||
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
|
||||
kubeClient,
|
||||
volumePluginMgr,
|
||||
fakeRecorder,
|
||||
false, /* checkNodeCapabilitiesBeforeMount */
|
||||
fakeHandler))
|
||||
|
||||
reconciler := NewReconciler(
|
||||
kubeClient,
|
||||
true, /* controllerAttachDetachEnabled */
|
||||
reconcilerLoopSleepDuration,
|
||||
reconcilerSyncStatesSleepPeriod,
|
||||
waitForAttachTimeout,
|
||||
nodeName,
|
||||
dsw,
|
||||
asw,
|
||||
hasAddedPods,
|
||||
oex,
|
||||
&mount.FakeMounter{},
|
||||
volumePluginMgr,
|
||||
kubeletPodsDir)
|
||||
|
||||
volumeSpec := &volume.Spec{PersistentVolume: pv}
|
||||
podName := util.GetUniquePodName(pod)
|
||||
volumeName, err := dsw.AddPodToVolume(
|
||||
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
|
||||
// Assert
|
||||
if err != nil {
|
||||
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
|
||||
}
|
||||
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
|
||||
|
||||
// Start the reconciler to fill ASW.
|
||||
stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
|
||||
go func() {
|
||||
reconciler.Run(stopChan)
|
||||
close(stoppedChan)
|
||||
}()
|
||||
waitForMount(t, fakePlugin, volumeName, asw)
|
||||
// Stop the reconciler.
|
||||
close(stopChan)
|
||||
<-stoppedChan
|
||||
|
||||
// Mark volume as fsResizeRequired.
|
||||
asw.MarkFSResizeRequired(volumeName, podName)
|
||||
_, _, podExistErr := asw.PodExistsInVolume(podName, volumeName)
|
||||
if !cache.IsFSResizeRequiredError(podExistErr) {
|
||||
t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
|
||||
}
|
||||
|
||||
// Start the reconciler again, we hope reconciler will perform the
|
||||
// resize operation and clear the fsResizeRequired flag for volume.
|
||||
go reconciler.Run(wait.NeverStop)
|
||||
|
||||
waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) {
|
||||
mounted, _, err := asw.PodExistsInVolume(podName, volumeName)
|
||||
return mounted && err == nil, nil
|
||||
})
|
||||
if waitErr != nil {
|
||||
t.Fatal("Volume resize should succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
func waitForMount(
|
||||
t *testing.T,
|
||||
fakePlugin *volumetesting.FakeVolumePlugin,
|
||||
@@ -1029,11 +1128,10 @@ func createTestClient() *fake.Clientset {
 			Status: v1.NodeStatus{
 				VolumesAttached: []v1.AttachedVolume{
 					{
-						Name:       "fake-plugin/volume-name",
+						Name:       "fake-plugin/fake-device1",
 						DevicePath: "fake/path",
 					},
 				}},
-			Spec: v1.NodeSpec{ExternalID: string(nodeName)},
 		}, nil
 	})
 	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
@@ -1045,3 +1143,30 @@ func createTestClient() *fake.Clientset {
 func runReconciler(reconciler Reconciler) {
 	go reconciler.Run(wait.NeverStop)
 }
+
+func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) *fake.Clientset {
+	fakeClient := &fake.Clientset{}
+	fakeClient.AddReactor("get", "nodes",
+		func(action core.Action) (bool, runtime.Object, error) {
+			return true, &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)},
+				Status: v1.NodeStatus{
+					VolumesAttached: []v1.AttachedVolume{
+						{
+							Name:       "fake-plugin/pv",
+							DevicePath: "fake/path",
+						},
+					}},
+			}, nil
+		})
+	fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
+		return true, pvc, nil
+	})
+	fakeClient.AddReactor("get", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
+		return true, pv, nil
+	})
+	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
+		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
+	})
+	return fakeClient
+}