Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

commit 34fc1d847e (parent 4c96ad3c85), committed by mergify[bot]

Changes to accommodate client-go changes and kube vendor update to v1.18.0

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
vendor/k8s.io/kubernetes/test/utils/BUILD (generated, vendored): 1 change
@@ -39,6 +39,7 @@ go_library(
         "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/batch/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
vendor/k8s.io/kubernetes/test/utils/create_resources.go (generated, vendored): 77 changes
@@ -19,13 +19,17 @@ limitations under the License.
 package utils
 
 import (
+    "context"
     "fmt"
     "time"
 
     apps "k8s.io/api/apps/v1"
     batch "k8s.io/api/batch/v1"
+    storage "k8s.io/api/storage/v1"
+
     "k8s.io/api/core/v1"
-    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     utilnet "k8s.io/apimachinery/pkg/util/net"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
@@ -52,12 +56,12 @@ func RetryWithExponentialBackOff(fn wait.ConditionFunc) error {
 
 func IsRetryableAPIError(err error) bool {
     // These errors may indicate a transient error that we can retry in tests.
-    if apierrs.IsInternalError(err) || apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) ||
-        apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) {
+    if apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsServerTimeout(err) ||
+        apierrors.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) {
         return true
     }
     // If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
-    if _, shouldRetry := apierrs.SuggestsClientDelay(err); shouldRetry {
+    if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
         return true
     }
     return false
@@ -68,8 +72,8 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod)
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().Pods(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().Pods(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -85,8 +89,8 @@ func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.Replic
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().ReplicationControllers(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -102,8 +106,8 @@ func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.AppsV1().ReplicaSets(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -119,8 +123,8 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.AppsV1().Deployments(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -136,8 +140,8 @@ func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *ap
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.AppsV1().DaemonSets(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -153,8 +157,8 @@ func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Jo
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.BatchV1().Jobs(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -170,8 +174,8 @@ func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Se
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().Secrets(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -187,8 +191,8 @@ func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().ConfigMaps(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -204,8 +208,25 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().Services(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
+            return false, nil
+        }
+        return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
+    }
+    return RetryWithExponentialBackOff(createFunc)
+}
+
+func CreateStorageClassWithRetries(c clientset.Interface, obj *storage.StorageClass) error {
+    if obj == nil {
+        return fmt.Errorf("Object provided to create is empty")
+    }
+    createFunc := func() (bool, error) {
+        _, err := c.StorageV1().StorageClasses().Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
+            return true, nil
+        }
+        if IsRetryableAPIError(err) {
@@ -221,8 +242,8 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().ResourceQuotas(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -238,8 +259,8 @@ func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.Persistent
         return fmt.Errorf("Object provided to create is empty")
     }
    createFunc := func() (bool, error) {
-        _, err := c.CoreV1().PersistentVolumes().Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
@@ -255,8 +276,8 @@ func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace str
         return fmt.Errorf("Object provided to create is empty")
     }
     createFunc := func() (bool, error) {
-        _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(obj)
-        if err == nil || apierrs.IsAlreadyExists(err) {
+        _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
+        if err == nil || apierrors.IsAlreadyExists(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
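Note: the hunks above are the mechanical core of the client-go v0.18 migration: every generated client call gains a leading context.Context and a trailing options struct, and the error-package alias changes from apierrs to apierrors. A minimal sketch of the call-site change (the helper name and variables are illustrative, not part of this patch):

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    func createPod(c clientset.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
        // client-go <= v0.17: c.CoreV1().Pods(ns).Create(pod)
        // client-go >= v0.18: context first, explicit CreateOptions last.
        return c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
    }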
vendor/k8s.io/kubernetes/test/utils/delete_resources.go (generated, vendored): 27 changes
@@ -19,9 +19,10 @@ limitations under the License.
 package utils
 
 import (
+    "context"
     "fmt"
 
-    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     clientset "k8s.io/client-go/kubernetes"
@@ -31,35 +32,35 @@ import (
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 )
 
-func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
+func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
     switch kind {
     case api.Kind("Pod"):
-        return c.CoreV1().Pods(namespace).Delete(name, options)
+        return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
     case api.Kind("ReplicationController"):
-        return c.CoreV1().ReplicationControllers(namespace).Delete(name, options)
+        return c.CoreV1().ReplicationControllers(namespace).Delete(context.TODO(), name, options)
     case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
-        return c.AppsV1().ReplicaSets(namespace).Delete(name, options)
+        return c.AppsV1().ReplicaSets(namespace).Delete(context.TODO(), name, options)
     case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
-        return c.AppsV1().Deployments(namespace).Delete(name, options)
+        return c.AppsV1().Deployments(namespace).Delete(context.TODO(), name, options)
     case extensionsinternal.Kind("DaemonSet"):
-        return c.AppsV1().DaemonSets(namespace).Delete(name, options)
+        return c.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, options)
     case batchinternal.Kind("Job"):
-        return c.BatchV1().Jobs(namespace).Delete(name, options)
+        return c.BatchV1().Jobs(namespace).Delete(context.TODO(), name, options)
     case api.Kind("Secret"):
-        return c.CoreV1().Secrets(namespace).Delete(name, options)
+        return c.CoreV1().Secrets(namespace).Delete(context.TODO(), name, options)
     case api.Kind("ConfigMap"):
-        return c.CoreV1().ConfigMaps(namespace).Delete(name, options)
+        return c.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, options)
     case api.Kind("Service"):
-        return c.CoreV1().Services(namespace).Delete(name, options)
+        return c.CoreV1().Services(namespace).Delete(context.TODO(), name, options)
     default:
         return fmt.Errorf("Unsupported kind when deleting: %v", kind)
     }
 }
 
-func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
+func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
     deleteFunc := func() (bool, error) {
         err := deleteResource(c, kind, namespace, name, options)
-        if err == nil || apierrs.IsNotFound(err) {
+        if err == nil || apierrors.IsNotFound(err) {
             return true, nil
         }
         if IsRetryableAPIError(err) {
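Besides the added context argument, this file picks up a second signature change: DeleteOptions is now passed by value rather than as *metav1.DeleteOptions, so the nil/pointer idiom goes away and the zero value stands in for defaults. A sketch of a caller under that assumption (helper name is illustrative):

    func deletePod(c clientset.Interface, ns, name string) error {
        // v0.17: c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{})
        // v0.18: options by value; metav1.DeleteOptions{} requests server defaults.
        return c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
    }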
vendor/k8s.io/kubernetes/test/utils/density_utils.go (generated, vendored): 15 changes
@@ -17,12 +17,13 @@ limitations under the License.
 package utils
 
 import (
+    "context"
     "fmt"
     "strings"
     "time"
 
     "k8s.io/api/core/v1"
-    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     clientset "k8s.io/client-go/kubernetes"
@@ -42,9 +43,9 @@ func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]s
     patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
     var err error
     for attempt := 0; attempt < retries; attempt++ {
-        _, err = c.CoreV1().Nodes().Patch(nodeName, types.MergePatchType, []byte(patch))
+        _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
         if err != nil {
-            if !apierrs.IsConflict(err) {
+            if !apierrors.IsConflict(err) {
                 return err
             }
         } else {
@@ -61,7 +62,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
     var node *v1.Node
     var err error
     for attempt := 0; attempt < retries; attempt++ {
-        node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+        node, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
         if err != nil {
             return err
         }
@@ -74,9 +75,9 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
             }
             delete(node.Labels, labelKey)
         }
-        _, err = c.CoreV1().Nodes().Update(node)
+        _, err = c.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
         if err != nil {
-            if !apierrs.IsConflict(err) {
+            if !apierrors.IsConflict(err) {
                 return err
             } else {
                 klog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName)
@@ -92,7 +93,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
 // VerifyLabelsRemoved checks if Node for given nodeName does not have any of labels from labelKeys.
 // Return non-nil error if it does.
 func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []string) error {
-    node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+    node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
     if err != nil {
         return err
     }
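These vendored helpers pass context.TODO(), the conventional placeholder when no caller context is available. Code that owns a context can bound the same Patch call instead; a sketch with an illustrative 30-second timeout (the helper name is hypothetical, not part of this patch):

    func addNodeLabel(c clientset.Interface, nodeName, key, value string) error {
        // Bound the API call instead of using context.TODO().
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel() // release the timer once the call returns

        patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, key, value)
        _, err := c.CoreV1().Nodes().Patch(ctx, nodeName, types.MergePatchType,
            []byte(patch), metav1.PatchOptions{})
        return err
    }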
vendor/k8s.io/kubernetes/test/utils/deployment.go (generated, vendored): 21 changes
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 
 import (
+    "context"
     "fmt"
     "time"
 
@@ -51,7 +52,7 @@ func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.R
 func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet, logf LogfFn) {
     minReadySeconds := deployment.Spec.MinReadySeconds
     podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
-        return c.CoreV1().Pods(namespace).List(options)
+        return c.CoreV1().Pods(namespace).List(context.TODO(), options)
     }
 
     podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
@@ -80,7 +81,7 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.D
 
     err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
         var err error
-        deployment, err = c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+        deployment, err = c.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -174,7 +175,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
     var reason string
     err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
         var err error
-        deployment, err = c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+        deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -205,7 +206,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 
 // CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
 func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
-    deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+    deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
     if err != nil {
         return fmt.Errorf("unable to get deployment %s during revision check: %v", deploymentName, err)
     }
@@ -259,12 +260,12 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string,
     var updateErr error
     pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
         var err error
-        if deployment, err = c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
+        if deployment, err = c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
             return false, err
         }
         // Apply the update, then attempt to push it to the apiserver.
         applyUpdate(deployment)
-        if deployment, err = c.AppsV1().Deployments(namespace).Update(deployment); err == nil {
+        if deployment, err = c.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); err == nil {
             logf("Updating deployment %s", name)
             return true, nil
         }
@@ -279,14 +280,14 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string,
 
 func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
     return deploymentutil.WaitForObservedDeployment(func() (*apps.Deployment, error) {
-        return c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+        return c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
     }, desiredGeneration, 2*time.Second, 1*time.Minute)
 }
 
 // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
 func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error {
     err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-        deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+        deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -306,7 +307,7 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
 func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
     var deployment *apps.Deployment
     err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-        d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+        d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -322,7 +323,7 @@ func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa
 func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
     var deployment *apps.Deployment
     pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-        d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+        d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
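The Get/Update polling above is an older hand-rolled retry; client-go also ships k8s.io/client-go/util/retry for exactly this read-modify-write loop. A sketch of an equivalent update using it (the function name and replica count are illustrative, not from this patch):

    func scaleDeployment(c clientset.Interface, ns, name string, replicas int32) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            d.Spec.Replicas = &replicas
            // On a 409 Conflict, RetryOnConflict re-runs this closure with backoff.
            _, err = c.AppsV1().Deployments(ns).Update(context.TODO(), d, metav1.UpdateOptions{})
            return err
        })
    }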
vendor/k8s.io/kubernetes/test/utils/image/manifest.go (generated, vendored): 19 changes
@@ -31,6 +31,7 @@ type RegistryList struct {
     DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
     DockerGluster         string `yaml:"dockerGluster"`
     E2eRegistry           string `yaml:"e2eRegistry"`
+    PromoterE2eRegistry   string `yaml:"promoterE2eRegistry"`
     InvalidRegistry       string `yaml:"invalidRegistry"`
     GcRegistry            string `yaml:"gcRegistry"`
     GcrReleaseRegistry    string `yaml:"gcrReleaseRegistry"`
@@ -69,6 +70,8 @@ func initReg() RegistryList {
         DockerLibraryRegistry: "docker.io/library",
         DockerGluster:         "docker.io/gluster",
         E2eRegistry:           "gcr.io/kubernetes-e2e-test-images",
+        // TODO: After the domain flip, this should instead be k8s.gcr.io/k8s-artifacts-prod/e2e-test-images
+        PromoterE2eRegistry: "us.gcr.io/k8s-artifacts-prod/e2e-test-images",
         InvalidRegistry:       "invalid.com/invalid",
         GcRegistry:            "k8s.gcr.io",
         GcrReleaseRegistry:    "gcr.io/gke-release",
@@ -100,6 +103,7 @@ var (
     dockerLibraryRegistry   = registry.DockerLibraryRegistry
     dockerGluster           = registry.DockerGluster
     e2eRegistry             = registry.E2eRegistry
+    promoterE2eRegistry     = registry.PromoterE2eRegistry
     gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry
     gcRegistry              = registry.GcRegistry
     gcrReleaseRegistry      = registry.GcrReleaseRegistry
@@ -136,8 +140,6 @@ const (
     CudaVectorAdd
     // CudaVectorAdd2 image
     CudaVectorAdd2
-    // Dnsutils image
-    Dnsutils
     // EchoServer image
     EchoServer
     // Etcd image
@@ -187,14 +189,10 @@ const (
     RegressionIssue74839
     // ResourceConsumer image
     ResourceConsumer
-    // ResourceController image
-    ResourceController
     // SdDummyExporter image
     SdDummyExporter
-    // StartupScript image
-    StartupScript
     // TestWebserver image
     TestWebserver
     // VolumeNFSServer image
     VolumeNFSServer
     // VolumeISCSIServer image
@@ -207,17 +205,16 @@ const (
 
 func initImageConfigs() map[int]Config {
     configs := map[int]Config{}
-    configs[Agnhost] = Config{e2eRegistry, "agnhost", "2.8"}
+    configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.12"}
     configs[AgnhostPrivate] = Config{PrivateRegistry, "agnhost", "2.6"}
     configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
     configs[AuthenticatedWindowsNanoServer] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"}
-    configs[APIServer] = Config{e2eRegistry, "sample-apiserver", "1.10"}
+    configs[APIServer] = Config{e2eRegistry, "sample-apiserver", "1.17"}
     configs[AppArmorLoader] = Config{e2eRegistry, "apparmor-loader", "1.0"}
     configs[BusyBox] = Config{dockerLibraryRegistry, "busybox", "1.29"}
     configs[CheckMetadataConcealment] = Config{e2eRegistry, "metadata-concealment", "1.2"}
     configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
     configs[CudaVectorAdd2] = Config{e2eRegistry, "cuda-vector-add", "2.0"}
-    configs[Dnsutils] = Config{e2eRegistry, "dnsutils", "1.1"}
     configs[EchoServer] = Config{e2eRegistry, "echoserver", "2.2"}
     configs[Etcd] = Config{gcRegistry, "etcd", "3.4.3"}
     configs[GlusterDynamicProvisioner] = Config{dockerGluster, "glusterdynamic-provisioner", "v1.0"}
@@ -236,17 +233,15 @@ func initImageConfigs() map[int]Config {
     configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"}
     configs[NonRoot] = Config{e2eRegistry, "nonroot", "1.0"}
     // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
-    configs[Pause] = Config{gcRegistry, "pause", "3.1"}
+    configs[Pause] = Config{gcRegistry, "pause", "3.2"}
     configs[Perl] = Config{dockerLibraryRegistry, "perl", "5.26"}
     configs[PrometheusDummyExporter] = Config{gcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
     configs[PrometheusToSd] = Config{gcRegistry, "prometheus-to-sd", "v0.5.0"}
     configs[Redis] = Config{dockerLibraryRegistry, "redis", "5.0.5-alpine"}
     configs[RegressionIssue74839] = Config{e2eRegistry, "regression-issue-74839-amd64", "1.0"}
     configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"}
-    configs[ResourceController] = Config{e2eRegistry, "resource-consumer-controller", "1.0"}
     configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
-    configs[StartupScript] = Config{googleContainerRegistry, "startup-script", "v1"}
     configs[TestWebserver] = Config{e2eRegistry, "test-webserver", "1.0"}
     configs[VolumeNFSServer] = Config{e2eRegistry, "volume/nfs", "1.0"}
     configs[VolumeISCSIServer] = Config{e2eRegistry, "volume/iscsi", "2.0"}
     configs[VolumeGlusterServer] = Config{e2eRegistry, "volume/gluster", "1.0"}
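Consumers normally resolve these entries through the package's lookup helper rather than reading the map directly; assuming the usual imageutils alias for k8s.io/kubernetes/test/utils/image, a sketch:

    import imageutils "k8s.io/kubernetes/test/utils/image"

    func pauseImage() string {
        // With the registry defaults above this resolves to "k8s.gcr.io/pause:3.2".
        return imageutils.GetE2EImage(imageutils.Pause)
    }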
vendor/k8s.io/kubernetes/test/utils/pod_store.go (generated, vendored): 5 changes
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 
 import (
+    "context"
     "time"
 
     "k8s.io/api/core/v1"
@@ -42,13 +43,13 @@ func NewPodStore(c clientset.Interface, namespace string, label labels.Selector,
         ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
             options.LabelSelector = label.String()
             options.FieldSelector = field.String()
-            obj, err := c.CoreV1().Pods(namespace).List(options)
+            obj, err := c.CoreV1().Pods(namespace).List(context.TODO(), options)
             return runtime.Object(obj), err
         },
         WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
             options.LabelSelector = label.String()
             options.FieldSelector = field.String()
-            return c.CoreV1().Pods(namespace).Watch(options)
+            return c.CoreV1().Pods(namespace).Watch(context.TODO(), options)
         },
     }
     store := cache.NewStore(cache.MetaNamespaceKeyFunc)
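The same pattern outside the helper: under client-go v0.18 both closures of a cache.ListWatch forward a context to List and Watch. A self-contained sketch (names are illustrative; selector handling from NewPodStore is omitted):

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/watch"
        clientset "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
    )

    func newPodListWatch(c clientset.Interface, namespace string) *cache.ListWatch {
        return &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                return c.CoreV1().Pods(namespace).List(context.TODO(), options)
            },
            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                return c.CoreV1().Pods(namespace).Watch(context.TODO(), options)
            },
        }
    }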
vendor/k8s.io/kubernetes/test/utils/replicaset.go (generated, vendored): 11 changes
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 
 import (
+    "context"
     "fmt"
     "testing"
     "time"
@@ -34,12 +35,12 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
     var updateErr error
     pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
         var err error
-        if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
+        if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
             return false, err
         }
         // Apply the update, then attempt to push it to the apiserver.
         applyUpdate(rs)
-        if rs, err = c.AppsV1().ReplicaSets(namespace).Update(rs); err == nil {
+        if rs, err = c.AppsV1().ReplicaSets(namespace).Update(context.TODO(), rs, metav1.UpdateOptions{}); err == nil {
             logf("Updating replica set %q", name)
             return true, nil
         }
@@ -56,7 +57,7 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
 func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaSet, pollInterval, pollTimeout time.Duration) error {
     desiredGeneration := rs.Generation
     if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-        newRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+        newRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -72,12 +73,12 @@ func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name st
     var updateErr error
     pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
         var err error
-        if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
+        if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
             return false, err
         }
         // Apply the update, then attempt to push it to the apiserver.
         applyUpdate(rs)
-        if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(rs); err == nil {
+        if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}); err == nil {
             logf("Updating replica set %q", name)
             return true, nil
         }
vendor/k8s.io/kubernetes/test/utils/runners.go (generated, vendored): 217 changes
@@ -28,9 +28,10 @@ import (
     apps "k8s.io/api/apps/v1"
     batch "k8s.io/api/batch/v1"
     v1 "k8s.io/api/core/v1"
+    storage "k8s.io/api/storage/v1"
     storagev1beta1 "k8s.io/api/storage/v1beta1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
-    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
@@ -66,7 +67,7 @@ func removePtr(replicas *int32) int32 {
 
 func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
     // Wait until it's scheduled
-    p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
+    p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
     if err == nil && p.Spec.NodeName != "" {
         return p, nil
     }
@@ -74,7 +75,7 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time
     startTime := time.Now()
     for startTime.Add(timeout).After(time.Now()) {
         time.Sleep(pollingPeriod)
-        p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
+        p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
         if err == nil && p.Spec.NodeName != "" {
             return p, nil
         }
@@ -852,7 +853,7 @@ func (config *RCConfig) start() error {
         if oldRunning != config.Replicas {
             // List only pods from a given replication controller.
             options := metav1.ListOptions{LabelSelector: label.String()}
-            if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(options); err == nil {
+            if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(context.TODO(), options); err == nil {
                 for _, pod := range pods.Items {
                     config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
                 }
@@ -979,29 +980,29 @@ func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, clie
 }
 
 type LabelNodePrepareStrategy struct {
-    labelKey   string
-    labelValue string
+    LabelKey   string
+    LabelValue string
 }
 
 var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}
 
 func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy {
     return &LabelNodePrepareStrategy{
-        labelKey:   labelKey,
-        labelValue: labelValue,
+        LabelKey:   labelKey,
+        LabelValue: labelValue,
     }
 }
 
 func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
-    labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.labelKey, s.labelValue)
+    labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValue)
     patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
     return []byte(patch)
 }
 
 func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
     nodeCopy := node.DeepCopy()
-    if node.Labels != nil && len(node.Labels[s.labelKey]) != 0 {
-        delete(nodeCopy.Labels, s.labelKey)
+    if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
+        delete(nodeCopy.Labels, s.LabelKey)
     }
     return nodeCopy
 }
@@ -1019,22 +1020,26 @@ func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client
 // set to nil.
 type NodeAllocatableStrategy struct {
     // Node.status.allocatable to fill to all nodes.
-    nodeAllocatable map[v1.ResourceName]string
+    NodeAllocatable map[v1.ResourceName]string
     // Map <driver_name> -> VolumeNodeResources to fill into csiNode.spec.drivers[<driver_name>].
-    csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
+    CsiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
     // List of in-tree volume plugins migrated to CSI.
-    migratedPlugins []string
+    MigratedPlugins []string
 }
 
 var _ PrepareNodeStrategy = &NodeAllocatableStrategy{}
 
 func NewNodeAllocatableStrategy(nodeAllocatable map[v1.ResourceName]string, csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources, migratedPlugins []string) *NodeAllocatableStrategy {
-    return &NodeAllocatableStrategy{nodeAllocatable, csiNodeAllocatable, migratedPlugins}
+    return &NodeAllocatableStrategy{
+        NodeAllocatable:    nodeAllocatable,
+        CsiNodeAllocatable: csiNodeAllocatable,
+        MigratedPlugins:    migratedPlugins,
+    }
 }
 
 func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
     newNode := node.DeepCopy()
-    for name, value := range s.nodeAllocatable {
+    for name, value := range s.NodeAllocatable {
         newNode.Status.Allocatable[name] = resource.MustParse(value)
     }
 
@@ -1056,7 +1061,7 @@ func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
 
 func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
     nodeCopy := node.DeepCopy()
-    for name := range s.nodeAllocatable {
+    for name := range s.NodeAllocatable {
         delete(nodeCopy.Status.Allocatable, name)
     }
     return nodeCopy
@@ -1067,7 +1072,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
         ObjectMeta: metav1.ObjectMeta{
             Name: nodeName,
             Annotations: map[string]string{
-                v1.MigratedPluginsAnnotationKey: strings.Join(s.migratedPlugins, ","),
+                v1.MigratedPluginsAnnotationKey: strings.Join(s.MigratedPlugins, ","),
             },
         },
         Spec: storagev1beta1.CSINodeSpec{
@@ -1075,7 +1080,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
         },
     }
 
-    for driver, allocatable := range s.csiNodeAllocatable {
+    for driver, allocatable := range s.CsiNodeAllocatable {
         d := storagev1beta1.CSINodeDriver{
             Name:        driver,
             Allocatable: allocatable,
@@ -1084,17 +1089,17 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
         csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
     }
 
-    _, err := client.StorageV1beta1().CSINodes().Create(csiNode)
-    if apierrs.IsAlreadyExists(err) {
+    _, err := client.StorageV1beta1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{})
+    if apierrors.IsAlreadyExists(err) {
         // Something created CSINode instance after we checked it did not exist.
         // Make the caller to re-try PrepareDependentObjects by returning Conflict error
-        err = apierrs.NewConflict(storagev1beta1.Resource("csinodes"), nodeName, err)
+        err = apierrors.NewConflict(storagev1beta1.Resource("csinodes"), nodeName, err)
     }
     return err
 }
 
 func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, client clientset.Interface) error {
-    for driverName, allocatable := range s.csiNodeAllocatable {
+    for driverName, allocatable := range s.CsiNodeAllocatable {
         found := false
         for i, driver := range csiNode.Spec.Drivers {
             if driver.Name == driverName {
@@ -1112,16 +1117,16 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode,
             csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
         }
     }
-    csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.migratedPlugins, ",")
+    csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",")
 
-    _, err := client.StorageV1beta1().CSINodes().Update(csiNode)
+    _, err := client.StorageV1beta1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{})
     return err
 }
 
 func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
-    csiNode, err := client.StorageV1beta1().CSINodes().Get(node.Name, metav1.GetOptions{})
+    csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
     if err != nil {
-        if apierrs.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             return s.createCSINode(node.Name, client)
         }
         return err
@@ -1130,15 +1135,15 @@ func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client
 }
 
 func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
-    csiNode, err := client.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{})
+    csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
     if err != nil {
-        if apierrs.IsNotFound(err) {
+        if apierrors.IsNotFound(err) {
             return nil
         }
         return err
     }
 
-    for driverName := range s.csiNodeAllocatable {
+    for driverName := range s.CsiNodeAllocatable {
         for i, driver := range csiNode.Spec.Drivers {
             if driver.Name == driverName {
                 csiNode.Spec.Drivers[i].Allocatable = nil
@@ -1148,6 +1153,41 @@ func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, clien
     return s.updateCSINode(csiNode, client)
 }
 
+// UniqueNodeLabelStrategy sets a unique label for each node.
+type UniqueNodeLabelStrategy struct {
+    LabelKey string
+}
+
+var _ PrepareNodeStrategy = &UniqueNodeLabelStrategy{}
+
+func NewUniqueNodeLabelStrategy(labelKey string) *UniqueNodeLabelStrategy {
+    return &UniqueNodeLabelStrategy{
+        LabelKey: labelKey,
+    }
+}
+
+func (s *UniqueNodeLabelStrategy) PreparePatch(*v1.Node) []byte {
+    labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, string(uuid.NewUUID()))
+    patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
+    return []byte(patch)
+}
+
+func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node {
+    nodeCopy := node.DeepCopy()
+    if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
+        delete(nodeCopy.Labels, s.LabelKey)
+    }
+    return nodeCopy
+}
+
+func (*UniqueNodeLabelStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
+    return nil
+}
+
+func (*UniqueNodeLabelStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
+    return nil
+}
+
 func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
     var err error
     patch := strategy.PreparePatch(node)
@@ -1155,10 +1195,10 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
         return nil
     }
     for attempt := 0; attempt < retries; attempt++ {
-        if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil {
+        if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil {
             break
         }
-        if !apierrs.IsConflict(err) {
+        if !apierrors.IsConflict(err) {
             return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
         }
         time.Sleep(100 * time.Millisecond)
@@ -1171,7 +1211,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
         if err = strategy.PrepareDependentObjects(node, client); err == nil {
             break
         }
-        if !apierrs.IsConflict(err) {
+        if !apierrors.IsConflict(err) {
             return fmt.Errorf("Error while preparing objects for node %s: %s", node.Name, err)
         }
         time.Sleep(100 * time.Millisecond)
@@ -1185,7 +1225,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
     var err error
     for attempt := 0; attempt < retries; attempt++ {
-        node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+        node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
         if err != nil {
             return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
         }
@@ -1193,10 +1233,10 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
         if apiequality.Semantic.DeepEqual(node, updatedNode) {
             return nil
         }
-        if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil {
+        if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{}); err == nil {
             break
         }
-        if !apierrs.IsConflict(err) {
+        if !apierrors.IsConflict(err) {
             return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
         }
         time.Sleep(100 * time.Millisecond)
@@ -1210,7 +1250,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
         if err == nil {
             break
         }
-        if !apierrs.IsConflict(err) {
+        if !apierrors.IsConflict(err) {
             return fmt.Errorf("Error when cleaning up Node %v objects: %v", nodeName, err)
         }
         time.Sleep(100 * time.Millisecond)
@@ -1268,7 +1308,7 @@ func MakePodSpec() v1.PodSpec {
     return v1.PodSpec{
         Containers: []v1.Container{{
             Name:  "pause",
-            Image: "k8s.gcr.io/pause:3.1",
+            Image: "k8s.gcr.io/pause:3.2",
             Ports: []v1.ContainerPort{{ContainerPort: 80}},
             Resources: v1.ResourceRequirements{
                 Limits: v1.ResourceList{
@@ -1310,41 +1350,61 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe
     return createError
 }
 
-func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int) error {
+func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error {
     var createError error
     lock := sync.Mutex{}
     createPodFunc := func(i int) {
         pvcName := fmt.Sprintf("pvc-%d", i)
 
+        // pvc
+        pvc := claimTemplate.DeepCopy()
+        pvc.Name = pvcName
         // pv
         pv := factory(i)
-        // bind to "pvc-$i"
-        pv.Spec.ClaimRef = &v1.ObjectReference{
-            Kind:       "PersistentVolumeClaim",
-            Namespace:  namespace,
-            Name:       pvcName,
-            APIVersion: "v1",
+        // PVs are cluster-wide resources.
+        // Prepend a namespace to make the name globally unique.
+        pv.Name = fmt.Sprintf("%s-%s", namespace, pv.Name)
+        if bindVolume {
+            // bind pv to "pvc-$i"
+            pv.Spec.ClaimRef = &v1.ObjectReference{
+                Kind:       "PersistentVolumeClaim",
+                Namespace:  namespace,
                Name:       pvcName,
+                APIVersion: "v1",
+            }
+            pv.Status.Phase = v1.VolumeBound
+
+            // bind pvc to "pv-$i"
+            // pvc.Spec.VolumeName = pv.Name
+            pvc.Status.Phase = v1.ClaimBound
+        } else {
+            pv.Status.Phase = v1.VolumeAvailable
         }
-        pv.Status.Phase = v1.VolumeBound
         if err := CreatePersistentVolumeWithRetries(client, pv); err != nil {
             lock.Lock()
             defer lock.Unlock()
             createError = fmt.Errorf("error creating PV: %s", err)
             return
         }
+        // We need to update statuses separately, as creating pv/pvc resets status to the default one.
+        if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
+            lock.Lock()
+            defer lock.Unlock()
+            createError = fmt.Errorf("error updating PV status: %s", err)
+            return
+        }
 
-        // pvc
-        pvc := claimTemplate.DeepCopy()
-        pvc.Name = pvcName
-        // bind to "pv-$i"
-        pvc.Spec.VolumeName = pv.Name
-        pvc.Status.Phase = v1.ClaimBound
         if err := CreatePersistentVolumeClaimWithRetries(client, namespace, pvc); err != nil {
             lock.Lock()
             defer lock.Unlock()
             createError = fmt.Errorf("error creating PVC: %s", err)
             return
         }
+        if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
+            lock.Lock()
+            defer lock.Unlock()
+            createError = fmt.Errorf("error updating PVC status: %s", err)
+            return
+        }
 
         // pod
         pod := podTemplate.DeepCopy()
@@ -1407,9 +1467,50 @@ type volumeFactory func(uniqueID int) *v1.PersistentVolume
 
 func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
     return func(client clientset.Interface, namespace string, podCount int) error {
-        return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount)
+        return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */)
     }
 }
 
+func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeClaim {
+    return &v1.PersistentVolumeClaim{
+        Spec: v1.PersistentVolumeClaimSpec{
+            AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
+            StorageClassName: &storageClass,
+            Resources: v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
+                },
+            },
+        },
+    }
+}
+
+func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
+    return func(client clientset.Interface, namespace string, podCount int) error {
+        volumeBindingMode := storage.VolumeBindingWaitForFirstConsumer
+        storageClass := &storage.StorageClass{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "storage-class-1",
+            },
+            Provisioner:       "kubernetes.io/gce-pd",
+            VolumeBindingMode: &volumeBindingMode,
+        }
+        claimTemplate := makeUnboundPersistentVolumeClaim(storageClass.Name)
+
+        if err := CreateStorageClassWithRetries(client, storageClass); err != nil {
+            return fmt.Errorf("failed to create storage class: %v", err)
+        }
+
+        factoryWithStorageClass := func(i int) *v1.PersistentVolume {
+            pv := factory(i)
+            pv.Spec.StorageClassName = storageClass.Name
+            return pv
+        }
+
+        return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */)
+    }
+}
+
 func NewSimpleCreatePodStrategy() TestPodCreateStrategy {
     basePod := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
@@ -1464,7 +1565,7 @@ func (config *SecretConfig) Run() error {
 }
 
 func (config *SecretConfig) Stop() error {
-    if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil {
+    if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
         return fmt.Errorf("Error deleting secret: %v", err)
     }
     config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
@@ -1522,7 +1623,7 @@ func (config *ConfigMapConfig) Run() error {
 }
 
 func (config *ConfigMapConfig) Stop() error {
-    if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil {
+    if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
         return fmt.Errorf("Error deleting configmap: %v", err)
     }
     config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)
@@ -1625,7 +1726,7 @@ type DaemonConfig struct {
 
 func (config *DaemonConfig) Run() error {
     if config.Image == "" {
-        config.Image = "k8s.gcr.io/pause:3.1"
+        config.Image = "k8s.gcr.io/pause:3.2"
     }
     nameLabel := map[string]string{
         "name": config.Name + "-daemon",
@@ -1659,7 +1760,7 @@ func (config *DaemonConfig) Run() error {
     var err error
     for i := 0; i < retries; i++ {
         // Wait for all daemons to be running
-        nodes, err = config.Client.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
+        nodes, err = config.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
         if err == nil {
             break
         } else if i+1 == retries {
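One non-mechanical change in runners.go: the node-prepare strategy fields (LabelKey, LabelValue, NodeAllocatable, CsiNodeAllocatable, MigratedPlugins) were exported. Code vendoring these helpers can now build strategies with composite literals instead of the constructors; a sketch with illustrative values:

    import testutils "k8s.io/kubernetes/test/utils"

    func newZoneStrategy() *testutils.LabelNodePrepareStrategy {
        // Equivalent to testutils.NewLabelNodePrepareStrategy("zone", "zone-a"),
        // possible from outside the package now that the fields are exported.
        return &testutils.LabelNodePrepareStrategy{
            LabelKey:   "zone",
            LabelValue: "zone-a",
        }
    }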