rebase: update kubernetes to v1.21.2

Updated Kubernetes packages to the latest release. The resizefs
package has been merged into the k8s.io/mount-utils package;
the code has been updated to use it from there.

Updates: #1968

Signed-off-by: Rakshith R <rar@redhat.com>
Author: Rakshith R
Date: 2021-06-25 10:29:51 +05:30
Committed-by: mergify[bot]
Parent: 8ce5ae16c1
Commit: 1b23d78113
1115 changed files with 98825 additions and 12365 deletions
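
For context, the migration the commit message describes looks roughly like the sketch below. This is illustrative, not the exact ceph-csi call site; the helper name is hypothetical, but NewResizeFs and Resize are the API that k8s.io/mount-utils ships:

import (
    mountutils "k8s.io/mount-utils"
    utilexec "k8s.io/utils/exec"
)

// resizeFilesystem is a hypothetical helper: the resizer that previously
// lived in k8s.io/kubernetes/pkg/util/resizefs is now constructed from
// k8s.io/mount-utils, so only the import path and constructor change.
func resizeFilesystem(devicePath, mountPath string) (bool, error) {
    resizer := mountutils.NewResizeFs(utilexec.New())
    return resizer.Resize(devicePath, mountPath)
}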


@@ -1,274 +1,276 @@
rules:
- selectorRegexp: k8s[.]io/kubernetes/pkg/
allowedPrefixes:
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/service
- k8s.io/kubernetes/pkg/api/v1/pod
- k8s.io/kubernetes/pkg/api/v1/resource
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
- k8s.io/kubernetes/pkg/apis/batch
- k8s.io/kubernetes/pkg/apis/certificates
- k8s.io/kubernetes/pkg/apis/certificates/v1
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/apis/core/helper
- k8s.io/kubernetes/pkg/apis/core/install
- k8s.io/kubernetes/pkg/apis/core/pods
- k8s.io/kubernetes/pkg/apis/core/v1
- k8s.io/kubernetes/pkg/apis/core/v1/helper
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
- k8s.io/kubernetes/pkg/apis/core/validation
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/apis/networking
- k8s.io/kubernetes/pkg/apis/policy
- k8s.io/kubernetes/pkg/apis/policy/validation
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/storage/v1/util
- k8s.io/kubernetes/pkg/capabilities
- k8s.io/kubernetes/pkg/client/conditions
- k8s.io/kubernetes/pkg/cloudprovider/providers
- k8s.io/kubernetes/pkg/controller
- k8s.io/kubernetes/pkg/controller/deployment/util
- k8s.io/kubernetes/pkg/controller/nodelifecycle
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
- k8s.io/kubernetes/pkg/credentialprovider/gcp
- k8s.io/kubernetes/pkg/credentialprovider/secrets
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/kubectl
- k8s.io/kubernetes/pkg/kubectl/apps
- k8s.io/kubernetes/pkg/kubectl/describe
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
- k8s.io/kubernetes/pkg/kubectl/scheme
- k8s.io/kubernetes/pkg/kubectl/util
- k8s.io/kubernetes/pkg/kubectl/util/certificate
- k8s.io/kubernetes/pkg/kubectl/util/deployment
- k8s.io/kubernetes/pkg/kubectl/util/event
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
- k8s.io/kubernetes/pkg/kubectl/util/podutils
- k8s.io/kubernetes/pkg/kubectl/util/qos
- k8s.io/kubernetes/pkg/kubectl/util/rbac
- k8s.io/kubernetes/pkg/kubectl/util/resource
- k8s.io/kubernetes/pkg/kubectl/util/slice
- k8s.io/kubernetes/pkg/kubectl/util/storage
- k8s.io/kubernetes/pkg/kubelet
- k8s.io/kubernetes/pkg/kubelet/apis
- k8s.io/kubernetes/pkg/kubelet/apis/config
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
- k8s.io/kubernetes/pkg/kubelet/cadvisor
- k8s.io/kubernetes/pkg/kubelet/certificate
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
- k8s.io/kubernetes/pkg/kubelet/checkpoint
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
- k8s.io/kubernetes/pkg/kubelet/cloudresource
- k8s.io/kubernetes/pkg/kubelet/cm
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
- k8s.io/kubernetes/pkg/kubelet/cm/util
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/dockershim
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
- k8s.io/kubernetes/pkg/kubelet/events
- k8s.io/kubernetes/pkg/kubelet/images
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
- k8s.io/kubernetes/pkg/kubelet/leaky
- k8s.io/kubernetes/pkg/kubelet/lifecycle
- k8s.io/kubernetes/pkg/kubelet/logs
- k8s.io/kubernetes/pkg/kubelet/metrics
- k8s.io/kubernetes/pkg/kubelet/network/dns
- k8s.io/kubernetes/pkg/kubelet/nodelease
- k8s.io/kubernetes/pkg/kubelet/nodestatus
- k8s.io/kubernetes/pkg/kubelet/oom
- k8s.io/kubernetes/pkg/kubelet/pleg
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
- k8s.io/kubernetes/pkg/kubelet/pod
- k8s.io/kubernetes/pkg/kubelet/preemption
- k8s.io/kubernetes/pkg/kubelet/prober
- k8s.io/kubernetes/pkg/kubelet/prober/results
- k8s.io/kubernetes/pkg/kubelet/qos
- k8s.io/kubernetes/pkg/kubelet/remote
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
- k8s.io/kubernetes/pkg/kubelet/server
- k8s.io/kubernetes/pkg/kubelet/server/metrics
- k8s.io/kubernetes/pkg/kubelet/server/portforward
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
- k8s.io/kubernetes/pkg/kubelet/server/stats
- k8s.io/kubernetes/pkg/kubelet/server/streaming
- k8s.io/kubernetes/pkg/kubelet/stats
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
- k8s.io/kubernetes/pkg/kubelet/status
- k8s.io/kubernetes/pkg/kubelet/secret
- k8s.io/kubernetes/pkg/kubelet/sysctl
- k8s.io/kubernetes/pkg/kubelet/types
- k8s.io/kubernetes/pkg/kubelet/token
- k8s.io/kubernetes/pkg/kubelet/util
- k8s.io/kubernetes/pkg/kubelet/util/format
- k8s.io/kubernetes/pkg/kubelet/util/manager
- k8s.io/kubernetes/pkg/kubelet/util/store
- k8s.io/kubernetes/pkg/kubelet/volumemanager
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
- k8s.io/kubernetes/pkg/kubemark
- k8s.io/kubernetes/pkg/cluster/ports
- k8s.io/kubernetes/pkg/probe
- k8s.io/kubernetes/pkg/probe/exec
- k8s.io/kubernetes/pkg/probe/http
- k8s.io/kubernetes/pkg/probe/tcp
- k8s.io/kubernetes/pkg/proxy
- k8s.io/kubernetes/pkg/proxy/apis
- k8s.io/kubernetes/pkg/proxy/apis/config
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
- k8s.io/kubernetes/pkg/proxy/config
- k8s.io/kubernetes/pkg/proxy/healthcheck
- k8s.io/kubernetes/pkg/proxy/iptables
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/userspace
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
- k8s.io/kubernetes/pkg/scheduler/api
- k8s.io/kubernetes/pkg/scheduler/framework
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
- k8s.io/kubernetes/pkg/securitycontext
- k8s.io/kubernetes/pkg/serviceaccount
- k8s.io/kubernetes/pkg/util/async
- k8s.io/kubernetes/pkg/util/bandwidth
- k8s.io/kubernetes/pkg/util/config
- k8s.io/kubernetes/pkg/util/configz
- k8s.io/kubernetes/pkg/util/conntrack
- k8s.io/kubernetes/pkg/util/env
- k8s.io/kubernetes/pkg/util/filesystem
- k8s.io/kubernetes/pkg/util/flag
- k8s.io/kubernetes/pkg/util/flock
- k8s.io/kubernetes/pkg/util/goroutinemap
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
- k8s.io/kubernetes/pkg/util/hash
- k8s.io/kubernetes/pkg/util/ipset
- k8s.io/kubernetes/pkg/util/iptables
- k8s.io/kubernetes/pkg/util/ipvs
- k8s.io/kubernetes/pkg/util/labels
- k8s.io/kubernetes/pkg/util/node
- k8s.io/kubernetes/pkg/util/oom
- k8s.io/kubernetes/pkg/util/parsers
- k8s.io/kubernetes/pkg/util/pod
- k8s.io/kubernetes/pkg/util/procfs
- k8s.io/kubernetes/pkg/util/removeall
- k8s.io/kubernetes/pkg/util/resizefs
- k8s.io/kubernetes/pkg/util/rlimit
- k8s.io/kubernetes/pkg/util/selinux
- k8s.io/kubernetes/pkg/util/slice
- k8s.io/kubernetes/pkg/util/sysctl
- k8s.io/kubernetes/pkg/util/system
- k8s.io/kubernetes/pkg/util/tail
- k8s.io/kubernetes/pkg/util/taints
- k8s.io/kubernetes/pkg/volume
- k8s.io/kubernetes/pkg/volume/util
- k8s.io/kubernetes/pkg/volume/util/fs
- k8s.io/kubernetes/pkg/volume/util/fsquota
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
- k8s.io/kubernetes/pkg/volume/util/subpath
- k8s.io/kubernetes/pkg/volume/util/types
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
- k8s.io/kubernetes/pkg/apis/authentication
- k8s.io/kubernetes/pkg/apis/authentication/v1
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
- k8s.io/kubernetes/pkg/apis/storage/v1
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
- k8s.io/kubernetes/test/e2e/framework/kubectl
- k8s.io/kubernetes/test/e2e/framework/log
- k8s.io/kubernetes/test/e2e/framework/metrics
- k8s.io/kubernetes/test/e2e/framework/network
- k8s.io/kubernetes/test/e2e/framework/node
- k8s.io/kubernetes/test/e2e/framework/pod
- k8s.io/kubernetes/test/e2e/framework/rc
- k8s.io/kubernetes/test/e2e/framework/resource
- k8s.io/kubernetes/test/e2e/framework/service
- k8s.io/kubernetes/test/e2e/framework/ssh
- k8s.io/kubernetes/test/e2e/framework/testfiles
- k8s.io/kubernetes/test/e2e/framework/websocket
- k8s.io/kubernetes/test/e2e/manifest
- k8s.io/kubernetes/test/e2e/perftype
- k8s.io/kubernetes/test/e2e/storage/utils
- k8s.io/kubernetes/test/e2e/system
- k8s.io/kubernetes/test/utils
- k8s.io/kubernetes/test/utils/image
# TODO: why is this here?
- selectorRegexp: k8s[.]io/kubernetes/third_party/
allowedPrefixes:
- k8s.io/kubernetes/third_party/forked/golang/expansion
rules:
- selectorRegexp: k8s[.]io/kubernetes/pkg/
allowedPrefixes:
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/service
- k8s.io/kubernetes/pkg/api/v1/pod
- k8s.io/kubernetes/pkg/api/v1/resource
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
- k8s.io/kubernetes/pkg/apis/batch
- k8s.io/kubernetes/pkg/apis/certificates
- k8s.io/kubernetes/pkg/apis/certificates/v1
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/apis/core/helper
- k8s.io/kubernetes/pkg/apis/core/install
- k8s.io/kubernetes/pkg/apis/core/pods
- k8s.io/kubernetes/pkg/apis/core/v1
- k8s.io/kubernetes/pkg/apis/core/v1/helper
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
- k8s.io/kubernetes/pkg/apis/core/validation
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/apis/networking
- k8s.io/kubernetes/pkg/apis/policy
- k8s.io/kubernetes/pkg/apis/policy/validation
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/storage/v1/util
- k8s.io/kubernetes/pkg/capabilities
- k8s.io/kubernetes/pkg/client/conditions
- k8s.io/kubernetes/pkg/cloudprovider/providers
- k8s.io/kubernetes/pkg/controller
- k8s.io/kubernetes/pkg/controller/deployment/util
- k8s.io/kubernetes/pkg/controller/nodelifecycle
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
- k8s.io/kubernetes/pkg/credentialprovider/gcp
- k8s.io/kubernetes/pkg/credentialprovider/secrets
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/kubectl
- k8s.io/kubernetes/pkg/kubectl/apps
- k8s.io/kubernetes/pkg/kubectl/describe
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
- k8s.io/kubernetes/pkg/kubectl/scheme
- k8s.io/kubernetes/pkg/kubectl/util
- k8s.io/kubernetes/pkg/kubectl/util/certificate
- k8s.io/kubernetes/pkg/kubectl/util/deployment
- k8s.io/kubernetes/pkg/kubectl/util/event
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
- k8s.io/kubernetes/pkg/kubectl/util/podutils
- k8s.io/kubernetes/pkg/kubectl/util/qos
- k8s.io/kubernetes/pkg/kubectl/util/rbac
- k8s.io/kubernetes/pkg/kubectl/util/resource
- k8s.io/kubernetes/pkg/kubectl/util/slice
- k8s.io/kubernetes/pkg/kubectl/util/storage
- k8s.io/kubernetes/pkg/kubelet
- k8s.io/kubernetes/pkg/kubelet/apis
- k8s.io/kubernetes/pkg/kubelet/apis/config
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
- k8s.io/kubernetes/pkg/kubelet/cadvisor
- k8s.io/kubernetes/pkg/kubelet/certificate
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
- k8s.io/kubernetes/pkg/kubelet/checkpoint
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
- k8s.io/kubernetes/pkg/kubelet/cloudresource
- k8s.io/kubernetes/pkg/kubelet/cm
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
- k8s.io/kubernetes/pkg/kubelet/cm/util
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/dockershim
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
- k8s.io/kubernetes/pkg/kubelet/events
- k8s.io/kubernetes/pkg/kubelet/images
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
- k8s.io/kubernetes/pkg/kubelet/leaky
- k8s.io/kubernetes/pkg/kubelet/lifecycle
- k8s.io/kubernetes/pkg/kubelet/logs
- k8s.io/kubernetes/pkg/kubelet/metrics
- k8s.io/kubernetes/pkg/kubelet/network/dns
- k8s.io/kubernetes/pkg/kubelet/nodelease
- k8s.io/kubernetes/pkg/kubelet/nodestatus
- k8s.io/kubernetes/pkg/kubelet/oom
- k8s.io/kubernetes/pkg/kubelet/pleg
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
- k8s.io/kubernetes/pkg/kubelet/pod
- k8s.io/kubernetes/pkg/kubelet/preemption
- k8s.io/kubernetes/pkg/kubelet/prober
- k8s.io/kubernetes/pkg/kubelet/prober/results
- k8s.io/kubernetes/pkg/kubelet/qos
- k8s.io/kubernetes/pkg/kubelet/remote
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
- k8s.io/kubernetes/pkg/kubelet/server
- k8s.io/kubernetes/pkg/kubelet/server/metrics
- k8s.io/kubernetes/pkg/kubelet/server/portforward
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
- k8s.io/kubernetes/pkg/kubelet/server/stats
- k8s.io/kubernetes/pkg/kubelet/server/streaming
- k8s.io/kubernetes/pkg/kubelet/stats
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
- k8s.io/kubernetes/pkg/kubelet/status
- k8s.io/kubernetes/pkg/kubelet/secret
- k8s.io/kubernetes/pkg/kubelet/sysctl
- k8s.io/kubernetes/pkg/kubelet/types
- k8s.io/kubernetes/pkg/kubelet/token
- k8s.io/kubernetes/pkg/kubelet/util
- k8s.io/kubernetes/pkg/kubelet/util/format
- k8s.io/kubernetes/pkg/kubelet/util/manager
- k8s.io/kubernetes/pkg/kubelet/util/store
- k8s.io/kubernetes/pkg/kubelet/volumemanager
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
- k8s.io/kubernetes/pkg/kubemark
- k8s.io/kubernetes/pkg/cluster/ports
- k8s.io/kubernetes/pkg/probe
- k8s.io/kubernetes/pkg/probe/exec
- k8s.io/kubernetes/pkg/probe/http
- k8s.io/kubernetes/pkg/probe/tcp
- k8s.io/kubernetes/pkg/proxy
- k8s.io/kubernetes/pkg/proxy/apis
- k8s.io/kubernetes/pkg/proxy/apis/config
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
- k8s.io/kubernetes/pkg/proxy/config
- k8s.io/kubernetes/pkg/proxy/healthcheck
- k8s.io/kubernetes/pkg/proxy/iptables
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/userspace
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
- k8s.io/kubernetes/pkg/scheduler/api
- k8s.io/kubernetes/pkg/scheduler/framework
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
- k8s.io/kubernetes/pkg/scheduler/internal/heap
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/internal/queue
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
- k8s.io/kubernetes/pkg/securitycontext
- k8s.io/kubernetes/pkg/serviceaccount
- k8s.io/kubernetes/pkg/util/async
- k8s.io/kubernetes/pkg/util/bandwidth
- k8s.io/kubernetes/pkg/util/config
- k8s.io/kubernetes/pkg/util/configz
- k8s.io/kubernetes/pkg/util/conntrack
- k8s.io/kubernetes/pkg/util/env
- k8s.io/kubernetes/pkg/util/filesystem
- k8s.io/kubernetes/pkg/util/flag
- k8s.io/kubernetes/pkg/util/flock
- k8s.io/kubernetes/pkg/util/goroutinemap
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
- k8s.io/kubernetes/pkg/util/hash
- k8s.io/kubernetes/pkg/util/ipset
- k8s.io/kubernetes/pkg/util/iptables
- k8s.io/kubernetes/pkg/util/ipvs
- k8s.io/kubernetes/pkg/util/labels
- k8s.io/kubernetes/pkg/util/node
- k8s.io/kubernetes/pkg/util/oom
- k8s.io/kubernetes/pkg/util/parsers
- k8s.io/kubernetes/pkg/util/pod
- k8s.io/kubernetes/pkg/util/procfs
- k8s.io/kubernetes/pkg/util/removeall
- k8s.io/kubernetes/pkg/util/resizefs
- k8s.io/kubernetes/pkg/util/rlimit
- k8s.io/kubernetes/pkg/util/selinux
- k8s.io/kubernetes/pkg/util/slice
- k8s.io/kubernetes/pkg/util/sysctl
- k8s.io/kubernetes/pkg/util/system
- k8s.io/kubernetes/pkg/util/tail
- k8s.io/kubernetes/pkg/util/taints
- k8s.io/kubernetes/pkg/volume
- k8s.io/kubernetes/pkg/volume/util
- k8s.io/kubernetes/pkg/volume/util/fs
- k8s.io/kubernetes/pkg/volume/util/fsquota
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
- k8s.io/kubernetes/pkg/volume/util/subpath
- k8s.io/kubernetes/pkg/volume/util/types
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
- k8s.io/kubernetes/pkg/apis/authentication
- k8s.io/kubernetes/pkg/apis/authentication/v1
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
- k8s.io/kubernetes/pkg/apis/storage/v1
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
- k8s.io/kubernetes/test/e2e/framework/kubectl
- k8s.io/kubernetes/test/e2e/framework/log
- k8s.io/kubernetes/test/e2e/framework/metrics
- k8s.io/kubernetes/test/e2e/framework/network
- k8s.io/kubernetes/test/e2e/framework/node
- k8s.io/kubernetes/test/e2e/framework/pod
- k8s.io/kubernetes/test/e2e/framework/rc
- k8s.io/kubernetes/test/e2e/framework/resource
- k8s.io/kubernetes/test/e2e/framework/service
- k8s.io/kubernetes/test/e2e/framework/ssh
- k8s.io/kubernetes/test/e2e/framework/testfiles
- k8s.io/kubernetes/test/e2e/framework/websocket
- k8s.io/kubernetes/test/e2e/manifest
- k8s.io/kubernetes/test/e2e/perftype
- k8s.io/kubernetes/test/e2e/storage/utils
- k8s.io/kubernetes/test/e2e/system
- k8s.io/kubernetes/test/utils
- k8s.io/kubernetes/test/utils/image
# TODO: why is this here?
- selectorRegexp: k8s[.]io/kubernetes/third_party/
allowedPrefixes:
- k8s.io/kubernetes/third_party/forked/golang/expansion


@@ -1,146 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cleanup.go",
"exec_util.go",
"expect.go",
"flake_reporting_util.go",
"framework.go",
"log.go",
"log_size_monitoring.go",
"nodes_util.go",
"pods.go",
"ports.go",
"provider.go",
"psp.go",
"resource_usage_gatherer.go",
"size.go",
"test_context.go",
"util.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/apis/config:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
"//staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["log_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/framework/auth:all-srcs",
"//test/e2e/framework/autoscaling:all-srcs",
"//test/e2e/framework/config:all-srcs",
"//test/e2e/framework/deployment:all-srcs",
"//test/e2e/framework/endpoints:all-srcs",
"//test/e2e/framework/endpointslice:all-srcs",
"//test/e2e/framework/events:all-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
"//test/e2e/framework/gpu:all-srcs",
"//test/e2e/framework/ingress:all-srcs",
"//test/e2e/framework/job:all-srcs",
"//test/e2e/framework/kubectl:all-srcs",
"//test/e2e/framework/kubelet:all-srcs",
"//test/e2e/framework/kubesystem:all-srcs",
"//test/e2e/framework/log:all-srcs",
"//test/e2e/framework/manifest:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/network:all-srcs",
"//test/e2e/framework/node:all-srcs",
"//test/e2e/framework/perf:all-srcs",
"//test/e2e/framework/pod:all-srcs",
"//test/e2e/framework/providers/aws:all-srcs",
"//test/e2e/framework/providers/azure:all-srcs",
"//test/e2e/framework/providers/gce:all-srcs",
"//test/e2e/framework/providers/kubemark:all-srcs",
"//test/e2e/framework/providers/openstack:all-srcs",
"//test/e2e/framework/providers/vsphere:all-srcs",
"//test/e2e/framework/pv:all-srcs",
"//test/e2e/framework/rc:all-srcs",
"//test/e2e/framework/replicaset:all-srcs",
"//test/e2e/framework/resource:all-srcs",
"//test/e2e/framework/security:all-srcs",
"//test/e2e/framework/service:all-srcs",
"//test/e2e/framework/skipper:all-srcs",
"//test/e2e/framework/ssh:all-srcs",
"//test/e2e/framework/statefulset:all-srcs",
"//test/e2e/framework/testfiles:all-srcs",
"//test/e2e/framework/timer:all-srcs",
"//test/e2e/framework/volume:all-srcs",
"//test/e2e/framework/websocket:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,33 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["helpers.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/auth",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["config.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/config",
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["config_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -119,6 +119,9 @@ type Framework struct {
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection
// Timeouts contains the custom timeouts used during the test execution.
Timeouts *TimeoutContext
}
// AfterEachActionFunc is a function that can be called after each test
@@ -138,6 +141,13 @@ type Options struct {
GroupVersion *schema.GroupVersion
}
// NewFrameworkWithCustomTimeouts makes a framework with custom timeouts.
func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
f := NewDefaultFramework(baseName)
f.Timeouts = timeouts
return f
}
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
func NewDefaultFramework(baseName string) *Framework {
@@ -155,6 +165,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
AddonResourceConstraints: make(map[string]ResourceConstraint),
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
}
f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
@@ -202,10 +213,6 @@ func (f *Framework) BeforeEach() {
ExpectNoError(err)
f.DynamicClient, err = dynamic.NewForConfig(config)
ExpectNoError(err)
// node.k8s.io is based on CRD, which is served only as JSON
jsonConfig := config
jsonConfig.ContentType = "application/json"
ExpectNoError(err)
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
@@ -618,12 +625,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
return nil
}
// KubeDescribe is wrapper function for ginkgo describe. Adds namespacing.
// TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
func KubeDescribe(text string, body func()) bool {
return ginkgo.Describe("[k8s.io] "+text, body)
}
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
return ginkgo.It(text+" [Conformance]", body, timeout...)
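
Taken together, the framework.go changes above replace fixed package-level timeouts with an injectable TimeoutContext. A usage sketch, assuming the usual framework import alias; the test name, the override value, and the ClaimProvision field name are illustrative:

// Timeouts start from the defaults and can be selectively overridden.
timeouts := framework.NewTimeoutContextWithDefaults()
timeouts.ClaimProvision = 10 * time.Minute // assumed field; useful for slow storage backends
f := framework.NewFrameworkWithCustomTimeouts("csi", timeouts)
// f behaves exactly like a NewDefaultFramework result, with f.Timeouts wired in.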


@@ -1,26 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["wrapper.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["kubectl_utils.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/kubectl",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,26 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["logger.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/log",
visibility = ["//visibility:public"],
deps = [
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,49 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"api.go",
"api_server_metrics.go",
"cluster_autoscaler_metrics.go",
"controller_manager_metrics.go",
"e2e_metrics.go",
"interesting_metrics.go",
"kubelet_metrics.go",
"latencies.go",
"metrics_grabber.go",
"pod.go",
"scheduler_metrics.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/perftype:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -1,60 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"resource.go",
"runtimeclass.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["wait_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -584,6 +584,14 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
return podLabels
}
// RemoveTaintsOffNode removes a list of taints from the given node
// It is simply a helper wrapper for RemoveTaintOffNode
func RemoveTaintsOffNode(c clientset.Interface, nodeName string, taints []v1.Taint) {
for _, taint := range taints {
RemoveTaintOffNode(c, nodeName, taint)
}
}
// RemoveTaintOffNode removes the given taint from the given node.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(c, nodeName, nil, &taint)


@@ -202,6 +202,9 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
attempt := 0
return func() (bool, error) {
if allowedNotReadyNodes == -1 {
return true, nil
}
attempt++
var nodesNotReadyYet []v1.Node
opts := metav1.ListOptions{

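The early return added above gives callers an explicit opt-out from node-readiness gating. A caller-side sketch, assuming the e2enode alias for k8s.io/kubernetes/test/e2e/framework/node and an existing clientset.Interface; the threshold values are illustrative:

// allowedNotReadyNodes == -1 short-circuits the check entirely.
check := e2enode.CheckReadyForTests(clientSet, "" /* nonblockingTaints */, -1, 100)
ok, err := check() // returns (true, nil) immediately because of the sentinel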

@@ -1,57 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"create.go",
"delete.go",
"node_selection.go",
"resource.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["resource_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)


@@ -145,13 +145,10 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: BusyBoxImage,
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
Name: "write-pod",
Image: GetDefaultTestImage(),
Command: GenerateScriptCmd(command),
SecurityContext: GenerateContainerSecurityContext(isPrivileged),
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
@@ -187,10 +184,6 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
return &i
}(1000)
}
image := imageutils.BusyBox
if podConfig.ImageID != imageutils.None {
image = podConfig.ImageID
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -200,28 +193,34 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
Name: podName,
Namespace: podConfig.NS,
},
Spec: v1.PodSpec{
HostIPC: podConfig.HostIPC,
HostPID: podConfig.HostPID,
SecurityContext: &v1.PodSecurityContext{
FSGroup: podConfig.FsGroup,
},
Containers: []v1.Container{
{
Name: "write-pod",
Image: imageutils.GetE2EImage(image),
Command: []string{"/bin/sh"},
Args: []string{"-c", podConfig.Command},
SecurityContext: &v1.SecurityContext{
Privileged: &podConfig.IsPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
Spec: *MakePodSpec(podConfig),
}
return podSpec, nil
}
// MakePodSpec returns a PodSpec definition
func MakePodSpec(podConfig *Config) *v1.PodSpec {
image := imageutils.BusyBox
if podConfig.ImageID != imageutils.None {
image = podConfig.ImageID
}
podSpec := &v1.PodSpec{
HostIPC: podConfig.HostIPC,
HostPID: podConfig.HostPID,
SecurityContext: GeneratePodSecurityContext(podConfig.FsGroup, podConfig.SeLinuxLabel),
Containers: []v1.Container{
{
Name: "write-pod",
Image: GetTestImage(image),
Command: GenerateScriptCmd(podConfig.Command),
SecurityContext: GenerateContainerSecurityContext(podConfig.IsPrivileged),
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
}
if podConfig.PodFSGroupChangePolicy != nil {
podSpec.Spec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
podSpec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
}
var volumeMounts = make([]v1.VolumeMount, 0)
@@ -247,13 +246,10 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
volumeIndex++
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
if runtime.GOOS != "windows" {
podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
}
podSpec.Containers[0].VolumeMounts = volumeMounts
podSpec.Containers[0].VolumeDevices = volumeDevices
podSpec.Volumes = volumes
SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
return podSpec, nil
SetNodeSelection(podSpec, podConfig.NodeSelection)
return podSpec
}
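
The net effect of this refactor is that callers can now build a bare pod spec without wrapping it in a Pod object. A sketch, assuming the usual e2epod alias for k8s.io/kubernetes/test/e2e/framework/pod; the namespace and command are illustrative:

spec := e2epod.MakePodSpec(&e2epod.Config{
    NS:      "default",
    Command: "sleep 3600",
})
// spec is a *v1.PodSpec with the OS-appropriate image, script command, and
// security contexts already filled in by the helpers in the new utils.go below.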


@@ -321,6 +321,20 @@ func podContainerFailed(c clientset.Interface, namespace, podName string, contai
}
}
func podContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
if containerIndex > len(pod.Status.ContainerStatuses)-1 {
return false, nil
}
containerStatus := pod.Status.ContainerStatuses[containerIndex]
return *containerStatus.Started, nil
}
}
// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.


@@ -0,0 +1,120 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"flag"
v1 "k8s.io/api/core/v1"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// NodeOSDistroIs returns true if the distro is the same as `--node-os-distro`
// the package framework/pod can't import the framework package (see #81245)
// we need to check if the --node-os-distro=windows is set and the framework package
// is the one that's parsing the flags, as a workaround this method is looking for the same flag again
// TODO: replace with `framework.NodeOSDistroIs` when #81245 is complete
func NodeOSDistroIs(distro string) bool {
var nodeOsDistro *flag.Flag = flag.Lookup("node-os-distro")
if nodeOsDistro != nil && nodeOsDistro.Value.String() == distro {
return true
}
return false
}
// GenerateScriptCmd generates the corresponding command lines to execute a command.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh
func GenerateScriptCmd(command string) []string {
var commands []string
if !NodeOSDistroIs("windows") {
commands = []string{"/bin/sh", "-c", command}
} else {
commands = []string{"powershell", "/c", command}
}
return commands
}
// GetDefaultTestImage returns the default test image based on OS.
// If the node OS is windows, currently we return Agnhost image for Windows node
// due to the issue described in https://github.com/kubernetes-sigs/windows-testing/pull/35.
// If the node OS is linux, return busybox image
func GetDefaultTestImage() string {
return imageutils.GetE2EImage(GetDefaultTestImageID())
}
// GetDefaultTestImageID returns the default test image id based on OS.
// If the node OS is windows, currently we return Agnhost image for Windows node
// due to the issue described in https://github.com/kubernetes-sigs/windows-testing/pull/35.
// If the node OS is linux, return busybox image
func GetDefaultTestImageID() int {
return GetTestImageID(imageutils.BusyBox)
}
// GetTestImage returns the image name with the given input
// If the Node OS is windows, currently we return Agnhost image for Windows node
// due to the issue described in https://github.com/kubernetes-sigs/windows-testing/pull/35.
func GetTestImage(id int) string {
if NodeOSDistroIs("windows") {
return imageutils.GetE2EImage(imageutils.Agnhost)
}
return imageutils.GetE2EImage(id)
}
// GetTestImageID returns the image id with the given input
// If the Node OS is windows, currently we return Agnhost image for Windows node
// due to the issue described in https://github.com/kubernetes-sigs/windows-testing/pull/35.
func GetTestImageID(id int) int {
if NodeOSDistroIs("windows") {
return imageutils.Agnhost
}
return id
}
// GeneratePodSecurityContext generates the corresponding pod security context with the given inputs
// If the Node OS is windows, currently we will ignore the inputs and return nil.
// TODO: Will modify it after windows has its own security context
func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
if NodeOSDistroIs("windows") {
return nil
}
return &v1.PodSecurityContext{
FSGroup: fsGroup,
SELinuxOptions: seLinuxOptions,
}
}
// GenerateContainerSecurityContext generates the corresponding container security context with the given inputs
// If the Node OS is windows, currently we will ignore the inputs and return nil.
// TODO: Will modify it after windows has its own security context
func GenerateContainerSecurityContext(privileged bool) *v1.SecurityContext {
if NodeOSDistroIs("windows") {
return nil
}
return &v1.SecurityContext{
Privileged: &privileged,
}
}
// GetLinuxLabel returns the default SELinuxLabel based on OS.
// If the node OS is windows, it will return nil
func GetLinuxLabel() *v1.SELinuxOptions {
if NodeOSDistroIs("windows") {
return nil
}
return &v1.SELinuxOptions{
Level: "s0:c0,c1"}
}
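
These are the helpers the MakePod/MakeSecPod diffs earlier in this commit switch to. A sketch of composing them directly from within the pod package; the container name and command are illustrative:

container := v1.Container{
    Name:            "write-pod",
    Image:           GetDefaultTestImage(),                    // busybox on Linux, agnhost on Windows
    Command:         GenerateScriptCmd("echo ok > /mnt/test"), // "/bin/sh -c ..." or "powershell /c ..."
    SecurityContext: GenerateContainerSecurityContext(false),  // nil on Windows nodes
}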


@@ -102,7 +102,14 @@ func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState stri
// waiting. All pods that are in SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
//
// If minPods or allowedNotReadyPods are -1, this method returns immediately
// without waiting.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
if minPods == -1 || allowedNotReadyPods == -1 {
return nil
}
ignoreSelector := labels.SelectorFromSet(map[string]string{})
start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
@@ -203,14 +210,16 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
// WaitForPodCondition waits for a pod to be matched to the given condition.
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
var lastPodError error
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
lastPodError = err
if err != nil {
if apierrors.IsNotFound(err) {
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
return err
} else {
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
}
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
continue
}
// log now so that current pod info is reported before calling `condition()`
@@ -223,6 +232,10 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
return err
}
}
if apierrors.IsNotFound(lastPodError) {
// return for compatibility with other functions testing for IsNotFound
return lastPodError
}
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
}
@@ -245,8 +258,8 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
@@ -350,7 +363,7 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout)
}
// WaitTimeoutForPodReadyInNamespace waits the given timeout diration for the
// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
// specified pod to be ready and running.
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
@@ -365,12 +378,12 @@ func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
@@ -535,3 +548,8 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podContainerFailed(c, namespace, podName, containerIndex, reason))
}
// WaitForPodContainerStarted waits for the given Pod container to start, after a successful run of the startupProbe.
func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podContainerStarted(c, namespace, podName, containerIndex))
}
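
A caller-side sketch for the new startup-probe wait; the pod name, namespace, and timeout are illustrative, and e2epod/clientSet are assumed as in the sketches above:

// Blocks until container index 0 reports Started (startupProbe passed).
err := e2epod.WaitForPodContainerStarted(clientSet, "default", "csi-rbdplugin-0", 0, 2*time.Minute)
framework.ExpectNoError(err)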


@@ -96,12 +96,12 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
return p
}
// CreateSync creates a new pod according to the framework specifications, and wait for it to start.
// CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
namespace := c.f.Namespace.Name
p := c.Create(pod)
ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, PodStartTimeout))
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
ExpectNoError(err)
return p


@ -1,35 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pv.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/pv",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -19,9 +19,10 @@ package framework
import (
"context"
"fmt"
"k8s.io/kubernetes/test/e2e/storage/utils"
"time"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -38,18 +39,6 @@ const (
pdRetryTimeout = 5 * time.Minute
pdRetryPollTime = 5 * time.Second
// PVBindingTimeout is how long PVs have to become bound.
PVBindingTimeout = 3 * time.Minute
// ClaimBindingTimeout is how long claims have to become bound.
ClaimBindingTimeout = 3 * time.Minute
// PVReclaimingTimeout is how long PVs have to become reclaimed.
PVReclaimingTimeout = 3 * time.Minute
// PVDeletingTimeout is how long PVs have to become deleted.
PVDeletingTimeout = 3 * time.Minute
// VolumeSelectorKey is the key for volume selector.
VolumeSelectorKey = "e2e-pv-pool"
@ -223,7 +212,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
@ -233,7 +222,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Wait for the PV's phase to return to be `expectPVPhase`
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
}
@ -266,7 +255,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Available, Bound).
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
// available pvs.
func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
var boundPVs, deletedPVCs int
for pvName := range pvols {
@ -287,7 +276,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
// get the pvc for the delete call below
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
if err == nil {
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
return err
}
} else if !apierrors.IsNotFound(err) {
@ -445,17 +434,17 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
}
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, PVBindingTimeout)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
}
@ -493,7 +482,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
var actualBinds int
expectedBinds := len(pvols)
if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
@ -501,7 +490,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs")
@ -524,7 +513,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
}
@ -746,7 +735,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
framework.Logf("Waiting up to timeout=%v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
@ -876,3 +865,11 @@ func WaitForPVCFinalizer(ctx context.Context, cs clientset.Interface, name, name
}
return err
}
// GetDefaultFSType returns the default fsType
func GetDefaultFSType() string {
if framework.NodeOSDistroIs("windows") {
return "ntfs"
}
return "ext4"
}


@ -1,38 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["skipper.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/skipper",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -46,6 +46,11 @@ import (
// New local storage types to support local storage capacity isolation
var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
var (
downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
execProbeTimeout featuregate.Feature = "ExecProbeTimeout"
)
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
framework.Logf(msg)
@ -137,6 +142,18 @@ func SkipUnlessLocalEphemeralStorageEnabled() {
}
}
func SkipUnlessDownwardAPIHugePagesEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(downwardAPIHugePages) {
skipInternalf(1, "Only supported when %v feature is enabled", downwardAPIHugePages)
}
}
func SkipUnlessExecProbeTimeoutEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(execProbeTimeout) {
skipInternalf(1, "Only supported when %v feature is enabled", execProbeTimeout)
}
}
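// Hedged illustration (not part of this diff): callers are expected to guard
// feature-gated specs with these helpers. The spec text and the conventional
// e2eskipper import alias are assumptions.
//
//	ginkgo.It("should project hugepages through the downward API", func() {
//		// Skips (rather than fails) when the DownwardAPIHugePages gate is off.
//		e2eskipper.SkipUnlessDownwardAPIHugePagesEnabled()
//		// ... feature-dependent test body ...
//	})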
// SkipIfMissingResource skips if the gvr resource is missing.
func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)


@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["ssh.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/ssh",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -217,6 +217,8 @@ type NodeTestContextType struct {
KubeletConfig kubeletconfig.KubeletConfiguration
// ImageDescription is the description of the image on which the test is running.
ImageDescription string
// RuntimeConfig is a map of API server runtime configuration values.
RuntimeConfig map[string]string
// SystemSpecName is the name of the system spec (e.g., gke) that's used in
// the node e2e test. If empty, the default one (system.DefaultSpec) is
// used. The system specs are in test/e2e_node/system/specs/.
@ -229,7 +231,8 @@ type NodeTestContextType struct {
type CloudConfig struct {
APIEndpoint string
ProjectID string
Zone string // for multizone tests, arbitrarily chosen zone
Zone string // for multizone tests, arbitrarily chosen zone
Zones []string // for multizone tests, use this set of zones instead of querying the cloud provider. Must include Zone.
Region string
MultiZone bool
MultiMaster bool
@ -289,7 +292,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.LogexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.")
flags.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
flags.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If non-zero, framework will allow for that many non-ready nodes when checking for all ready nodes.")
flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If greater than zero, framework will allow for that many non-ready nodes when checking for all ready nodes. If -1, no waiting will be performed for ready nodes or daemonset pods.")
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
@ -329,7 +332,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
flags.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
flags.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, windows, or custom), which determines how specific tests are implemented.")
flags.StringVar(&TestContext.NodeOSArch, "node-os-arch", "amd64", "The OS architecture of cluster VM instances (amd64, arm64, or custom).")
flags.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")
@ -339,6 +342,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&cloudConfig.APIEndpoint, "gce-api-endpoint", "", "The GCE APIEndpoint being used, if applicable")
flags.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
flags.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
flags.Var(cliflag.NewStringSlice(&cloudConfig.Zones), "gce-zones", "The set of zones to use in a multi-zone test instead of querying the cloud provider.")
flags.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
flags.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
flags.BoolVar(&cloudConfig.MultiMaster, "gce-multimaster", false, "If true, the underlying GCE/GKE cluster is assumed to be multi-master.")
@ -352,7 +356,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
@ -433,7 +437,7 @@ func AfterReadingAllFlags(t *TestContextType) {
kubeConfig := createKubeConfig(clusterConfig)
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
t.KubeConfig = tempFile.Name()
klog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
klog.V(4).Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
}
}
if len(t.KubeConfig) == 0 {
@ -454,7 +458,7 @@ func AfterReadingAllFlags(t *TestContextType) {
t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
}
klog.Infof("Tolerating taints %q when considering if nodes are ready", TestContext.NonblockingTaints)
klog.V(4).Infof("Tolerating taints %q when considering if nodes are ready", TestContext.NonblockingTaints)
// Make sure that all test runs have a valid TestContext.CloudConfig.Provider.
// TODO: whether and how long this code is needed is getting discussed


@ -1,22 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["testfiles.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,99 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import "time"
const (
// Default timeouts to be used in TimeoutContext
podStartTimeout = 5 * time.Minute
podStartShortTimeout = 2 * time.Minute
podStartSlowTimeout = 15 * time.Minute
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
snapshotDeleteTimeout = 5 * time.Minute
)
// TimeoutContext contains timeout settings for several actions.
type TimeoutContext struct {
// PodStart is how long to wait for the pod to be started.
PodStart time.Duration
// PodStartShort is the same as `PodStart`, but shorter.
// Use it on a case-by-case basis, mostly when you are sure pod start will not be delayed.
PodStartShort time.Duration
// PodStartSlow is the same as `PodStart`, but longer.
// Use it on a case-by-case basis, mostly when you are sure pod start will take longer than usual.
PodStartSlow time.Duration
// PodDelete is how long to wait for the pod to be deleted.
PodDelete time.Duration
// ClaimProvision is how long claims have to become dynamically provisioned.
ClaimProvision time.Duration
// ClaimProvisionShort is the same as `ClaimProvision`, but shorter.
ClaimProvisionShort time.Duration
// ClaimBound is how long claims have to become bound.
ClaimBound time.Duration
// PVReclaim is how long PVs have to become reclaimed.
PVReclaim time.Duration
// PVBound is how long PVs have to become bound.
PVBound time.Duration
// PVDelete is how long PVs have to become deleted.
PVDelete time.Duration
// PVDeleteSlow is the same as PVDelete, but slower.
PVDeleteSlow time.Duration
// SnapshotCreate is how long to wait for a snapshot to create its snapshotContent.
SnapshotCreate time.Duration
// SnapshotDelete is how long to wait for a snapshot to delete its snapshotContent.
SnapshotDelete time.Duration
}
// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
func NewTimeoutContextWithDefaults() *TimeoutContext {
return &TimeoutContext{
PodStart: podStartTimeout,
PodStartShort: podStartShortTimeout,
PodStartSlow: podStartSlowTimeout,
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
SnapshotDelete: snapshotDeleteTimeout,
}
}
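// Hedged usage sketch (not part of this diff; client, podName and namespace
// are assumed to be in scope): tests copy the defaults and override single
// fields instead of reaching for the package-level constants.
//
//	timeouts := NewTimeoutContextWithDefaults()
//	timeouts.PodStartSlow = 30 * time.Minute // assumption: a known-slow environment
//	err := e2epod.WaitTimeoutForPodRunningInNamespace(client, podName, namespace, timeouts.PodStartSlow)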


@ -71,9 +71,23 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// AllContainers specifies that all containers be visited
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
AllContainers = InitContainers | Containers | EphemeralContainers
)
// DEPRECATED constants. Use the timeouts in framework.Framework instead.
const (
// PodListTimeout is how long to wait for the pod to be listable.
PodListTimeout = time.Minute
// PodStartTimeout is how long to wait for the pod to be started.
PodStartTimeout = 5 * time.Minute
@ -136,16 +150,6 @@ const (
// SnapshotDeleteTimeout is how long to wait for a snapshot to delete its snapshotContent.
SnapshotDeleteTimeout = 5 * time.Minute
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// AllContainers specifies that all containers be visited
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
AllContainers = InitContainers | Containers | EphemeralContainers
)
var (
@ -818,7 +822,7 @@ func (f *Framework) MatchContainerOutput(
}()
// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
@ -1018,10 +1022,13 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
}
// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become scheduable.
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
@ -1114,11 +1121,16 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
}
}
// AllNodesReady checks whether all registered nodes are ready.
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// TestContext.AllowedNotReadyNodes will bypass the post-test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
@ -1230,20 +1242,23 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er
return stdout, stderr, nil
}
// getMasterAddresses returns the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMasterAddresses(c clientset.Interface) (string, string, string) {
var externalIP, internalIP, hostname string
// getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes.
// If any of these is unavailable, empty slices are returned.
func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []string) {
var externalIPs, internalIPs, hostnames []string
// Populate the internal IP.
// Populate the internal IPs.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
for _, subset := range eps.Subsets {
for _, address := range subset.Addresses {
if address.IP != "" {
internalIPs = append(internalIPs, address.IP)
}
}
}
internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
hostURL, err := url.Parse(TestContext.Host)
@ -1251,12 +1266,12 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(hostURL.Host) != nil {
externalIP = hostURL.Host
externalIPs = append(externalIPs, hostURL.Host)
} else {
hostname = hostURL.Host
hostnames = append(hostnames, hostURL.Host)
}
return externalIP, internalIP, hostname
return externalIPs, internalIPs, hostnames
}
// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
@ -1264,16 +1279,16 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
// e.g. internal IPs to be used (issue #56787), so that we can be
// sure to block the control plane fully during tests.
func GetControlPlaneAddresses(c clientset.Interface) []string {
externalIP, internalIP, _ := getMasterAddresses(c)
externalIPs, internalIPs, _ := getControlPlaneAddresses(c)
ips := sets.NewString()
switch TestContext.Provider {
case "gce", "gke":
if externalIP != "" {
ips.Insert(externalIP)
for _, ip := range externalIPs {
ips.Insert(ip)
}
if internalIP != "" {
ips.Insert(internalIP)
for _, ip := range internalIPs {
ips.Insert(ip)
}
case "aws":
ips.Insert(awsMasterIP)


@ -0,0 +1,20 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- saad-ali
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
reviewers:
- saad-ali
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
- jeffvance
- copejon
- verult
- davidz627


@ -0,0 +1,685 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting simple 'index.html' file.
* Then it uses appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed, exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks that Kubernetes can use it as a volume.
*/
package volume
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
// Kb is byte size of kilobyte
Kb int64 = 1000
// Mb is byte size of megabyte
Mb int64 = 1000 * Kb
// Gb is byte size of gigabyte
Gb int64 = 1000 * Mb
// Tb is byte size of terabyte
Tb int64 = 1000 * Gb
// KiB is byte size of kibibyte
KiB int64 = 1024
// MiB is byte size of mebibyte
MiB int64 = 1024 * KiB
// GiB is byte size of gibibyte
GiB int64 = 1024 * MiB
// TiB is byte size of tebibyte
TiB int64 = 1024 * GiB
// VolumeServerPodStartupTimeout is a waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupTimeout = 3 * time.Minute
// PodCleanupTimeout is a waiting period for pod to be cleaned up and unmount its volumes so we
// don't tear down containers with NFS/Ceph/Gluster server too early.
PodCleanupTimeout = 20 * time.Second
)
// SizeRange encapsulates a range of sizes specified as minimum and maximum quantity strings
// Both values are optional.
// If a size is not set, no limitation is assumed: a very small size (e.g. 1Ki) may be used
// as Min and a considerably big size (e.g. 10Ei) as Max, which makes it possible to calculate
// the intersection of the given intervals (if it exists)
type SizeRange struct {
// Max quantity specified as a string including units. E.g "3Gi".
// If the Max size is unset, It will be assign a default valid maximum size 10Ei,
// which is defined in test/e2e/storage/testsuites/base.go
Max string
// Min quantity specified as a string including units. E.g "1Gi"
// If the Min size is unset, it will be assigned a default valid minimum size of 1Ki,
// which is defined in test/e2e/storage/testsuites/base.go
Min string
}
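// Illustration (hedged sketch, not part of this diff): computing the
// intersection of two SizeRanges with the documented defaults. The helper
// name and the k8s.io/apimachinery/pkg/api/resource import are assumptions.
func intersectSizeRange(a, b SizeRange) (SizeRange, bool) {
	parse := func(s, def string) resource.Quantity {
		if s == "" {
			s = def // substitute the documented default for an unset bound
		}
		return resource.MustParse(s)
	}
	min := parse(a.Min, "1Ki")
	if q := parse(b.Min, "1Ki"); q.Cmp(min) > 0 {
		min = q // take the larger lower bound
	}
	max := parse(a.Max, "10Ei")
	if q := parse(b.Max, "10Ei"); q.Cmp(max) < 0 {
		max = q // take the smaller upper bound
	}
	if min.Cmp(max) > 0 {
		return SizeRange{}, false // the ranges do not overlap
	}
	return SizeRange{Min: min.String(), Max: max.String()}, true
}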
// TestConfig is a struct for the configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type TestConfig struct {
Namespace string
// Prefix of all pods. Typically the test name.
Prefix string
// Name of container image for the server pod.
ServerImage string
// Ports to export from the server pod. TCP only.
ServerPorts []int
// Commands to run in the container image.
ServerCmds []string
// Arguments to pass to the container image.
ServerArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
// if <host (source) path> is empty, mount a tmpfs emptydir
ServerVolumes map[string]string
// Message to wait for before starting clients
ServerReadyMessage string
// Use HostNetwork for the server
ServerHostNetwork bool
// Wait for the pod to terminate successfully
// False indicates that the pod is long running
WaitForCompletion bool
// ClientNodeSelection restricts where the client pod runs. Default is any node.
ClientNodeSelection e2epod.NodeSelection
}
// Test contains a volume to mount into a client pod and its
// expected content.
type Test struct {
Volume v1.VolumeSource
Mode v1.PersistentVolumeMode
// Name of file to read/write in FileSystem mode
File string
ExpectedContent string
}
// NewNFSServer is an NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
config = TestConfig{
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
ServerReadyMessage: "NFS started",
}
if len(args) > 0 {
config.ServerArgs = args
}
pod, host = CreateStorageServer(cs, config)
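// A bare IPv6 address must be wrapped in brackets before callers can join it with a port.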
if strings.Contains(host, ":") {
host = "[" + host + "]"
}
return config, pod, host
}
// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
config = TestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Protocol: v1.ProtocolTCP,
Port: 24007,
},
},
},
}
_, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service for Gluster server")
ginkgo.By("creating Gluster endpoints")
endpoints := &v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: ip,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
_, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create endpoints for Gluster server")
return config, pod, ip
}
// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
pod = startVolumeServer(cs, config)
gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
ip = pod.Status.PodIP
gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
framework.Logf("%s server pod IP address: %s", config.Prefix, ip)
return pod, ip
}
// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
portCount := len(config.ServerPorts)
serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.Prefix, i)
serverPodPorts[i] = v1.ContainerPort{
Name: portName,
ContainerPort: int32(config.ServerPorts[i]),
Protocol: v1.ProtocolTCP,
}
}
volumeCount := len(config.ServerVolumes)
volumes := make([]v1.Volume, volumeCount)
mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.ServerVolumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
if src == "" {
volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
} else {
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
}
mounts[i].Name = mountName
mounts[i].ReadOnly = false
mounts[i].MountPath = dst
i++
}
serverPodName := fmt.Sprintf("%s-server", config.Prefix)
ginkgo.By(fmt.Sprint("creating ", serverPodName, " pod"))
privileged := new(bool)
*privileged = true
restartPolicy := v1.RestartPolicyAlways
if config.WaitForCompletion {
restartPolicy = v1.RestartPolicyNever
}
serverPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: serverPodName,
Labels: map[string]string{
"role": serverPodName,
},
},
Spec: v1.PodSpec{
HostNetwork: config.ServerHostNetwork,
Containers: []v1.Container{
{
Name: serverPodName,
Image: config.ServerImage,
SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Command: config.ServerCmds,
Args: config.ServerArgs,
Ports: serverPodPorts,
VolumeMounts: mounts,
},
},
Volumes: volumes,
RestartPolicy: restartPolicy,
},
}
var pod *v1.Pod
serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{})
// ok if the server pod already exists. TODO: make this controllable by callers
if err != nil {
if apierrors.IsAlreadyExists(err) {
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
pod = serverPod
} else {
framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
}
}
if config.WaitForCompletion {
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, metav1.DeleteOptions{}))
} else {
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
if pod == nil {
ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
if config.ServerReadyMessage != "" {
_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
}
// TestServerCleanup cleans up the server pod.
func TestServerCleanup(f *framework.Framework, config TestConfig) {
ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
defer ginkgo.GinkgoRecover()
if config.ServerImage == "" {
return
}
err := e2epod.DeletePodWithWaitByName(f.ClientSet, config.Prefix+"-server", config.Namespace)
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
var gracePeriod int64 = 1
var command string
if !framework.NodeOSDistroIs("windows") {
command = "while true ; do sleep 2; done "
} else {
command = "while(1) {sleep 2}"
}
seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + podSuffix,
Labels: map[string]string{
"role": config.Prefix + "-" + podSuffix,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.Prefix + "-" + podSuffix,
Image: e2epod.GetDefaultTestImage(),
WorkingDir: "/opt",
// An imperative and easily debuggable container which reads/writes vol contents for
// us to scan in the tests or by eye.
// We expect that /opt is empty in the minimal containers which we use in this test.
Command: e2epod.GenerateScriptCmd(command),
VolumeMounts: []v1.VolumeMount{},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: e2epod.GeneratePodSecurityContext(fsGroup, seLinuxOptions),
Volumes: []v1.Volume{},
},
}
e2epod.SetNodeSelection(&clientPod.Spec, config.ClientNodeSelection)
for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
// We need to make the container privileged when SELinux is enabled on the
// host, so the test can write data to a location like /tmp. Also, due to
// the Docker bug below, it's not currently possible to map a device with
// a privileged container, so we don't go privileged for block volumes.
// https://github.com/moby/moby/issues/35991
if privileged && test.Mode == v1.PersistentVolumeBlock {
privileged = false
}
clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(privileged)
if test.Mode == v1.PersistentVolumeBlock {
clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
Name: volumeName,
DevicePath: fmt.Sprintf("/opt/%d", i),
})
} else {
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/opt/%d", i),
})
}
clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
Name: volumeName,
VolumeSource: test.Volume,
})
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)
clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{})
if err != nil {
return nil, err
}
if slow {
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
} else {
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
}
if err != nil {
e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
return nil, err
}
return clientPod, nil
}
func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests {
if test.Mode == v1.PersistentVolumeBlock {
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
// Check that it's a real block device
CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
} else {
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
// Check that a directory has been mounted
dirName := filepath.Dir(fileName)
CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
if !framework.NodeOSDistroIs("windows") {
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
}
}
}
// TestVolumeClient starts a client pod using the given VolumeSource (exported by startVolumeServer())
// and checks that the pod sees the expected data, e.g. from the server pod.
// Multiple Tests can be specified to mount multiple volumes to a single
// pod.
// Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.podStartTimeout.
// It should be used for cases where "regular" dynamic provisioning of an empty volume is requested.
func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
testVolumeClient(f, config, fsGroup, fsType, tests, false)
}
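// Hedged end-to-end sketch (not part of this diff; the content string and the
// use of NewNFSServer are illustrative):
//
//	config, _, host := NewNFSServer(cs, ns, nil)
//	defer TestServerCleanup(f, config)
//	tests := []Test{{
//		Volume:          v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: host, Path: "/"}},
//		File:            "index.html",
//		ExpectedContent: "Hello from NFS!",
//	}}
//	InjectContent(f, config, nil /*fsGroup*/, "" /*fsType*/, tests)
//	TestVolumeClient(f, config, nil /*fsGroup*/, "" /*fsType*/, tests)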
// TestVolumeClientSlow is the same as TestVolumeClient except for its timeout.
// Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.slowPodStartTimeout.
// It should be used for cases where "special" dynamic provisioning is requested, such as volume cloning
// or snapshot restore.
func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
testVolumeClient(f, config, fsGroup, fsType, tests, true)
}
func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
timeouts := f.Timeouts
clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
if err != nil {
framework.Failf("Failed to create client pod: %v", err)
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()
testVolumeContent(f, clientPod, fsGroup, fsType, tests)
}
// InjectContent inserts index.html with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
privileged := true
timeouts := f.Timeouts
if framework.NodeOSDistroIs("windows") {
privileged = false
}
injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
if err != nil {
framework.Failf("Failed to create injector pod: %v", err)
return
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()
ginkgo.By("Writing text file contents in the container.")
for i, test := range tests {
commands := []string{"exec", injectorPod.Name, fmt.Sprintf("--namespace=%v", injectorPod.Namespace), "--"}
if test.Mode == v1.PersistentVolumeBlock {
// Block: write content
deviceName := fmt.Sprintf("/opt/%d", i)
commands = append(commands, generateWriteBlockCmd(test.ExpectedContent, deviceName)...)
} else {
// Filesystem: write content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
}
out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
}
// Check that the data have been really written in this pod.
// This tests non-persistent volume types
testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
}
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
func generateWriteCmd(content, path string) []string {
var commands []string
if !framework.NodeOSDistroIs("windows") {
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
} else {
commands = []string{"powershell", "/c", "echo '" + content + "' > " + path}
}
return commands
}
// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
var commands []string
if !framework.NodeOSDistroIs("windows") {
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
} else {
// TODO: is there a way on windows to get the first X bytes from a device?
commands = []string{"powershell", "/c", "type " + fullPath}
}
return commands
}
// generateWriteBlockCmd generates the corresponding command lines to write to a block device the given content.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateWriteBlockCmd(content, fullPath string) []string {
return generateWriteCmd(content, fullPath)
}
// GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func GenerateReadFileCmd(fullPath string) []string {
var commands []string
if !framework.NodeOSDistroIs("windows") {
commands = []string{"cat", fullPath}
} else {
commands = []string{"powershell", "/c", "type " + fullPath}
}
return commands
}
// generateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateWriteFileCmd(content, fullPath string) []string {
return generateWriteCmd(content, fullPath)
}
// CheckVolumeModeOfPath checks the mode of the volume at the given path.
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not a directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not a block device
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
}
}
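// Hedged usage sketch (the paths are illustrative): verify a raw block device
// and a mounted filesystem directory in the same client pod.
//
//	CheckVolumeModeOfPath(f, pod, v1.PersistentVolumeBlock, "/opt/0")
//	CheckVolumeModeOfPath(f, pod, v1.PersistentVolumeFilesystem, "/opt/1")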
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in the target pod.
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
// VerifyExecInPodSucceed verifies that a shell cmd in the target pod succeeds.
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyExecInPodFail verifies that a shell cmd in the target pod fails with the given exit code.
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exited without error", shExec, exitCode)
}


@ -1,25 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["perftype.go"],
importpath = "k8s.io/kubernetes/test/e2e/perftype",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -0,0 +1,310 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package podlogs enables live capturing of all events and log
// messages for some or all pods in a namespace as they get generated.
// This helps with debugging both a running test (what is currently going
// on?) and the output of a CI run (events appear in chronological
// order, and output that normally isn't available, like command
// stdout messages, is captured).
package podlogs
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"path"
"regexp"
"strings"
"sync"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
)
// LogOutput determines where output from CopyAllLogs goes.
type LogOutput struct {
// If not nil, errors will be logged here.
StatusWriter io.Writer
// If not nil, all output goes to this writer with "<pod>/<container>:" as prefix.
LogWriter io.Writer
// Base directory for one log file per container.
// The full path of each log file will be <log path prefix><pod>-<container>.log.
LogPathPrefix string
}
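// Hedged usage sketch (not part of this diff; cs and the namespace are assumed):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	// Stream every container's log to stdout, prefixed with "<pod>/<container>:".
//	if err := CopyAllLogs(ctx, cs, "e2e-tests", LogOutput{LogWriter: os.Stdout}); err != nil {
//		fmt.Fprintf(os.Stderr, "log capture failed: %v\n", err)
//	}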
// Matches harmless errors from pkg/kubelet/kubelet_pods.go.
var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|waiting to start|not available)|the server could not find the requested resource`)
// CopyAllLogs follows the logs of all containers in all pods,
// including those that get created in the future, and writes each log
// line as configured in the output options. It does that until the
// context is done or until an error occurs.
//
// Beware that there is currently no way to force log collection
// before removing pods, which means that there is a known race
// between "stop pod" and "collecting log entries". The alternative
// would be a blocking function which collects logs from all currently
// running pods, but that would have the disadvantage that
// already deleted pods aren't covered.
//
// Another race occurs when a pod shuts down. Logging stops, but if
// the pod is not removed from the apiserver quickly enough, logging
// resumes and dumps the old log again. Previously, this was allowed based
// on the assumption that it is better to log twice than miss log messages
// of pods that started and immediately terminated or when logging temporarily
// stopped.
//
// But it turned out to be rather confusing, so now a heuristic is used: if
// log output of a container was already captured, then capturing does not
// resume if the pod is marked for deletion.
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
}
go func() {
var m sync.Mutex
// Key is pod/container name, true if currently logging it.
active := map[string]bool{}
// Key is pod/container/container-id, true if we have ever started to capture its output.
started := map[string]bool{}
check := func() {
m.Lock()
defer m.Unlock()
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), meta.ListOptions{})
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err)
}
return
}
for _, pod := range pods.Items {
for i, c := range pod.Spec.Containers {
// Sanity check: the status array should have an entry for each container.
if len(pod.Status.ContainerStatuses) <= i {
continue
}
name := pod.ObjectMeta.Name + "/" + c.Name
id := name + "/" + pod.Status.ContainerStatuses[i].ContainerID
if active[name] ||
// If we have worked on a container before and it has now terminated, then
// there cannot be any new output and we can ignore it.
(pod.Status.ContainerStatuses[i].State.Terminated != nil &&
started[id]) ||
// State.Terminated might not have been updated although the container already
// stopped running. Also check whether the pod is deleted.
(pod.DeletionTimestamp != nil && started[id]) ||
// Don't attempt to get logs for a container unless it is running or has terminated.
// Trying to get a log would just end up with an error that we would have to suppress.
(pod.Status.ContainerStatuses[i].State.Running == nil &&
pod.Status.ContainerStatuses[i].State.Terminated == nil) {
continue
}
readCloser, err := logsForPod(ctx, cs, ns, pod.ObjectMeta.Name,
&v1.PodLogOptions{
Container: c.Name,
Follow: true,
})
if err != nil {
// We do get "normal" errors here, like trying to read too early.
// We can ignore those.
if to.StatusWriter != nil &&
expectedErrors.FindStringIndex(err.Error()) == nil {
fmt.Fprintf(to.StatusWriter, "WARNING: pod log: %s: %s\n", name, err)
}
continue
}
// Determine where we write. If this fails, we intentionally return without clearing
// the active[name] flag, which prevents trying over and over again to
// create the output file.
var out io.Writer
var closer io.Closer
var prefix string
if to.LogWriter != nil {
out = to.LogWriter
nodeName := pod.Spec.NodeName
if len(nodeName) > 10 {
nodeName = nodeName[0:4] + ".." + nodeName[len(nodeName)-4:]
}
prefix = name + "@" + nodeName + ": "
} else {
var err error
filename := to.LogPathPrefix + pod.ObjectMeta.Name + "-" + c.Name + ".log"
err = os.MkdirAll(path.Dir(filename), 0755)
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: pod log: create directory for %s: %s\n", filename, err)
}
return
}
// The test suite might run the same test multiple times,
// so we have to append here.
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: pod log: create file %s: %s\n", filename, err)
}
return
}
closer = file
out = file
}
go func() {
if closer != nil {
defer closer.Close()
}
first := true
defer func() {
m.Lock()
// If we never printed anything, then also skip the final message.
if !first {
if prefix != "" {
fmt.Fprintf(out, "%s==== end of pod log ====\n", prefix)
} else {
fmt.Fprintf(out, "==== end of pod log for container %s ====\n", name)
}
}
active[name] = false
m.Unlock()
readCloser.Close()
}()
scanner := bufio.NewScanner(readCloser)
for scanner.Scan() {
line := scanner.Text()
// Filter out the expected "end of stream" error message,
// it would just confuse developers who don't know about it.
// Same for attempts to read logs from a container that
// isn't ready (yet?!).
if !strings.HasPrefix(line, "rpc error: code = Unknown desc = Error: No such container:") &&
!strings.HasPrefix(line, "unable to retrieve container logs for ") &&
!strings.HasPrefix(line, "Unable to retrieve container logs for ") {
if first {
// Because the same log might be written to multiple times
// in different test instances, log an extra line to separate them.
// Also provides some useful extra information.
if prefix == "" {
fmt.Fprintf(out, "==== start of pod log for container %s ====\n", name)
} else {
fmt.Fprintf(out, "%s==== start of pod log ====\n", prefix)
}
first = false
}
fmt.Fprintf(out, "%s%s\n", prefix, line)
}
}
}()
active[name] = true
started[id] = true
}
}
}
// Watch events to see whether we can start logging
// and log interesting ones.
check()
for {
select {
case <-watcher.ResultChan():
check()
case <-ctx.Done():
return
}
}
}()
return nil
}
// logsForPod starts reading the logs for a certain pod. If the pod has more than one
// container, opts.Container must be set. Reading stops when the context is done.
// The stream includes formatted error messages and ends with
// rpc error: code = Unknown desc = Error: No such container: 41a...
// when the pod gets deleted while streaming.
func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) {
return cs.CoreV1().Pods(ns).GetLogs(pod, opts).Stream(ctx)
}
// WatchPods prints pod status events for a certain namespace or all namespaces
// when namespace name is empty.
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
}
go func() {
defer watcher.Stop()
for {
select {
case e := <-watcher.ResultChan():
if e.Object == nil {
continue
}
pod, ok := e.Object.(*v1.Pod)
if !ok {
continue
}
buffer := new(bytes.Buffer)
fmt.Fprintf(buffer,
"pod event: %s: %s/%s %s: %s %s\n",
e.Type,
pod.Namespace,
pod.Name,
pod.Status.Phase,
pod.Status.Reason,
pod.Status.Conditions,
)
for _, cst := range pod.Status.ContainerStatuses {
fmt.Fprintf(buffer, " %s: ", cst.Name)
if cst.State.Waiting != nil {
fmt.Fprintf(buffer, "WAITING: %s - %s",
cst.State.Waiting.Reason,
cst.State.Waiting.Message,
)
} else if cst.State.Running != nil {
fmt.Fprintf(buffer, "RUNNING")
} else if cst.State.Terminated != nil {
fmt.Fprintf(buffer, "TERMINATED: %s - %s",
cst.State.Terminated.Reason,
cst.State.Terminated.Message,
)
}
fmt.Fprintf(buffer, "\n")
}
to.Write(buffer.Bytes())
case <-ctx.Done():
return
}
}
}()
return nil
}
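As a usage sketch (not part of the vendored file; assumes KUBECONFIG points at a reachable cluster and that the namespace "default" is of interest), wiring CopyAllLogs and WatchPods together looks roughly like this:

package main

import (
	"context"
	"os"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/test/e2e/storage/podlogs"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Capture for one minute, then let the context cancel both goroutines.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// All current and future pod logs in "default" go to stdout.
	if err := podlogs.CopyAllLogs(ctx, cs, "default", podlogs.LogOutput{LogWriter: os.Stdout}); err != nil {
		panic(err)
	}
	// Pod status events are printed alongside the logs.
	if err := podlogs.WatchPods(ctx, cs, "default", os.Stdout); err != nil {
		panic(err)
	}
	<-ctx.Done()
}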

View File

@ -1,62 +0,0 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"create.go",
"deployment.go",
"ebs.go",
"framework.go",
"host_exec.go",
"local.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/exec:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -41,7 +41,6 @@ import (
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the provisioner binary accepts a --provisioner parameter
// - the paths inside the container are either fixed
// and don't need to be patched (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
@ -86,10 +85,6 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
switch container.Name {
case o.DriverContainerName:
container.Args = append(container.Args, o.DriverContainerArguments...)
case o.ProvisionerContainerName:
// Driver name is expected to be the same
// as the provisioner here.
container.Args = append(container.Args, "--provisioner="+o.NewDriverName)
}
}
}

View File

@ -106,7 +106,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)
return pod
}

182 vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go generated vendored Normal file
View File

@ -0,0 +1,182 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
)
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet
ns := driverNamespace.Name
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
// Clean up each individual component text such that
// it contains only characters that are valid as file
// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// Each component name maps to a directory. This
// avoids cluttering the root artifact directory and
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
strings.Join(components, "/") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
}
return cancel
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until the node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
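A hedged sketch of how a disruptive storage test drives this helper (hypothetical function name; KubeletCommand itself blocks until the node reaches the expected NotReady/Ready state, so the caller only sequences the operations):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// bounceKubelet stops the kubelet on the pod's node (waiting for the node
// to go NotReady) and then starts it again (waiting for Ready).
func bounceKubelet(c clientset.Interface, pod *v1.Pod) {
	utils.KubeletCommand(utils.KStop, c, pod)
	utils.KubeletCommand(utils.KStart, c, pod)
}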

View File

@ -0,0 +1,151 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// SnapshotGroup is the snapshot CRD api group
SnapshotGroup = "snapshot.storage.k8s.io"
// SnapshotAPIVersion is the snapshot CRD api version
SnapshotAPIVersion = "snapshot.storage.k8s.io/v1"
)
var (
// SnapshotGVR is GroupVersionResource for volumesnapshots
SnapshotGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshots"}
// SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses
SnapshotClassGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotclasses"}
// SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents
SnapshotContentGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotcontents"}
)
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
if successful := WaitUntil(poll, timeout, func() bool {
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
return false
}
status := snapshot.Object["status"]
if status == nil {
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
return false
}
value := status.(map[string]interface{})
if value["readyToUse"] == true {
framework.Logf("VolumeSnapshot %s found and is ready", snapshotName)
return true
}
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
return false
}); successful {
return nil
}
return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
}
// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
framework.ExpectNoError(err)
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
framework.Logf("received snapshotStatus %v", snapshotStatus)
framework.Logf("snapshotContentName %s", snapshotContentName)
framework.ExpectNoError(err)
vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)
return vscontent
}
// DeleteSnapshotWithoutWaiting deletes a VolumeSnapshot and returns directly without waiting
func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName string) error {
ginkgo.By("deleting the snapshot")
err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
var err error
err = DeleteSnapshotWithoutWaiting(dc, ns, snapshotName)
if err != nil {
return err
}
ginkgo.By("checking the Snapshot has been deleted")
err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout)
return err
}
// GenerateSnapshotClassSpec constructs a new SnapshotClass instance spec
// with a unique name that is based on namespace + suffix.
func GenerateSnapshotClassSpec(
snapshotter string,
parameters map[string]string,
ns string,
suffix string,
) *unstructured.Unstructured {
snapshotClass := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshotClass",
"apiVersion": SnapshotAPIVersion,
"metadata": map[string]interface{}{
// Name must be unique, so let's base it on namespace name and use GenerateName
// TODO(#96234): Remove unnecessary suffix.
"name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
},
"driver": snapshotter,
"parameters": parameters,
"deletionPolicy": "Delete",
},
}
return snapshotClass
}
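A short sketch of the intended call pattern (hypothetical helper; poll and timeout values are illustrative): wait for a snapshot to become ready, then delete it and wait for the object to disappear.

package example

import (
	"time"

	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// readyThenDelete waits until the named VolumeSnapshot reports
// readyToUse, then deletes it and waits for the deletion to complete.
func readyThenDelete(dc dynamic.Interface, ns, name string) error {
	if err := utils.WaitForSnapshotReady(dc, ns, name, 2*time.Second, 5*time.Minute); err != nil {
		return err
	}
	return utils.DeleteAndWaitSnapshot(dc, ns, name, 2*time.Second, 5*time.Minute)
}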

View File

@ -21,6 +21,7 @@ import (
"crypto/sha256"
"encoding/base64"
"fmt"
"math"
"math/rand"
"path/filepath"
"strings"
@ -32,6 +33,7 @@ import (
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -39,13 +41,11 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
// KubeletOpt type definition
@ -59,7 +59,9 @@ const (
// KStop defines stop value
KStop KubeletOpt = "stop"
// KRestart defines restart value
KRestart KubeletOpt = "restart"
KRestart KubeletOpt = "restart"
minValidSize string = "1Ki"
maxValidSize string = "10Ei"
)
const (
@ -67,37 +69,10 @@ const (
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
// VerifyExecInPodSucceed verifies that a shell cmd in the target pod succeeds
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
@ -105,131 +80,6 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// VerifyExecInPodFail verifies that a shell cmd in the target pod fails with a certain exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until the node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
// getKubeletMainPid returns the main PID of the kubelet process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
@ -325,7 +175,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
if err != nil {
framework.ExpectNoError(err, "Expected pod to be not found.")
}
@ -411,7 +261,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
@ -446,7 +296,7 @@ func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Fra
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -489,7 +339,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
}
// StartExternalProvisioner creates an external provisioner pod
@ -614,46 +464,39 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
}
}
// CheckVolumeModeOfPath check mode of volume
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}
// CheckReadWriteToPath checks that the path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
VerifyExecInPodSucceed(f, pod, readFile("Hello world.", path))
e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello world.", path))
// Check that writing to directory as block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
@ -699,8 +542,8 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten
sum := sha256.Sum256(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}
// CheckWriteToPath that file can be properly written.
@ -724,8 +567,8 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent
encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}
// findMountPoints returns all mount points on given node under specified directory.
@ -866,7 +709,7 @@ func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.Gr
// VerifyFilePathGidInPod verifies the expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
@ -877,7 +720,90 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := PodExec(f, pod, cmd)
_, _, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
}
// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func DeleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// CreateVolumeSource creates a volume source object
func CreateVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
return &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: readOnly,
},
}
}
// TryFunc tries to execute the function and returns an error if there is any
func TryFunc(f func()) error {
var err error
if f == nil {
return nil
}
defer func() {
if recoverError := recover(); recoverError != nil {
err = fmt.Errorf("%v", recoverError)
}
}()
f()
return err
}
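Since the e2e framework reports failures by panicking, TryFunc is the bridge that lets cleanup paths convert such a panic into an ordinary error. A tiny sketch (hypothetical function name):

package example

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func cleanupExample() {
	// The panic inside the closure is recovered and surfaced as an error.
	err := utils.TryFunc(func() { panic("cleanup failed") })
	fmt.Println(err) // prints: cleanup failed
}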
// GetSizeRangesIntersection takes two instances of storage size ranges, determines the
// intersection of the intervals (if it exists), and returns the minimum of the intersection
// to be used as the claim size for the test.
// If a value is not set, there is no minimum or maximum size limitation and a default size is used.
func GetSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error
// If a bound of the SizeRange is not set, assign the default minimum or maximum size
if len(first.Min) == 0 {
first.Min = minValidSize
}
if len(first.Max) == 0 {
first.Max = maxValidSize
}
if len(second.Min) == 0 {
second.Min = minValidSize
}
if len(second.Max) == 0 {
second.Max = maxValidSize
}
if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
return "", err
}
if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
return "", err
}
if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
return "", err
}
if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
return "", err
}
interSectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
// the minimum of the intersection shall be returned as the claim size
var intersectionMin resource.Quantity
if intersectionEnd-interSectionStart >= 0 { //have intersection
intersectionMin = *resource.NewQuantity(int64(interSectionStart), "BinarySI") //convert value to BinarySI format. E.g. 5Gi
// return the minimum of the intersection as the claim size
return intersectionMin.String(), nil
}
return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
}
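To make the interval logic concrete, here is a hedged sketch (illustrative values, hypothetical function name): a driver supporting 1Gi–100Gi intersected with a test that asks for at least 5Gi yields a 5Gi claim size, since the minimum of the intersection is returned.

package example

import (
	"fmt"

	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func claimSizeExample() {
	driver := e2evolume.SizeRange{Min: "1Gi", Max: "100Gi"}
	test := e2evolume.SizeRange{Min: "5Gi"} // Max defaults to 10Ei
	size, err := utils.GetSizeRangesIntersection(driver, test)
	fmt.Println(size, err) // prints: 5Gi <nil>
}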

View File

@ -1,89 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"admission_webhook.go",
"audit.go",
"conditions.go",
"create_resources.go",
"delete_resources.go",
"density_utils.go",
"deployment.go",
"node.go",
"paths.go",
"pki_helpers.go",
"pod_store.go",
"replicaset.go",
"runners.go",
"tmpdir.go",
"update_resources.go",
],
importpath = "k8s.io/kubernetes/test/utils",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/labels:go_default_library",
"//staging/src/k8s.io/api/admission/v1beta1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/audit:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/scale:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/utils/crd:all-srcs",
"//test/utils/harness:all-srcs",
"//test/utils/image:all-srcs",
"//test/utils/junit:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,31 +0,0 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["manifest.go"],
importpath = "k8s.io/kubernetes/test/utils/image",
deps = [
"//vendor/gopkg.in/yaml.v2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["manifest_test.go"],
embed = [":go_default_library"],
)

View File

@ -4,10 +4,10 @@ reviewers:
- justaugustus
- luxas
- mkumatag
- listx
- dims
approvers:
- luxas
- mkumatag
- listx
- dims
emeritus_approvers:
- ixdy

View File

@ -17,9 +17,12 @@ limitations under the License.
package image
import (
"crypto/sha256"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
yaml "gopkg.in/yaml.v2"
@ -28,18 +31,17 @@ import (
// RegistryList holds public and private image registries
type RegistryList struct {
GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
DockerGluster string `yaml:"dockerGluster"`
E2eRegistry string `yaml:"e2eRegistry"`
E2eVolumeRegistry string `yaml:"e2eVolumeRegistry"`
PromoterE2eRegistry string `yaml:"promoterE2eRegistry"`
BuildImageRegistry string `yaml:"buildImageRegistry"`
InvalidRegistry string `yaml:"invalidRegistry"`
GcEtcdRegistry string `yaml:"gcEtcdRegistry"`
GcRegistry string `yaml:"gcRegistry"`
SigStorageRegistry string `yaml:"sigStorageRegistry"`
GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
PrivateRegistry string `yaml:"privateRegistry"`
SampleRegistry string `yaml:"sampleRegistry"`
MicrosoftRegistry string `yaml:"microsoftRegistry"`
}
// Config holds an images registry, name, and version
@ -67,18 +69,17 @@ func (i *Config) SetVersion(version string) {
func initReg() RegistryList {
registry := RegistryList{
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
DockerLibraryRegistry: "docker.io/library",
DockerGluster: "docker.io/gluster",
E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
E2eVolumeRegistry: "gcr.io/kubernetes-e2e-test-images/volume",
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
BuildImageRegistry: "k8s.gcr.io/build-image",
InvalidRegistry: "invalid.com/invalid",
GcEtcdRegistry: "k8s.gcr.io",
GcRegistry: "k8s.gcr.io",
SigStorageRegistry: "k8s.gcr.io/sig-storage",
GcrReleaseRegistry: "gcr.io/gke-release",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
SampleRegistry: "gcr.io/google-samples",
GcrReleaseRegistry: "gcr.io/gke-release",
MicrosoftRegistry: "mcr.microsoft.com",
}
repoList := os.Getenv("KUBE_TEST_REPO_LIST")
if repoList == "" {
@ -98,24 +99,26 @@ func initReg() RegistryList {
}
var (
registry = initReg()
dockerLibraryRegistry = registry.DockerLibraryRegistry
dockerGluster = registry.DockerGluster
registry = initReg()
// PrivateRegistry is an image repository that requires authentication
PrivateRegistry = registry.PrivateRegistry
// Preconfigured image configs
dockerLibraryRegistry = "docker.io/library"
e2eRegistry = registry.E2eRegistry
e2eVolumeRegistry = registry.E2eVolumeRegistry
promoterE2eRegistry = registry.PromoterE2eRegistry
buildImageRegistry = registry.BuildImageRegistry
gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry
gcEtcdRegistry = registry.GcEtcdRegistry
gcRegistry = registry.GcRegistry
sigStorageRegistry = registry.SigStorageRegistry
gcrReleaseRegistry = registry.GcrReleaseRegistry
invalidRegistry = registry.InvalidRegistry
// PrivateRegistry is an image repository that requires authentication
PrivateRegistry = registry.PrivateRegistry
sampleRegistry = registry.SampleRegistry
sampleRegistry = registry.SampleRegistry
microsoftRegistry = registry.MicrosoftRegistry
// Preconfigured image configs
imageConfigs = initImageConfigs()
imageConfigs, originalImageConfigs = initImageConfigs()
)
const (
@ -169,6 +172,12 @@ const (
Nginx
// NginxNew image
NginxNew
// NodePerfNpbEp image
NodePerfNpbEp
// NodePerfNpbIs image
NodePerfNpbIs
// NodePerfTfWideDeep image
NodePerfTfWideDeep
// Nonewprivs image
Nonewprivs
// NonRoot runs with a default user of 1234
@ -198,52 +207,132 @@ const (
VolumeGlusterServer
// VolumeRBDServer image
VolumeRBDServer
// WindowsServer image
WindowsServer
)
func initImageConfigs() map[int]Config {
func initImageConfigs() (map[int]Config, map[int]Config) {
configs := map[int]Config{}
configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.21"}
configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.32"}
configs[AgnhostPrivate] = Config{PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{e2eRegistry, "sample-apiserver", "1.17"}
configs[AppArmorLoader] = Config{e2eRegistry, "apparmor-loader", "1.0"}
configs[BusyBox] = Config{dockerLibraryRegistry, "busybox", "1.29"}
configs[APIServer] = Config{promoterE2eRegistry, "sample-apiserver", "1.17.4"}
configs[AppArmorLoader] = Config{promoterE2eRegistry, "apparmor-loader", "1.3"}
configs[BusyBox] = Config{promoterE2eRegistry, "busybox", "1.29-1"}
configs[CheckMetadataConcealment] = Config{promoterE2eRegistry, "metadata-concealment", "1.6"}
configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{e2eRegistry, "cuda-vector-add", "2.0"}
configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.3.0"}
configs[EchoServer] = Config{e2eRegistry, "echoserver", "2.2"}
configs[Etcd] = Config{gcRegistry, "etcd", "3.4.13-0"}
configs[GlusterDynamicProvisioner] = Config{dockerGluster, "glusterdynamic-provisioner", "v1.0"}
configs[Httpd] = Config{dockerLibraryRegistry, "httpd", "2.4.38-alpine"}
configs[HttpdNew] = Config{dockerLibraryRegistry, "httpd", "2.4.39-alpine"}
configs[CudaVectorAdd2] = Config{promoterE2eRegistry, "cuda-vector-add", "2.2"}
configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.6.2"}
configs[EchoServer] = Config{promoterE2eRegistry, "echoserver", "2.3"}
configs[Etcd] = Config{gcEtcdRegistry, "etcd", "3.4.13-0"}
configs[GlusterDynamicProvisioner] = Config{promoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"}
configs[Httpd] = Config{promoterE2eRegistry, "httpd", "2.4.38-1"}
configs[HttpdNew] = Config{promoterE2eRegistry, "httpd", "2.4.39-1"}
configs[InvalidRegistryImage] = Config{invalidRegistry, "alpine", "3.1"}
configs[IpcUtils] = Config{e2eRegistry, "ipc-utils", "1.0"}
configs[JessieDnsutils] = Config{e2eRegistry, "jessie-dnsutils", "1.0"}
configs[Kitten] = Config{e2eRegistry, "kitten", "1.0"}
configs[Nautilus] = Config{e2eRegistry, "nautilus", "1.0"}
configs[IpcUtils] = Config{promoterE2eRegistry, "ipc-utils", "1.2"}
configs[JessieDnsutils] = Config{promoterE2eRegistry, "jessie-dnsutils", "1.4"}
configs[Kitten] = Config{promoterE2eRegistry, "kitten", "1.4"}
configs[Nautilus] = Config{promoterE2eRegistry, "nautilus", "1.4"}
configs[NFSProvisioner] = Config{sigStorageRegistry, "nfs-provisioner", "v2.2.2"}
configs[Nginx] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"}
configs[NginxNew] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"}
configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"}
configs[NonRoot] = Config{e2eRegistry, "nonroot", "1.0"}
configs[Nginx] = Config{promoterE2eRegistry, "nginx", "1.14-1"}
configs[NginxNew] = Config{promoterE2eRegistry, "nginx", "1.15-1"}
configs[NodePerfNpbEp] = Config{promoterE2eRegistry, "node-perf/npb-ep", "1.1"}
configs[NodePerfNpbIs] = Config{promoterE2eRegistry, "node-perf/npb-is", "1.1"}
configs[NodePerfTfWideDeep] = Config{promoterE2eRegistry, "node-perf/tf-wide-deep", "1.1"}
configs[Nonewprivs] = Config{promoterE2eRegistry, "nonewprivs", "1.3"}
configs[NonRoot] = Config{promoterE2eRegistry, "nonroot", "1.1"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{gcRegistry, "pause", "3.2"}
configs[Perl] = Config{dockerLibraryRegistry, "perl", "5.26"}
configs[Pause] = Config{gcRegistry, "pause", "3.4.1"}
configs[Perl] = Config{promoterE2eRegistry, "perl", "5.26"}
configs[PrometheusDummyExporter] = Config{gcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{gcRegistry, "prometheus-to-sd", "v0.5.0"}
configs[Redis] = Config{dockerLibraryRegistry, "redis", "5.0.5-alpine"}
configs[RegressionIssue74839] = Config{e2eRegistry, "regression-issue-74839-amd64", "1.0"}
configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"}
configs[Redis] = Config{promoterE2eRegistry, "redis", "5.0.5-alpine"}
configs[RegressionIssue74839] = Config{promoterE2eRegistry, "regression-issue-74839", "1.2"}
configs[ResourceConsumer] = Config{promoterE2eRegistry, "resource-consumer", "1.9"}
configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[VolumeNFSServer] = Config{e2eVolumeRegistry, "nfs", "1.0"}
configs[VolumeISCSIServer] = Config{e2eVolumeRegistry, "iscsi", "2.0"}
configs[VolumeGlusterServer] = Config{e2eVolumeRegistry, "gluster", "1.0"}
configs[VolumeRBDServer] = Config{e2eVolumeRegistry, "rbd", "1.0.1"}
configs[VolumeNFSServer] = Config{promoterE2eRegistry, "volume/nfs", "1.2"}
configs[VolumeISCSIServer] = Config{promoterE2eRegistry, "volume/iscsi", "2.2"}
configs[VolumeGlusterServer] = Config{promoterE2eRegistry, "volume/gluster", "1.2"}
configs[VolumeRBDServer] = Config{promoterE2eRegistry, "volume/rbd", "1.0.3"}
configs[WindowsServer] = Config{microsoftRegistry, "windows", "1809"}
// if requested, map all the SHAs into a known format based on the input
originalImageConfigs := configs
if repo := os.Getenv("KUBE_TEST_REPO"); len(repo) > 0 {
configs = GetMappedImageConfigs(originalImageConfigs, repo)
}
return configs, originalImageConfigs
}
// GetMappedImageConfigs returns the images if they were mapped to the provided
// image repository.
func GetMappedImageConfigs(originalImageConfigs map[int]Config, repo string) map[int]Config {
configs := make(map[int]Config)
for i, config := range originalImageConfigs {
switch i {
case InvalidRegistryImage, AuthenticatedAlpine,
AuthenticatedWindowsNanoServer, AgnhostPrivate:
// These images are special and can't be run out of the cloud - some because they
// are authenticated, and others because they are not real images. Tests that depend
// on these images can't be run without access to the public internet.
configs[i] = config
continue
}
// Build a new tag with the index, a hash of the image spec (to be unique), and
// a shortened, "safe" form of the pull spec so it will fit in the tag
configs[i] = getRepositoryMappedConfig(i, config, repo)
}
return configs
}
var (
reCharSafe = regexp.MustCompile(`[^\w]`)
reDashes = regexp.MustCompile(`-+`)
)
// getRepositoryMappedConfig maps an existing image to the provided repo, generating a
// tag that is unique for the input config. The tag contains the index, a hash of
// the image spec (to be unique), and a shortened, "safe" form of the pull spec so
// that a human can still recognize the value while it fits in a tag. If index is -1,
// then no index will be added to the tag.
func getRepositoryMappedConfig(index int, config Config, repo string) Config {
parts := strings.SplitN(repo, "/", 2)
registry, name := parts[0], parts[1]
pullSpec := config.GetE2EImage()
h := sha256.New()
h.Write([]byte(pullSpec))
hash := base64.RawURLEncoding.EncodeToString(h.Sum(nil)[:16])
shortName := reCharSafe.ReplaceAllLiteralString(pullSpec, "-")
shortName = reDashes.ReplaceAllLiteralString(shortName, "-")
maxLength := 127 - 16 - 6 - 10
if len(shortName) > maxLength {
shortName = shortName[len(shortName)-maxLength:]
}
var version string
if index == -1 {
version = fmt.Sprintf("e2e-%s-%s", shortName, hash)
} else {
version = fmt.Sprintf("e2e-%d-%s-%s", index, shortName, hash)
}
return Config{
registry: registry,
name: name,
version: version,
}
}
// GetOriginalImageConfigs returns the configuration before any mapping rules.
func GetOriginalImageConfigs() map[int]Config {
return originalImageConfigs
}
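A hedged sketch of the exported entry points (repository value is illustrative, function name hypothetical): remapping the full image set into a single mirror repository produces pull specs of the form <repo>:e2e-<index>-<safe-pull-spec>-<hash>.

package example

import (
	"fmt"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

func remapExample() {
	orig := imageutils.GetOriginalImageConfigs()
	mapped := imageutils.GetMappedImageConfigs(orig, "quay.io/myorg/mirror")
	for index, cfg := range mapped {
		fmt.Println(index, cfg.GetE2EImage())
	}
}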
// GetImageConfigs returns the map of imageConfigs
func GetImageConfigs() map[int]Config {
return imageConfigs
@ -275,11 +364,26 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) {
countParts := len(parts)
registryAndUser := strings.Join(parts[:countParts-1], "/")
if repo := os.Getenv("KUBE_TEST_REPO"); len(repo) > 0 {
index := -1
for i, v := range originalImageConfigs {
if v.GetE2EImage() == imageURL {
index = i
break
}
}
last := strings.SplitN(parts[countParts-1], ":", 2)
config := getRepositoryMappedConfig(index, Config{
registry: parts[0],
name: strings.Join([]string{strings.Join(parts[1:countParts-1], "/"), last[0]}, "/"),
version: last[1],
}, repo)
return config.GetE2EImage(), nil
}
switch registryAndUser {
case "gcr.io/kubernetes-e2e-test-images":
registryAndUser = e2eRegistry
case "gcr.io/kubernetes-e2e-test-images/volume":
registryAndUser = e2eVolumeRegistry
case "k8s.gcr.io":
registryAndUser = gcRegistry
case "k8s.gcr.io/sig-storage":


@ -28,7 +28,7 @@ import (
apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -1017,14 +1017,14 @@ type NodeAllocatableStrategy struct {
// Node.status.allocatable to fill to all nodes.
NodeAllocatable map[v1.ResourceName]string
// Map <driver_name> -> VolumeNodeResources to fill into csiNode.spec.drivers[<driver_name>].
CsiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
CsiNodeAllocatable map[string]*storagev1.VolumeNodeResources
// List of in-tree volume plugins migrated to CSI.
MigratedPlugins []string
}
var _ PrepareNodeStrategy = &NodeAllocatableStrategy{}
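A hedged construction sketch, assuming storagev1.VolumeNodeResources exposes a Count field as in storage.k8s.io/v1; the driver name and capacities below are illustrative:
	maxVols := int32(16)
	strategy := NewNodeAllocatableStrategy(
		map[v1.ResourceName]string{v1.ResourceCPU: "8", v1.ResourceMemory: "32Gi"},
		map[string]*storagev1.VolumeNodeResources{
			"hostpath.csi.k8s.io": {Count: &maxVols}, // per-node volume limit; illustrative
		},
		nil, // no migrated in-tree plugins in this sketch
	)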
func NewNodeAllocatableStrategy(nodeAllocatable map[v1.ResourceName]string, csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources, migratedPlugins []string) *NodeAllocatableStrategy {
func NewNodeAllocatableStrategy(nodeAllocatable map[v1.ResourceName]string, csiNodeAllocatable map[string]*storagev1.VolumeNodeResources, migratedPlugins []string) *NodeAllocatableStrategy {
return &NodeAllocatableStrategy{
NodeAllocatable: nodeAllocatable,
CsiNodeAllocatable: csiNodeAllocatable,
@ -1063,20 +1063,20 @@ func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
}
func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientset.Interface) error {
csiNode := &storagev1beta1.CSINode{
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
Annotations: map[string]string{
v1.MigratedPluginsAnnotationKey: strings.Join(s.MigratedPlugins, ","),
},
},
Spec: storagev1beta1.CSINodeSpec{
Drivers: []storagev1beta1.CSINodeDriver{},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{},
},
}
for driver, allocatable := range s.CsiNodeAllocatable {
d := storagev1beta1.CSINodeDriver{
d := storagev1.CSINodeDriver{
Name: driver,
Allocatable: allocatable,
NodeID: nodeName,
@ -1084,7 +1084,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
}
_, err := client.StorageV1beta1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{})
_, err := client.StorageV1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
// Something created the CSINode instance after we checked that it did not exist.
// Make the caller retry PrepareDependentObjects by returning a Conflict error
@ -1093,7 +1093,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
return err
}
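Because Conflict is used as a retry signal, a caller might look like this minimal sketch (the retry bound is an assumption):
	// Illustrative retry: Conflict here means another writer won the race to
	// create the CSINode, so re-running PrepareDependentObjects should succeed.
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = strategy.PrepareDependentObjects(node, client); !apierrors.IsConflict(err) {
			break // nil, or a non-retryable error
		}
	}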
func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, client clientset.Interface) error {
func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, client clientset.Interface) error {
for driverName, allocatable := range s.CsiNodeAllocatable {
found := false
for i, driver := range csiNode.Spec.Drivers {
@ -1104,7 +1104,7 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode,
}
}
if !found {
d := storagev1beta1.CSINodeDriver{
d := storagev1.CSINodeDriver{
Name: driverName,
Allocatable: allocatable,
}
@ -1114,12 +1114,12 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode,
}
csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",")
_, err := client.StorageV1beta1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{})
_, err := client.StorageV1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{})
return err
}
func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return s.createCSINode(node.Name, client)
@ -1130,7 +1130,7 @@ func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client
}
func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return nil
@ -1303,7 +1303,7 @@ func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: "k8s.gcr.io/pause:3.2",
Image: "k8s.gcr.io/pause:3.4.1",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@ -1369,11 +1369,28 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
pv.Status.Phase = v1.VolumeBound
// bind pvc to "pv-$i"
// pvc.Spec.VolumeName = pv.Name
pvc.Spec.VolumeName = pv.Name
pvc.Status.Phase = v1.ClaimBound
} else {
pv.Status.Phase = v1.VolumeAvailable
}
// Create the PVC first, as it's referenced by the PV when `bindVolume` is true.
if err := CreatePersistentVolumeClaimWithRetries(client, namespace, pvc); err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error creating PVC: %s", err)
return
}
// We need to update the statuses separately, as creating a PV/PVC resets the status to its default.
if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error updating PVC status: %s", err)
return
}
if err := CreatePersistentVolumeWithRetries(client, pv); err != nil {
lock.Lock()
defer lock.Unlock()
@ -1388,19 +1405,6 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
return
}
if err := CreatePersistentVolumeClaimWithRetries(client, namespace, pvc); err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error creating PVC: %s", err)
return
}
if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error updating PVC status: %s", err)
return
}
// pod
pod := podTemplate.DeepCopy()
pod.Spec.Volumes = []v1.Volume{
@ -1482,10 +1486,10 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC
func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
return func(client clientset.Interface, namespace string, podCount int) error {
volumeBindingMode := storage.VolumeBindingWaitForFirstConsumer
storageClass := &storage.StorageClass{
volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer
storageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "storage-class-1",
Name: "storagev1-class-1",
},
Provisioner: "kubernetes.io/gce-pd",
VolumeBindingMode: &volumeBindingMode,
@ -1493,7 +1497,7 @@ func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFac
claimTemplate := makeUnboundPersistentVolumeClaim(storageClass.Name)
if err := CreateStorageClassWithRetries(client, storageClass); err != nil {
return fmt.Errorf("failed to create storage class: %v", err)
return fmt.Errorf("failed to create storagev1 class: %v", err)
}
factoryWithStorageClass := func(i int) *v1.PersistentVolume {
@ -1721,7 +1725,7 @@ type DaemonConfig struct {
func (config *DaemonConfig) Run() error {
if config.Image == "" {
config.Image = "k8s.gcr.io/pause:3.2"
config.Image = "k8s.gcr.io/pause:3.4.1"
}
nameLabel := map[string]string{
"name": config.Name + "-daemon",