mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00
rebase: update kubernetes to v1.21.2
Updated kubernetes packages to latest release. resizefs package has been included into k8s.io/mount-utils package. updated code to use the same. Updates: #1968 Signed-off-by: Rakshith R <rar@redhat.com>
This commit is contained in:
550
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions
generated
vendored
550
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions
generated
vendored
@ -1,274 +1,276 @@
|
||||
rules:
|
||||
- selectorRegexp: k8s[.]io/kubernetes/pkg/
|
||||
allowedPrefixes:
|
||||
- k8s.io/kubernetes/pkg/api/legacyscheme
|
||||
- k8s.io/kubernetes/pkg/api/service
|
||||
- k8s.io/kubernetes/pkg/api/v1/pod
|
||||
- k8s.io/kubernetes/pkg/api/v1/resource
|
||||
- k8s.io/kubernetes/pkg/api/v1/service
|
||||
- k8s.io/kubernetes/pkg/api/pod
|
||||
- k8s.io/kubernetes/pkg/apis/apps
|
||||
- k8s.io/kubernetes/pkg/apis/apps/validation
|
||||
- k8s.io/kubernetes/pkg/apis/autoscaling
|
||||
- k8s.io/kubernetes/pkg/apis/batch
|
||||
- k8s.io/kubernetes/pkg/apis/certificates
|
||||
- k8s.io/kubernetes/pkg/apis/certificates/v1
|
||||
- k8s.io/kubernetes/pkg/apis/core
|
||||
- k8s.io/kubernetes/pkg/apis/core/helper
|
||||
- k8s.io/kubernetes/pkg/apis/core/install
|
||||
- k8s.io/kubernetes/pkg/apis/core/pods
|
||||
- k8s.io/kubernetes/pkg/apis/core/v1
|
||||
- k8s.io/kubernetes/pkg/apis/core/v1/helper
|
||||
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
|
||||
- k8s.io/kubernetes/pkg/apis/core/validation
|
||||
- k8s.io/kubernetes/pkg/apis/extensions
|
||||
- k8s.io/kubernetes/pkg/apis/networking
|
||||
- k8s.io/kubernetes/pkg/apis/policy
|
||||
- k8s.io/kubernetes/pkg/apis/policy/validation
|
||||
- k8s.io/kubernetes/pkg/apis/scheduling
|
||||
- k8s.io/kubernetes/pkg/apis/storage/v1/util
|
||||
- k8s.io/kubernetes/pkg/capabilities
|
||||
- k8s.io/kubernetes/pkg/client/conditions
|
||||
- k8s.io/kubernetes/pkg/cloudprovider/providers
|
||||
- k8s.io/kubernetes/pkg/controller
|
||||
- k8s.io/kubernetes/pkg/controller/deployment/util
|
||||
- k8s.io/kubernetes/pkg/controller/nodelifecycle
|
||||
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
|
||||
- k8s.io/kubernetes/pkg/controller/service
|
||||
- k8s.io/kubernetes/pkg/controller/util/node
|
||||
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
|
||||
- k8s.io/kubernetes/pkg/controller/volume/scheduling
|
||||
- k8s.io/kubernetes/pkg/credentialprovider
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/aws
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/azure
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/gcp
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/secrets
|
||||
- k8s.io/kubernetes/pkg/features
|
||||
- k8s.io/kubernetes/pkg/fieldpath
|
||||
- k8s.io/kubernetes/pkg/kubectl
|
||||
- k8s.io/kubernetes/pkg/kubectl/apps
|
||||
- k8s.io/kubernetes/pkg/kubectl/describe
|
||||
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
|
||||
- k8s.io/kubernetes/pkg/kubectl/scheme
|
||||
- k8s.io/kubernetes/pkg/kubectl/util
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/certificate
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/deployment
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/event
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/podutils
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/qos
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/rbac
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/resource
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/slice
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/storage
|
||||
- k8s.io/kubernetes/pkg/kubelet
|
||||
- k8s.io/kubernetes/pkg/kubelet/apis
|
||||
- k8s.io/kubernetes/pkg/kubelet/apis/config
|
||||
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
|
||||
- k8s.io/kubernetes/pkg/kubelet/cadvisor
|
||||
- k8s.io/kubernetes/pkg/kubelet/certificate
|
||||
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpoint
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
|
||||
- k8s.io/kubernetes/pkg/kubelet/cloudresource
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/util
|
||||
- k8s.io/kubernetes/pkg/kubelet/config
|
||||
- k8s.io/kubernetes/pkg/kubelet/configmap
|
||||
- k8s.io/kubernetes/pkg/kubelet/container
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
|
||||
- k8s.io/kubernetes/pkg/kubelet/envvars
|
||||
- k8s.io/kubernetes/pkg/kubelet/eviction
|
||||
- k8s.io/kubernetes/pkg/kubelet/eviction/api
|
||||
- k8s.io/kubernetes/pkg/kubelet/events
|
||||
- k8s.io/kubernetes/pkg/kubelet/images
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
|
||||
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
|
||||
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
|
||||
- k8s.io/kubernetes/pkg/kubelet/leaky
|
||||
- k8s.io/kubernetes/pkg/kubelet/lifecycle
|
||||
- k8s.io/kubernetes/pkg/kubelet/logs
|
||||
- k8s.io/kubernetes/pkg/kubelet/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/network/dns
|
||||
- k8s.io/kubernetes/pkg/kubelet/nodelease
|
||||
- k8s.io/kubernetes/pkg/kubelet/nodestatus
|
||||
- k8s.io/kubernetes/pkg/kubelet/oom
|
||||
- k8s.io/kubernetes/pkg/kubelet/pleg
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
|
||||
- k8s.io/kubernetes/pkg/kubelet/pod
|
||||
- k8s.io/kubernetes/pkg/kubelet/preemption
|
||||
- k8s.io/kubernetes/pkg/kubelet/prober
|
||||
- k8s.io/kubernetes/pkg/kubelet/prober/results
|
||||
- k8s.io/kubernetes/pkg/kubelet/qos
|
||||
- k8s.io/kubernetes/pkg/kubelet/remote
|
||||
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
|
||||
- k8s.io/kubernetes/pkg/kubelet/server
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/portforward
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/stats
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/streaming
|
||||
- k8s.io/kubernetes/pkg/kubelet/stats
|
||||
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
|
||||
- k8s.io/kubernetes/pkg/kubelet/status
|
||||
- k8s.io/kubernetes/pkg/kubelet/secret
|
||||
- k8s.io/kubernetes/pkg/kubelet/sysctl
|
||||
- k8s.io/kubernetes/pkg/kubelet/types
|
||||
- k8s.io/kubernetes/pkg/kubelet/token
|
||||
- k8s.io/kubernetes/pkg/kubelet/util
|
||||
- k8s.io/kubernetes/pkg/kubelet/util/format
|
||||
- k8s.io/kubernetes/pkg/kubelet/util/manager
|
||||
- k8s.io/kubernetes/pkg/kubelet/util/store
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
|
||||
- k8s.io/kubernetes/pkg/kubemark
|
||||
- k8s.io/kubernetes/pkg/cluster/ports
|
||||
- k8s.io/kubernetes/pkg/probe
|
||||
- k8s.io/kubernetes/pkg/probe/exec
|
||||
- k8s.io/kubernetes/pkg/probe/http
|
||||
- k8s.io/kubernetes/pkg/probe/tcp
|
||||
- k8s.io/kubernetes/pkg/proxy
|
||||
- k8s.io/kubernetes/pkg/proxy/apis
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
|
||||
- k8s.io/kubernetes/pkg/proxy/config
|
||||
- k8s.io/kubernetes/pkg/proxy/healthcheck
|
||||
- k8s.io/kubernetes/pkg/proxy/iptables
|
||||
- k8s.io/kubernetes/pkg/proxy/ipvs
|
||||
- k8s.io/kubernetes/pkg/proxy/metaproxier
|
||||
- k8s.io/kubernetes/pkg/proxy/metrics
|
||||
- k8s.io/kubernetes/pkg/proxy/userspace
|
||||
- k8s.io/kubernetes/pkg/proxy/util
|
||||
- k8s.io/kubernetes/pkg/registry/core/service/allocator
|
||||
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
|
||||
- k8s.io/kubernetes/pkg/scheduler/api
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
|
||||
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
|
||||
- k8s.io/kubernetes/pkg/scheduler/listers
|
||||
- k8s.io/kubernetes/pkg/scheduler/metrics
|
||||
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
|
||||
- k8s.io/kubernetes/pkg/scheduler/util
|
||||
- k8s.io/kubernetes/pkg/scheduler/volumebinder
|
||||
- k8s.io/kubernetes/pkg/security/apparmor
|
||||
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
|
||||
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
|
||||
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
|
||||
- k8s.io/kubernetes/pkg/securitycontext
|
||||
- k8s.io/kubernetes/pkg/serviceaccount
|
||||
- k8s.io/kubernetes/pkg/util/async
|
||||
- k8s.io/kubernetes/pkg/util/bandwidth
|
||||
- k8s.io/kubernetes/pkg/util/config
|
||||
- k8s.io/kubernetes/pkg/util/configz
|
||||
- k8s.io/kubernetes/pkg/util/conntrack
|
||||
- k8s.io/kubernetes/pkg/util/env
|
||||
- k8s.io/kubernetes/pkg/util/filesystem
|
||||
- k8s.io/kubernetes/pkg/util/flag
|
||||
- k8s.io/kubernetes/pkg/util/flock
|
||||
- k8s.io/kubernetes/pkg/util/goroutinemap
|
||||
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
|
||||
- k8s.io/kubernetes/pkg/util/hash
|
||||
- k8s.io/kubernetes/pkg/util/ipset
|
||||
- k8s.io/kubernetes/pkg/util/iptables
|
||||
- k8s.io/kubernetes/pkg/util/ipvs
|
||||
- k8s.io/kubernetes/pkg/util/labels
|
||||
- k8s.io/kubernetes/pkg/util/node
|
||||
- k8s.io/kubernetes/pkg/util/oom
|
||||
- k8s.io/kubernetes/pkg/util/parsers
|
||||
- k8s.io/kubernetes/pkg/util/pod
|
||||
- k8s.io/kubernetes/pkg/util/procfs
|
||||
- k8s.io/kubernetes/pkg/util/removeall
|
||||
- k8s.io/kubernetes/pkg/util/resizefs
|
||||
- k8s.io/kubernetes/pkg/util/rlimit
|
||||
- k8s.io/kubernetes/pkg/util/selinux
|
||||
- k8s.io/kubernetes/pkg/util/slice
|
||||
- k8s.io/kubernetes/pkg/util/sysctl
|
||||
- k8s.io/kubernetes/pkg/util/system
|
||||
- k8s.io/kubernetes/pkg/util/tail
|
||||
- k8s.io/kubernetes/pkg/util/taints
|
||||
- k8s.io/kubernetes/pkg/volume
|
||||
- k8s.io/kubernetes/pkg/volume/util
|
||||
- k8s.io/kubernetes/pkg/volume/util/fs
|
||||
- k8s.io/kubernetes/pkg/volume/util/fsquota
|
||||
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
|
||||
- k8s.io/kubernetes/pkg/volume/util/subpath
|
||||
- k8s.io/kubernetes/pkg/volume/util/types
|
||||
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
|
||||
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
|
||||
- k8s.io/kubernetes/pkg/apis/authentication
|
||||
- k8s.io/kubernetes/pkg/apis/authentication/v1
|
||||
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
|
||||
- k8s.io/kubernetes/pkg/apis/storage/v1
|
||||
- k8s.io/kubernetes/pkg/scheduler/internal/cache
|
||||
- selectorRegexp: k8s[.]io/kubernetes/test/
|
||||
allowedPrefixes:
|
||||
- k8s.io/kubernetes/test/e2e/framework
|
||||
- k8s.io/kubernetes/test/e2e/framework/auth
|
||||
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
|
||||
- k8s.io/kubernetes/test/e2e/framework/kubectl
|
||||
- k8s.io/kubernetes/test/e2e/framework/log
|
||||
- k8s.io/kubernetes/test/e2e/framework/metrics
|
||||
- k8s.io/kubernetes/test/e2e/framework/network
|
||||
- k8s.io/kubernetes/test/e2e/framework/node
|
||||
- k8s.io/kubernetes/test/e2e/framework/pod
|
||||
- k8s.io/kubernetes/test/e2e/framework/rc
|
||||
- k8s.io/kubernetes/test/e2e/framework/resource
|
||||
- k8s.io/kubernetes/test/e2e/framework/service
|
||||
- k8s.io/kubernetes/test/e2e/framework/ssh
|
||||
- k8s.io/kubernetes/test/e2e/framework/testfiles
|
||||
- k8s.io/kubernetes/test/e2e/framework/websocket
|
||||
- k8s.io/kubernetes/test/e2e/manifest
|
||||
- k8s.io/kubernetes/test/e2e/perftype
|
||||
- k8s.io/kubernetes/test/e2e/storage/utils
|
||||
- k8s.io/kubernetes/test/e2e/system
|
||||
- k8s.io/kubernetes/test/utils
|
||||
- k8s.io/kubernetes/test/utils/image
|
||||
# TODO: why is this here?
|
||||
- selectorRegexp: k8s[.]io/kubernetes/third_party/
|
||||
allowedPrefixes:
|
||||
- k8s.io/kubernetes/third_party/forked/golang/expansion
|
||||
rules:
|
||||
- selectorRegexp: k8s[.]io/kubernetes/pkg/
|
||||
allowedPrefixes:
|
||||
- k8s.io/kubernetes/pkg/api/legacyscheme
|
||||
- k8s.io/kubernetes/pkg/api/service
|
||||
- k8s.io/kubernetes/pkg/api/v1/pod
|
||||
- k8s.io/kubernetes/pkg/api/v1/resource
|
||||
- k8s.io/kubernetes/pkg/api/v1/service
|
||||
- k8s.io/kubernetes/pkg/api/pod
|
||||
- k8s.io/kubernetes/pkg/apis/apps
|
||||
- k8s.io/kubernetes/pkg/apis/apps/validation
|
||||
- k8s.io/kubernetes/pkg/apis/autoscaling
|
||||
- k8s.io/kubernetes/pkg/apis/batch
|
||||
- k8s.io/kubernetes/pkg/apis/certificates
|
||||
- k8s.io/kubernetes/pkg/apis/certificates/v1
|
||||
- k8s.io/kubernetes/pkg/apis/core
|
||||
- k8s.io/kubernetes/pkg/apis/core/helper
|
||||
- k8s.io/kubernetes/pkg/apis/core/install
|
||||
- k8s.io/kubernetes/pkg/apis/core/pods
|
||||
- k8s.io/kubernetes/pkg/apis/core/v1
|
||||
- k8s.io/kubernetes/pkg/apis/core/v1/helper
|
||||
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
|
||||
- k8s.io/kubernetes/pkg/apis/core/validation
|
||||
- k8s.io/kubernetes/pkg/apis/extensions
|
||||
- k8s.io/kubernetes/pkg/apis/networking
|
||||
- k8s.io/kubernetes/pkg/apis/policy
|
||||
- k8s.io/kubernetes/pkg/apis/policy/validation
|
||||
- k8s.io/kubernetes/pkg/apis/scheduling
|
||||
- k8s.io/kubernetes/pkg/apis/storage/v1/util
|
||||
- k8s.io/kubernetes/pkg/capabilities
|
||||
- k8s.io/kubernetes/pkg/client/conditions
|
||||
- k8s.io/kubernetes/pkg/cloudprovider/providers
|
||||
- k8s.io/kubernetes/pkg/controller
|
||||
- k8s.io/kubernetes/pkg/controller/deployment/util
|
||||
- k8s.io/kubernetes/pkg/controller/nodelifecycle
|
||||
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
|
||||
- k8s.io/kubernetes/pkg/controller/service
|
||||
- k8s.io/kubernetes/pkg/controller/util/node
|
||||
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
|
||||
- k8s.io/kubernetes/pkg/controller/volume/scheduling
|
||||
- k8s.io/kubernetes/pkg/credentialprovider
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/aws
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/azure
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/gcp
|
||||
- k8s.io/kubernetes/pkg/credentialprovider/secrets
|
||||
- k8s.io/kubernetes/pkg/features
|
||||
- k8s.io/kubernetes/pkg/fieldpath
|
||||
- k8s.io/kubernetes/pkg/kubectl
|
||||
- k8s.io/kubernetes/pkg/kubectl/apps
|
||||
- k8s.io/kubernetes/pkg/kubectl/describe
|
||||
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
|
||||
- k8s.io/kubernetes/pkg/kubectl/scheme
|
||||
- k8s.io/kubernetes/pkg/kubectl/util
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/certificate
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/deployment
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/event
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/podutils
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/qos
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/rbac
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/resource
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/slice
|
||||
- k8s.io/kubernetes/pkg/kubectl/util/storage
|
||||
- k8s.io/kubernetes/pkg/kubelet
|
||||
- k8s.io/kubernetes/pkg/kubelet/apis
|
||||
- k8s.io/kubernetes/pkg/kubelet/apis/config
|
||||
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
|
||||
- k8s.io/kubernetes/pkg/kubelet/cadvisor
|
||||
- k8s.io/kubernetes/pkg/kubelet/certificate
|
||||
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpoint
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
|
||||
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
|
||||
- k8s.io/kubernetes/pkg/kubelet/cloudresource
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
|
||||
- k8s.io/kubernetes/pkg/kubelet/cm/util
|
||||
- k8s.io/kubernetes/pkg/kubelet/config
|
||||
- k8s.io/kubernetes/pkg/kubelet/configmap
|
||||
- k8s.io/kubernetes/pkg/kubelet/container
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
|
||||
- k8s.io/kubernetes/pkg/kubelet/envvars
|
||||
- k8s.io/kubernetes/pkg/kubelet/eviction
|
||||
- k8s.io/kubernetes/pkg/kubelet/eviction/api
|
||||
- k8s.io/kubernetes/pkg/kubelet/events
|
||||
- k8s.io/kubernetes/pkg/kubelet/images
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
|
||||
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
|
||||
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
|
||||
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
|
||||
- k8s.io/kubernetes/pkg/kubelet/leaky
|
||||
- k8s.io/kubernetes/pkg/kubelet/lifecycle
|
||||
- k8s.io/kubernetes/pkg/kubelet/logs
|
||||
- k8s.io/kubernetes/pkg/kubelet/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/network/dns
|
||||
- k8s.io/kubernetes/pkg/kubelet/nodelease
|
||||
- k8s.io/kubernetes/pkg/kubelet/nodestatus
|
||||
- k8s.io/kubernetes/pkg/kubelet/oom
|
||||
- k8s.io/kubernetes/pkg/kubelet/pleg
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
|
||||
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
|
||||
- k8s.io/kubernetes/pkg/kubelet/pod
|
||||
- k8s.io/kubernetes/pkg/kubelet/preemption
|
||||
- k8s.io/kubernetes/pkg/kubelet/prober
|
||||
- k8s.io/kubernetes/pkg/kubelet/prober/results
|
||||
- k8s.io/kubernetes/pkg/kubelet/qos
|
||||
- k8s.io/kubernetes/pkg/kubelet/remote
|
||||
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
|
||||
- k8s.io/kubernetes/pkg/kubelet/server
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/portforward
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/stats
|
||||
- k8s.io/kubernetes/pkg/kubelet/server/streaming
|
||||
- k8s.io/kubernetes/pkg/kubelet/stats
|
||||
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
|
||||
- k8s.io/kubernetes/pkg/kubelet/status
|
||||
- k8s.io/kubernetes/pkg/kubelet/secret
|
||||
- k8s.io/kubernetes/pkg/kubelet/sysctl
|
||||
- k8s.io/kubernetes/pkg/kubelet/types
|
||||
- k8s.io/kubernetes/pkg/kubelet/token
|
||||
- k8s.io/kubernetes/pkg/kubelet/util
|
||||
- k8s.io/kubernetes/pkg/kubelet/util/format
|
||||
- k8s.io/kubernetes/pkg/kubelet/util/manager
|
||||
- k8s.io/kubernetes/pkg/kubelet/util/store
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
|
||||
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
|
||||
- k8s.io/kubernetes/pkg/kubemark
|
||||
- k8s.io/kubernetes/pkg/cluster/ports
|
||||
- k8s.io/kubernetes/pkg/probe
|
||||
- k8s.io/kubernetes/pkg/probe/exec
|
||||
- k8s.io/kubernetes/pkg/probe/http
|
||||
- k8s.io/kubernetes/pkg/probe/tcp
|
||||
- k8s.io/kubernetes/pkg/proxy
|
||||
- k8s.io/kubernetes/pkg/proxy/apis
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
|
||||
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
|
||||
- k8s.io/kubernetes/pkg/proxy/config
|
||||
- k8s.io/kubernetes/pkg/proxy/healthcheck
|
||||
- k8s.io/kubernetes/pkg/proxy/iptables
|
||||
- k8s.io/kubernetes/pkg/proxy/ipvs
|
||||
- k8s.io/kubernetes/pkg/proxy/metaproxier
|
||||
- k8s.io/kubernetes/pkg/proxy/metrics
|
||||
- k8s.io/kubernetes/pkg/proxy/userspace
|
||||
- k8s.io/kubernetes/pkg/proxy/util
|
||||
- k8s.io/kubernetes/pkg/registry/core/service/allocator
|
||||
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
|
||||
- k8s.io/kubernetes/pkg/scheduler/api
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
|
||||
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
|
||||
- k8s.io/kubernetes/pkg/scheduler/internal/heap
|
||||
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
|
||||
- k8s.io/kubernetes/pkg/scheduler/internal/queue
|
||||
- k8s.io/kubernetes/pkg/scheduler/listers
|
||||
- k8s.io/kubernetes/pkg/scheduler/metrics
|
||||
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
|
||||
- k8s.io/kubernetes/pkg/scheduler/util
|
||||
- k8s.io/kubernetes/pkg/scheduler/volumebinder
|
||||
- k8s.io/kubernetes/pkg/security/apparmor
|
||||
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
|
||||
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
|
||||
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
|
||||
- k8s.io/kubernetes/pkg/securitycontext
|
||||
- k8s.io/kubernetes/pkg/serviceaccount
|
||||
- k8s.io/kubernetes/pkg/util/async
|
||||
- k8s.io/kubernetes/pkg/util/bandwidth
|
||||
- k8s.io/kubernetes/pkg/util/config
|
||||
- k8s.io/kubernetes/pkg/util/configz
|
||||
- k8s.io/kubernetes/pkg/util/conntrack
|
||||
- k8s.io/kubernetes/pkg/util/env
|
||||
- k8s.io/kubernetes/pkg/util/filesystem
|
||||
- k8s.io/kubernetes/pkg/util/flag
|
||||
- k8s.io/kubernetes/pkg/util/flock
|
||||
- k8s.io/kubernetes/pkg/util/goroutinemap
|
||||
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
|
||||
- k8s.io/kubernetes/pkg/util/hash
|
||||
- k8s.io/kubernetes/pkg/util/ipset
|
||||
- k8s.io/kubernetes/pkg/util/iptables
|
||||
- k8s.io/kubernetes/pkg/util/ipvs
|
||||
- k8s.io/kubernetes/pkg/util/labels
|
||||
- k8s.io/kubernetes/pkg/util/node
|
||||
- k8s.io/kubernetes/pkg/util/oom
|
||||
- k8s.io/kubernetes/pkg/util/parsers
|
||||
- k8s.io/kubernetes/pkg/util/pod
|
||||
- k8s.io/kubernetes/pkg/util/procfs
|
||||
- k8s.io/kubernetes/pkg/util/removeall
|
||||
- k8s.io/kubernetes/pkg/util/resizefs
|
||||
- k8s.io/kubernetes/pkg/util/rlimit
|
||||
- k8s.io/kubernetes/pkg/util/selinux
|
||||
- k8s.io/kubernetes/pkg/util/slice
|
||||
- k8s.io/kubernetes/pkg/util/sysctl
|
||||
- k8s.io/kubernetes/pkg/util/system
|
||||
- k8s.io/kubernetes/pkg/util/tail
|
||||
- k8s.io/kubernetes/pkg/util/taints
|
||||
- k8s.io/kubernetes/pkg/volume
|
||||
- k8s.io/kubernetes/pkg/volume/util
|
||||
- k8s.io/kubernetes/pkg/volume/util/fs
|
||||
- k8s.io/kubernetes/pkg/volume/util/fsquota
|
||||
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
|
||||
- k8s.io/kubernetes/pkg/volume/util/subpath
|
||||
- k8s.io/kubernetes/pkg/volume/util/types
|
||||
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
|
||||
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
|
||||
- k8s.io/kubernetes/pkg/apis/authentication
|
||||
- k8s.io/kubernetes/pkg/apis/authentication/v1
|
||||
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
|
||||
- k8s.io/kubernetes/pkg/apis/storage/v1
|
||||
- k8s.io/kubernetes/pkg/scheduler/internal/cache
|
||||
- selectorRegexp: k8s[.]io/kubernetes/test/
|
||||
allowedPrefixes:
|
||||
- k8s.io/kubernetes/test/e2e/framework
|
||||
- k8s.io/kubernetes/test/e2e/framework/auth
|
||||
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
|
||||
- k8s.io/kubernetes/test/e2e/framework/kubectl
|
||||
- k8s.io/kubernetes/test/e2e/framework/log
|
||||
- k8s.io/kubernetes/test/e2e/framework/metrics
|
||||
- k8s.io/kubernetes/test/e2e/framework/network
|
||||
- k8s.io/kubernetes/test/e2e/framework/node
|
||||
- k8s.io/kubernetes/test/e2e/framework/pod
|
||||
- k8s.io/kubernetes/test/e2e/framework/rc
|
||||
- k8s.io/kubernetes/test/e2e/framework/resource
|
||||
- k8s.io/kubernetes/test/e2e/framework/service
|
||||
- k8s.io/kubernetes/test/e2e/framework/ssh
|
||||
- k8s.io/kubernetes/test/e2e/framework/testfiles
|
||||
- k8s.io/kubernetes/test/e2e/framework/websocket
|
||||
- k8s.io/kubernetes/test/e2e/manifest
|
||||
- k8s.io/kubernetes/test/e2e/perftype
|
||||
- k8s.io/kubernetes/test/e2e/storage/utils
|
||||
- k8s.io/kubernetes/test/e2e/system
|
||||
- k8s.io/kubernetes/test/utils
|
||||
- k8s.io/kubernetes/test/utils/image
|
||||
# TODO: why is this here?
|
||||
- selectorRegexp: k8s[.]io/kubernetes/third_party/
|
||||
allowedPrefixes:
|
||||
- k8s.io/kubernetes/third_party/forked/golang/expansion
|
||||
|
146
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD
generated
vendored
146
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD
generated
vendored
@ -1,146 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cleanup.go",
|
||||
"exec_util.go",
|
||||
"expect.go",
|
||||
"flake_reporting_util.go",
|
||||
"framework.go",
|
||||
"log.go",
|
||||
"log_size_monitoring.go",
|
||||
"nodes_util.go",
|
||||
"pods.go",
|
||||
"ports.go",
|
||||
"provider.go",
|
||||
"psp.go",
|
||||
"resource_usage_gatherer.go",
|
||||
"size.go",
|
||||
"test_context.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/config:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/discovery:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/rest:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/scale:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
|
||||
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
|
||||
"//staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1:go_default_library",
|
||||
"//test/e2e/framework/auth:go_default_library",
|
||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||
"//test/e2e/framework/kubectl:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e/framework/node:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/framework/ssh:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega/types:go_default_library",
|
||||
"//vendor/github.com/pkg/errors:go_default_library",
|
||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["log_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//test/e2e/framework/auth:all-srcs",
|
||||
"//test/e2e/framework/autoscaling:all-srcs",
|
||||
"//test/e2e/framework/config:all-srcs",
|
||||
"//test/e2e/framework/deployment:all-srcs",
|
||||
"//test/e2e/framework/endpoints:all-srcs",
|
||||
"//test/e2e/framework/endpointslice:all-srcs",
|
||||
"//test/e2e/framework/events:all-srcs",
|
||||
"//test/e2e/framework/ginkgowrapper:all-srcs",
|
||||
"//test/e2e/framework/gpu:all-srcs",
|
||||
"//test/e2e/framework/ingress:all-srcs",
|
||||
"//test/e2e/framework/job:all-srcs",
|
||||
"//test/e2e/framework/kubectl:all-srcs",
|
||||
"//test/e2e/framework/kubelet:all-srcs",
|
||||
"//test/e2e/framework/kubesystem:all-srcs",
|
||||
"//test/e2e/framework/log:all-srcs",
|
||||
"//test/e2e/framework/manifest:all-srcs",
|
||||
"//test/e2e/framework/metrics:all-srcs",
|
||||
"//test/e2e/framework/network:all-srcs",
|
||||
"//test/e2e/framework/node:all-srcs",
|
||||
"//test/e2e/framework/perf:all-srcs",
|
||||
"//test/e2e/framework/pod:all-srcs",
|
||||
"//test/e2e/framework/providers/aws:all-srcs",
|
||||
"//test/e2e/framework/providers/azure:all-srcs",
|
||||
"//test/e2e/framework/providers/gce:all-srcs",
|
||||
"//test/e2e/framework/providers/kubemark:all-srcs",
|
||||
"//test/e2e/framework/providers/openstack:all-srcs",
|
||||
"//test/e2e/framework/providers/vsphere:all-srcs",
|
||||
"//test/e2e/framework/pv:all-srcs",
|
||||
"//test/e2e/framework/rc:all-srcs",
|
||||
"//test/e2e/framework/replicaset:all-srcs",
|
||||
"//test/e2e/framework/resource:all-srcs",
|
||||
"//test/e2e/framework/security:all-srcs",
|
||||
"//test/e2e/framework/service:all-srcs",
|
||||
"//test/e2e/framework/skipper:all-srcs",
|
||||
"//test/e2e/framework/ssh:all-srcs",
|
||||
"//test/e2e/framework/statefulset:all-srcs",
|
||||
"//test/e2e/framework/testfiles:all-srcs",
|
||||
"//test/e2e/framework/timer:all-srcs",
|
||||
"//test/e2e/framework/volume:all-srcs",
|
||||
"//test/e2e/framework/websocket:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
33
vendor/k8s.io/kubernetes/test/e2e/framework/auth/BUILD
generated
vendored
33
vendor/k8s.io/kubernetes/test/e2e/framework/auth/BUILD
generated
vendored
@ -1,33 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["helpers.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/auth",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//vendor/github.com/pkg/errors:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
32
vendor/k8s.io/kubernetes/test/e2e/framework/config/BUILD
generated
vendored
32
vendor/k8s.io/kubernetes/test/e2e/framework/config/BUILD
generated
vendored
@ -1,32 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["config.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/config",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["config_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
21
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
21
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
@ -119,6 +119,9 @@ type Framework struct {
|
||||
|
||||
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
|
||||
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection
|
||||
|
||||
// Timeouts contains the custom timeouts used during the test execution.
|
||||
Timeouts *TimeoutContext
|
||||
}
|
||||
|
||||
// AfterEachActionFunc is a function that can be called after each test
|
||||
@ -138,6 +141,13 @@ type Options struct {
|
||||
GroupVersion *schema.GroupVersion
|
||||
}
|
||||
|
||||
// NewFrameworkWithCustomTimeouts makes a framework with with custom timeouts.
|
||||
func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
|
||||
f := NewDefaultFramework(baseName)
|
||||
f.Timeouts = timeouts
|
||||
return f
|
||||
}
|
||||
|
||||
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
|
||||
// you (you can write additional before/after each functions).
|
||||
func NewDefaultFramework(baseName string) *Framework {
|
||||
@ -155,6 +165,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
|
||||
AddonResourceConstraints: make(map[string]ResourceConstraint),
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
Timeouts: NewTimeoutContextWithDefaults(),
|
||||
}
|
||||
|
||||
f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
|
||||
@ -202,10 +213,6 @@ func (f *Framework) BeforeEach() {
|
||||
ExpectNoError(err)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
// node.k8s.io is based on CRD, which is served only as JSON
|
||||
jsonConfig := config
|
||||
jsonConfig.ContentType = "application/json"
|
||||
ExpectNoError(err)
|
||||
|
||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||
// as they are required when creating a REST client.
|
||||
@ -618,12 +625,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
|
||||
return nil
|
||||
}
|
||||
|
||||
// KubeDescribe is wrapper function for ginkgo describe. Adds namespacing.
|
||||
// TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
|
||||
func KubeDescribe(text string, body func()) bool {
|
||||
return ginkgo.Describe("[k8s.io] "+text, body)
|
||||
}
|
||||
|
||||
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
|
||||
func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
|
||||
return ginkgo.It(text+" [Conformance]", body, timeout...)
|
||||
|
26
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/BUILD
generated
vendored
26
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/BUILD
generated
vendored
@ -1,26 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["wrapper.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
|
||||
deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
32
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/BUILD
generated
vendored
32
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/BUILD
generated
vendored
@ -1,32 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["kubectl_utils.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/kubectl",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
26
vendor/k8s.io/kubernetes/test/e2e/framework/log/BUILD
generated
vendored
26
vendor/k8s.io/kubernetes/test/e2e/framework/log/BUILD
generated
vendored
@ -1,26 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["logger.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/log",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
49
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD
generated
vendored
49
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD
generated
vendored
@ -1,49 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"api.go",
|
||||
"api_server_metrics.go",
|
||||
"cluster_autoscaler_metrics.go",
|
||||
"controller_manager_metrics.go",
|
||||
"e2e_metrics.go",
|
||||
"interesting_metrics.go",
|
||||
"kubelet_metrics.go",
|
||||
"latencies.go",
|
||||
"metrics_grabber.go",
|
||||
"pod.go",
|
||||
"scheduler_metrics.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
|
||||
deps = [
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/perftype:go_default_library",
|
||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
60
vendor/k8s.io/kubernetes/test/e2e/framework/node/BUILD
generated
vendored
60
vendor/k8s.io/kubernetes/test/e2e/framework/node/BUILD
generated
vendored
@ -1,60 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"resource.go",
|
||||
"runtimeclass.go",
|
||||
"wait.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/utils/net:go_default_library",
|
||||
"//vendor/k8s.io/utils/pointer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["wait_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
8
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go
generated
vendored
8
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go
generated
vendored
@ -584,6 +584,14 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
|
||||
return podLabels
|
||||
}
|
||||
|
||||
// RemoveTaintsOffNode removes a list of taints from the given node
|
||||
// It is simply a helper wrapper for RemoveTaintOffNode
|
||||
func RemoveTaintsOffNode(c clientset.Interface, nodeName string, taints []v1.Taint) {
|
||||
for _, taint := range taints {
|
||||
RemoveTaintOffNode(c, nodeName, taint)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveTaintOffNode removes the given taint from the given node.
|
||||
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
|
||||
err := removeNodeTaint(c, nodeName, nil, &taint)
|
||||
|
3
vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go
generated
vendored
3
vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go
generated
vendored
@ -202,6 +202,9 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
|
||||
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
|
||||
attempt := 0
|
||||
return func() (bool, error) {
|
||||
if allowedNotReadyNodes == -1 {
|
||||
return true, nil
|
||||
}
|
||||
attempt++
|
||||
var nodesNotReadyYet []v1.Node
|
||||
opts := metav1.ListOptions{
|
||||
|
57
vendor/k8s.io/kubernetes/test/e2e/framework/pod/BUILD
generated
vendored
57
vendor/k8s.io/kubernetes/test/e2e/framework/pod/BUILD
generated
vendored
@ -1,57 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"create.go",
|
||||
"delete.go",
|
||||
"node_selection.go",
|
||||
"resource.go",
|
||||
"wait.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["resource_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
],
|
||||
)
|
74
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go
generated
vendored
74
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go
generated
vendored
@ -145,13 +145,10 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: BusyBoxImage,
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", command},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &isPrivileged,
|
||||
},
|
||||
Name: "write-pod",
|
||||
Image: GetDefaultTestImage(),
|
||||
Command: GenerateScriptCmd(command),
|
||||
SecurityContext: GenerateContainerSecurityContext(isPrivileged),
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
@ -187,10 +184,6 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
|
||||
return &i
|
||||
}(1000)
|
||||
}
|
||||
image := imageutils.BusyBox
|
||||
if podConfig.ImageID != imageutils.None {
|
||||
image = podConfig.ImageID
|
||||
}
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
@ -200,28 +193,34 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
|
||||
Name: podName,
|
||||
Namespace: podConfig.NS,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
HostIPC: podConfig.HostIPC,
|
||||
HostPID: podConfig.HostPID,
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
FSGroup: podConfig.FsGroup,
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: imageutils.GetE2EImage(image),
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", podConfig.Command},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &podConfig.IsPrivileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
},
|
||||
Spec: *MakePodSpec(podConfig),
|
||||
}
|
||||
return podSpec, nil
|
||||
}
|
||||
|
||||
// MakePodSpec returns a PodSpec definition
|
||||
func MakePodSpec(podConfig *Config) *v1.PodSpec {
|
||||
image := imageutils.BusyBox
|
||||
if podConfig.ImageID != imageutils.None {
|
||||
image = podConfig.ImageID
|
||||
}
|
||||
podSpec := &v1.PodSpec{
|
||||
HostIPC: podConfig.HostIPC,
|
||||
HostPID: podConfig.HostPID,
|
||||
SecurityContext: GeneratePodSecurityContext(podConfig.FsGroup, podConfig.SeLinuxLabel),
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: GetTestImage(image),
|
||||
Command: GenerateScriptCmd(podConfig.Command),
|
||||
SecurityContext: GenerateContainerSecurityContext(podConfig.IsPrivileged),
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
}
|
||||
|
||||
if podConfig.PodFSGroupChangePolicy != nil {
|
||||
podSpec.Spec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
|
||||
podSpec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
|
||||
}
|
||||
|
||||
var volumeMounts = make([]v1.VolumeMount, 0)
|
||||
@ -247,13 +246,10 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
|
||||
volumeIndex++
|
||||
}
|
||||
|
||||
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
|
||||
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
|
||||
podSpec.Spec.Volumes = volumes
|
||||
if runtime.GOOS != "windows" {
|
||||
podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
|
||||
}
|
||||
podSpec.Containers[0].VolumeMounts = volumeMounts
|
||||
podSpec.Containers[0].VolumeDevices = volumeDevices
|
||||
podSpec.Volumes = volumes
|
||||
|
||||
SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
|
||||
return podSpec, nil
|
||||
SetNodeSelection(podSpec, podConfig.NodeSelection)
|
||||
return podSpec
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
14
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
@ -321,6 +321,20 @@ func podContainerFailed(c clientset.Interface, namespace, podName string, contai
|
||||
}
|
||||
}
|
||||
|
||||
func podContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if containerIndex > len(pod.Status.ContainerStatuses)-1 {
|
||||
return false, nil
|
||||
}
|
||||
containerStatus := pod.Status.ContainerStatuses[containerIndex]
|
||||
return *containerStatus.Started, nil
|
||||
}
|
||||
}
|
||||
|
||||
// LogPodStates logs basic info of provided pods for debugging.
|
||||
func LogPodStates(pods []v1.Pod) {
|
||||
// Find maximum widths for pod, node, and phase strings for column printing.
|
||||
|
120
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go
generated
vendored
Normal file
120
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pod
|
||||
|
||||
import (
|
||||
"flag"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
// NodeOSDistroIs returns true if the distro is the same as `--node-os-distro`
|
||||
// the package framework/pod can't import the framework package (see #81245)
|
||||
// we need to check if the --node-os-distro=windows is set and the framework package
|
||||
// is the one that's parsing the flags, as a workaround this method is looking for the same flag again
|
||||
// TODO: replace with `framework.NodeOSDistroIs` when #81245 is complete
|
||||
func NodeOSDistroIs(distro string) bool {
|
||||
var nodeOsDistro *flag.Flag = flag.Lookup("node-os-distro")
|
||||
if nodeOsDistro != nil && nodeOsDistro.Value.String() == distro {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GenerateScriptCmd generates the corresponding command lines to execute a command.
|
||||
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
|
||||
func GenerateScriptCmd(command string) []string {
|
||||
var commands []string
|
||||
if !NodeOSDistroIs("windows") {
|
||||
commands = []string{"/bin/sh", "-c", command}
|
||||
} else {
|
||||
commands = []string{"powershell", "/c", command}
|
||||
}
|
||||
return commands
|
||||
}
|
||||
|
||||
// GetDefaultTestImage returns the default test image based on OS.
|
||||
// If the node OS is windows, currently we return Agnhost image for Windows node
|
||||
// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
|
||||
// If the node OS is linux, return busybox image
|
||||
func GetDefaultTestImage() string {
|
||||
return imageutils.GetE2EImage(GetDefaultTestImageID())
|
||||
}
|
||||
|
||||
// GetDefaultTestImageID returns the default test image id based on OS.
|
||||
// If the node OS is windows, currently we return Agnhost image for Windows node
|
||||
// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
|
||||
// If the node OS is linux, return busybox image
|
||||
func GetDefaultTestImageID() int {
|
||||
return GetTestImageID(imageutils.BusyBox)
|
||||
}
|
||||
|
||||
// GetTestImage returns the image name with the given input
|
||||
// If the Node OS is windows, currently we return Agnhost image for Windows node
|
||||
// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
|
||||
func GetTestImage(id int) string {
|
||||
if NodeOSDistroIs("windows") {
|
||||
return imageutils.GetE2EImage(imageutils.Agnhost)
|
||||
}
|
||||
return imageutils.GetE2EImage(id)
|
||||
}
|
||||
|
||||
// GetTestImageID returns the image id with the given input
|
||||
// If the Node OS is windows, currently we return Agnhost image for Windows node
|
||||
// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
|
||||
func GetTestImageID(id int) int {
|
||||
if NodeOSDistroIs("windows") {
|
||||
return imageutils.Agnhost
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// GeneratePodSecurityContext generates the corresponding pod security context with the given inputs
|
||||
// If the Node OS is windows, currently we will ignore the inputs and return nil.
|
||||
// TODO: Will modify it after windows has its own security context
|
||||
func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
|
||||
if NodeOSDistroIs("windows") {
|
||||
return nil
|
||||
}
|
||||
return &v1.PodSecurityContext{
|
||||
FSGroup: fsGroup,
|
||||
SELinuxOptions: seLinuxOptions,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateContainerSecurityContext generates the corresponding container security context with the given inputs
|
||||
// If the Node OS is windows, currently we will ignore the inputs and return nil.
|
||||
// TODO: Will modify it after windows has its own security context
|
||||
func GenerateContainerSecurityContext(privileged bool) *v1.SecurityContext {
|
||||
if NodeOSDistroIs("windows") {
|
||||
return nil
|
||||
}
|
||||
return &v1.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
}
|
||||
}
|
||||
|
||||
// GetLinuxLabel returns the default SELinuxLabel based on OS.
|
||||
// If the node OS is windows, it will return nil
|
||||
func GetLinuxLabel() *v1.SELinuxOptions {
|
||||
if NodeOSDistroIs("windows") {
|
||||
return nil
|
||||
}
|
||||
return &v1.SELinuxOptions{
|
||||
Level: "s0:c0,c1"}
|
||||
}
|
32
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
32
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
@ -102,7 +102,14 @@ func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState stri
|
||||
// waiting. All pods that are in SUCCESS state are not counted.
|
||||
//
|
||||
// If ignoreLabels is not empty, pods matching this selector are ignored.
|
||||
//
|
||||
// If minPods or allowedNotReadyPods are -1, this method returns immediately
|
||||
// without waiting.
|
||||
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
|
||||
if minPods == -1 || allowedNotReadyPods == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ignoreSelector := labels.SelectorFromSet(map[string]string{})
|
||||
start := time.Now()
|
||||
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
|
||||
@ -203,14 +210,16 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
// WaitForPodCondition waits a pods to be matched to the given condition.
|
||||
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
|
||||
var lastPodError error
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
lastPodError = err
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
|
||||
return err
|
||||
} else {
|
||||
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
|
||||
}
|
||||
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
|
||||
continue
|
||||
}
|
||||
// log now so that current pod info is reported before calling `condition()`
|
||||
@ -223,6 +232,10 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
|
||||
return err
|
||||
}
|
||||
}
|
||||
if apierrors.IsNotFound(lastPodError) {
|
||||
// return for compatibility with other functions testing for IsNotFound
|
||||
return lastPodError
|
||||
}
|
||||
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
|
||||
}
|
||||
|
||||
@ -245,8 +258,8 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
|
||||
})
|
||||
}
|
||||
|
||||
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
||||
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
||||
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
|
||||
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
|
||||
@ -350,7 +363,7 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
|
||||
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout)
|
||||
}
|
||||
|
||||
// WaitTimeoutForPodReadyInNamespace waits the given timeout diration for the
|
||||
// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
|
||||
// specified pod to be ready and running.
|
||||
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
|
||||
@ -365,12 +378,12 @@ func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
|
||||
|
||||
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
|
||||
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
|
||||
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
|
||||
return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
|
||||
}
|
||||
|
||||
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
|
||||
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
|
||||
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
|
||||
return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
|
||||
}
|
||||
|
||||
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
|
||||
@ -535,3 +548,8 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
|
||||
func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error {
|
||||
return wait.PollImmediate(poll, timeout, podContainerFailed(c, namespace, podName, containerIndex, reason))
|
||||
}
|
||||
|
||||
// WaitForPodContainerStarted waits for the given Pod container to start, after a successful run of the startupProbe.
|
||||
func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
|
||||
return wait.PollImmediate(poll, timeout, podContainerStarted(c, namespace, podName, containerIndex))
|
||||
}
|
||||
|
6
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored
@ -96,12 +96,12 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
|
||||
return p
|
||||
}
|
||||
|
||||
// CreateSync creates a new pod according to the framework specifications, and wait for it to start.
|
||||
// CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
|
||||
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
|
||||
namespace := c.f.Namespace.Name
|
||||
p := c.Create(pod)
|
||||
ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
|
||||
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
|
||||
ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, PodStartTimeout))
|
||||
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
|
||||
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
return p
|
||||
|
35
vendor/k8s.io/kubernetes/test/e2e/framework/pv/BUILD
generated
vendored
35
vendor/k8s.io/kubernetes/test/e2e/framework/pv/BUILD
generated
vendored
@ -1,35 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["pv.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/pv",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/skipper:go_default_library",
|
||||
"//test/e2e/storage/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
45
vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go
generated
vendored
45
vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go
generated
vendored
@ -19,9 +19,10 @@ package framework
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -38,18 +39,6 @@ const (
|
||||
pdRetryTimeout = 5 * time.Minute
|
||||
pdRetryPollTime = 5 * time.Second
|
||||
|
||||
// PVBindingTimeout is how long PVs have to become bound.
|
||||
PVBindingTimeout = 3 * time.Minute
|
||||
|
||||
// ClaimBindingTimeout is how long claims have to become bound.
|
||||
ClaimBindingTimeout = 3 * time.Minute
|
||||
|
||||
// PVReclaimingTimeout is how long PVs have to become reclaimed.
|
||||
PVReclaimingTimeout = 3 * time.Minute
|
||||
|
||||
// PVDeletingTimeout is how long PVs have to become deleted.
|
||||
PVDeletingTimeout = 3 * time.Minute
|
||||
|
||||
// VolumeSelectorKey is the key for volume selector.
|
||||
VolumeSelectorKey = "e2e-pv-pool"
|
||||
|
||||
@ -223,7 +212,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
|
||||
// DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV
|
||||
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
|
||||
// phase value to expect for the pv bound to the to-be-deleted claim.
|
||||
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
pvname := pvc.Spec.VolumeName
|
||||
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
|
||||
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
|
||||
@ -233,7 +222,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
|
||||
|
||||
// Wait for the PV's phase to return to be `expectPVPhase`
|
||||
framework.Logf("Waiting for reclaim process to complete.")
|
||||
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
|
||||
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
|
||||
}
|
||||
@ -266,7 +255,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
|
||||
// Available, Bound).
|
||||
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
|
||||
// available pvs.
|
||||
func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
|
||||
var boundPVs, deletedPVCs int
|
||||
|
||||
for pvName := range pvols {
|
||||
@ -287,7 +276,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
|
||||
// get the pvc for the delete call below
|
||||
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
|
||||
if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !apierrors.IsNotFound(err) {
|
||||
@ -445,17 +434,17 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
|
||||
}
|
||||
|
||||
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
|
||||
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
|
||||
func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
|
||||
// Wait for newly created PVC to bind to the PV
|
||||
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
|
||||
}
|
||||
|
||||
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
|
||||
// since the PVC is already bound.
|
||||
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, PVBindingTimeout)
|
||||
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
|
||||
}
|
||||
@ -493,7 +482,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
|
||||
// to situations where the maximum wait times are reached several times in succession,
|
||||
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
|
||||
// small.
|
||||
func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
|
||||
func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
|
||||
var actualBinds int
|
||||
expectedBinds := len(pvols)
|
||||
if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
|
||||
@ -501,7 +490,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
|
||||
}
|
||||
|
||||
for pvName := range pvols {
|
||||
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
|
||||
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
|
||||
if err != nil && len(pvols) > len(claims) {
|
||||
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
|
||||
framework.Logf(" This may be ok since there are more pvs than pvcs")
|
||||
@ -524,7 +513,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
|
||||
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
|
||||
}
|
||||
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, ClaimBindingTimeout)
|
||||
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
|
||||
}
|
||||
@ -746,7 +735,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
|
||||
if len(pvcNames) == 0 {
|
||||
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
|
||||
}
|
||||
framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
|
||||
framework.Logf("Waiting up to timeout=%v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
phaseFoundInAllClaims := true
|
||||
for _, pvcName := range pvcNames {
|
||||
@ -876,3 +865,11 @@ func WaitForPVCFinalizer(ctx context.Context, cs clientset.Interface, name, name
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDefaultFSType returns the default fsType
|
||||
func GetDefaultFSType() string {
|
||||
if framework.NodeOSDistroIs("windows") {
|
||||
return "ntfs"
|
||||
}
|
||||
return "ext4"
|
||||
}
|
||||
|
38
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/BUILD
generated
vendored
38
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/BUILD
generated
vendored
@ -1,38 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["skipper.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/skipper",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/discovery:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/node:go_default_library",
|
||||
"//test/e2e/framework/ssh:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
17
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go
generated
vendored
17
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go
generated
vendored
@ -46,6 +46,11 @@ import (
|
||||
// New local storage types to support local storage capacity isolation
|
||||
var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
|
||||
|
||||
var (
|
||||
downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
|
||||
execProbeTimeout featuregate.Feature = "ExecProbeTimeout"
|
||||
)
|
||||
|
||||
func skipInternalf(caller int, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
framework.Logf(msg)
|
||||
@ -137,6 +142,18 @@ func SkipUnlessLocalEphemeralStorageEnabled() {
|
||||
}
|
||||
}
|
||||
|
||||
func SkipUnlessDownwardAPIHugePagesEnabled() {
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(downwardAPIHugePages) {
|
||||
skipInternalf(1, "Only supported when %v feature is enabled", downwardAPIHugePages)
|
||||
}
|
||||
}
|
||||
|
||||
func SkipUnlessExecProbeTimeoutEnabled() {
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(execProbeTimeout) {
|
||||
skipInternalf(1, "Only supported when %v feature is enabled", execProbeTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// SkipIfMissingResource skips if the gvr resource is missing.
|
||||
func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
|
||||
resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
|
||||
|
32
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/BUILD
generated
vendored
32
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/BUILD
generated
vendored
@ -1,32 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["ssh.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/ssh",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/ssh:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
16
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
16
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -217,6 +217,8 @@ type NodeTestContextType struct {
|
||||
KubeletConfig kubeletconfig.KubeletConfiguration
|
||||
// ImageDescription is the description of the image on which the test is running.
|
||||
ImageDescription string
|
||||
// RuntimeConfig is a map of API server runtime configuration values.
|
||||
RuntimeConfig map[string]string
|
||||
// SystemSpecName is the name of the system spec (e.g., gke) that's used in
|
||||
// the node e2e test. If empty, the default one (system.DefaultSpec) is
|
||||
// used. The system specs are in test/e2e_node/system/specs/.
|
||||
@ -229,7 +231,8 @@ type NodeTestContextType struct {
|
||||
type CloudConfig struct {
|
||||
APIEndpoint string
|
||||
ProjectID string
|
||||
Zone string // for multizone tests, arbitrarily chosen zone
|
||||
Zone string // for multizone tests, arbitrarily chosen zone
|
||||
Zones []string // for multizone tests, use this set of zones instead of querying the cloud provider. Must include Zone.
|
||||
Region string
|
||||
MultiZone bool
|
||||
MultiMaster bool
|
||||
@ -289,7 +292,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
|
||||
flags.StringVar(&TestContext.LogexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.")
|
||||
flags.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. It is only designed to make debugging easier, DO NOT turn it off by default.")
|
||||
flags.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
|
||||
flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If non-zero, framework will allow for that many non-ready nodes when checking for all ready nodes.")
|
||||
flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If greater than zero, framework will allow for that many non-ready nodes when checking for all ready nodes. If -1, no waiting will be performed for ready nodes or daemonset pods.")
|
||||
|
||||
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
|
||||
flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
|
||||
@ -329,7 +332,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
|
||||
flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
|
||||
flags.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
|
||||
flags.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
|
||||
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
|
||||
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, windows, or custom), which determines how specific tests are implemented.")
|
||||
flags.StringVar(&TestContext.NodeOSArch, "node-os-arch", "amd64", "The OS architecture of cluster VM instances (amd64, arm64, or custom).")
|
||||
flags.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")
|
||||
|
||||
@ -339,6 +342,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
|
||||
flags.StringVar(&cloudConfig.APIEndpoint, "gce-api-endpoint", "", "The GCE APIEndpoint being used, if applicable")
|
||||
flags.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
|
||||
flags.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
|
||||
flags.Var(cliflag.NewStringSlice(&cloudConfig.Zones), "gce-zones", "The set of zones to use in a multi-zone test instead of querying the cloud provider.")
|
||||
flags.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
|
||||
flags.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
|
||||
flags.BoolVar(&cloudConfig.MultiMaster, "gce-multimaster", false, "If true, the underlying GCE/GKE cluster is assumed to be multi-master.")
|
||||
@ -352,7 +356,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
|
||||
|
||||
flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
|
||||
flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
|
||||
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
|
||||
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
|
||||
flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
|
||||
flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
|
||||
flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
|
||||
@ -433,7 +437,7 @@ func AfterReadingAllFlags(t *TestContextType) {
|
||||
kubeConfig := createKubeConfig(clusterConfig)
|
||||
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
|
||||
t.KubeConfig = tempFile.Name()
|
||||
klog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
|
||||
klog.V(4).Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
|
||||
}
|
||||
}
|
||||
if len(t.KubeConfig) == 0 {
|
||||
@ -454,7 +458,7 @@ func AfterReadingAllFlags(t *TestContextType) {
|
||||
t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
|
||||
}
|
||||
|
||||
klog.Infof("Tolerating taints %q when considering if nodes are ready", TestContext.NonblockingTaints)
|
||||
klog.V(4).Infof("Tolerating taints %q when considering if nodes are ready", TestContext.NonblockingTaints)
|
||||
|
||||
// Make sure that all test runs have a valid TestContext.CloudConfig.Provider.
|
||||
// TODO: whether and how long this code is needed is getting discussed
|
||||
|
22
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/BUILD
generated
vendored
22
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/BUILD
generated
vendored
@ -1,22 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["testfiles.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
99
vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go
generated
vendored
Normal file
99
vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import "time"
|
||||
|
||||
const (
|
||||
// Default timeouts to be used in TimeoutContext
|
||||
podStartTimeout = 5 * time.Minute
|
||||
podStartShortTimeout = 2 * time.Minute
|
||||
podStartSlowTimeout = 15 * time.Minute
|
||||
podDeleteTimeout = 5 * time.Minute
|
||||
claimProvisionTimeout = 5 * time.Minute
|
||||
claimProvisionShortTimeout = 1 * time.Minute
|
||||
claimBoundTimeout = 3 * time.Minute
|
||||
pvReclaimTimeout = 3 * time.Minute
|
||||
pvBoundTimeout = 3 * time.Minute
|
||||
pvDeleteTimeout = 3 * time.Minute
|
||||
pvDeleteSlowTimeout = 20 * time.Minute
|
||||
snapshotCreateTimeout = 5 * time.Minute
|
||||
snapshotDeleteTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// TimeoutContext contains timeout settings for several actions.
|
||||
type TimeoutContext struct {
|
||||
// PodStart is how long to wait for the pod to be started.
|
||||
PodStart time.Duration
|
||||
|
||||
// PodStartShort is same as `PodStart`, but shorter.
|
||||
// Use it in a case-by-case basis, mostly when you are sure pod start will not be delayed.
|
||||
PodStartShort time.Duration
|
||||
|
||||
// PodStartSlow is same as `PodStart`, but longer.
|
||||
// Use it in a case-by-case basis, mostly when you are sure pod start will take longer than usual.
|
||||
PodStartSlow time.Duration
|
||||
|
||||
// PodDelete is how long to wait for the pod to be deleted.
|
||||
PodDelete time.Duration
|
||||
|
||||
// ClaimProvision is how long claims have to become dynamically provisioned.
|
||||
ClaimProvision time.Duration
|
||||
|
||||
// ClaimProvisionShort is the same as `ClaimProvision`, but shorter.
|
||||
ClaimProvisionShort time.Duration
|
||||
|
||||
// ClaimBound is how long claims have to become bound.
|
||||
ClaimBound time.Duration
|
||||
|
||||
// PVReclaim is how long PVs have to become reclaimed.
|
||||
PVReclaim time.Duration
|
||||
|
||||
// PVBound is how long PVs have to become bound.
|
||||
PVBound time.Duration
|
||||
|
||||
// PVDelete is how long PVs have to become deleted.
|
||||
PVDelete time.Duration
|
||||
|
||||
// PVDeleteSlow is the same as PVDelete, but slower.
|
||||
PVDeleteSlow time.Duration
|
||||
|
||||
// SnapshotCreate is how long for snapshot to create snapshotContent.
|
||||
SnapshotCreate time.Duration
|
||||
|
||||
// SnapshotDelete is how long for snapshot to delete snapshotContent.
|
||||
SnapshotDelete time.Duration
|
||||
}
|
||||
|
||||
// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
|
||||
func NewTimeoutContextWithDefaults() *TimeoutContext {
|
||||
return &TimeoutContext{
|
||||
PodStart: podStartTimeout,
|
||||
PodStartShort: podStartShortTimeout,
|
||||
PodStartSlow: podStartSlowTimeout,
|
||||
PodDelete: podDeleteTimeout,
|
||||
ClaimProvision: claimProvisionTimeout,
|
||||
ClaimProvisionShort: claimProvisionShortTimeout,
|
||||
ClaimBound: claimBoundTimeout,
|
||||
PVReclaim: pvReclaimTimeout,
|
||||
PVBound: pvBoundTimeout,
|
||||
PVDelete: pvDeleteTimeout,
|
||||
PVDeleteSlow: pvDeleteSlowTimeout,
|
||||
SnapshotCreate: snapshotCreateTimeout,
|
||||
SnapshotDelete: snapshotDeleteTimeout,
|
||||
}
|
||||
}
|
75
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
75
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@ -71,9 +71,23 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimal number of nodes for the cluster to be considered large.
|
||||
largeClusterThreshold = 100
|
||||
|
||||
// TODO(justinsb): Avoid hardcoding this.
|
||||
awsMasterIP = "172.20.0.9"
|
||||
|
||||
// AllContainers specifies that all containers be visited
|
||||
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
|
||||
AllContainers = InitContainers | Containers | EphemeralContainers
|
||||
)
|
||||
|
||||
// DEPRECATED constants. Use the timeouts in framework.Framework instead.
|
||||
const (
|
||||
// PodListTimeout is how long to wait for the pod to be listable.
|
||||
PodListTimeout = time.Minute
|
||||
|
||||
// PodStartTimeout is how long to wait for the pod to be started.
|
||||
PodStartTimeout = 5 * time.Minute
|
||||
|
||||
@ -136,16 +150,6 @@ const (
|
||||
|
||||
// SnapshotDeleteTimeout is how long for snapshot to delete snapshotContent.
|
||||
SnapshotDeleteTimeout = 5 * time.Minute
|
||||
|
||||
// Minimal number of nodes for the cluster to be considered large.
|
||||
largeClusterThreshold = 100
|
||||
|
||||
// TODO(justinsb): Avoid hardcoding this.
|
||||
awsMasterIP = "172.20.0.9"
|
||||
|
||||
// AllContainers specifies that all containers be visited
|
||||
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
|
||||
AllContainers = InitContainers | Containers | EphemeralContainers
|
||||
)
|
||||
|
||||
var (
|
||||
@ -818,7 +822,7 @@ func (f *Framework) MatchContainerOutput(
|
||||
}()
|
||||
|
||||
// Wait for client pod to complete.
|
||||
podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
|
||||
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
|
||||
|
||||
// Grab its logs. Get host first.
|
||||
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
|
||||
@ -1018,10 +1022,13 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
|
||||
}
|
||||
|
||||
// WaitForAllNodesSchedulable waits up to timeout for all
|
||||
// (but TestContext.AllowedNotReadyNodes) to become scheduable.
|
||||
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
|
||||
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
|
||||
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
|
||||
if TestContext.AllowedNotReadyNodes == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
|
||||
return wait.PollImmediate(
|
||||
30*time.Second,
|
||||
timeout,
|
||||
@ -1114,11 +1121,16 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
// AllNodesReady checks whether all registered nodes are ready.
|
||||
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
|
||||
// TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
|
||||
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
|
||||
// and figure out how to do it in a configurable way, as we can't expect all setups to run
|
||||
// default test add-ons.
|
||||
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
|
||||
if TestContext.AllowedNotReadyNodes == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
|
||||
|
||||
var notReady []*v1.Node
|
||||
@ -1230,20 +1242,23 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er
|
||||
return stdout, stderr, nil
|
||||
}
|
||||
|
||||
// getMasterAddresses returns the externalIP, internalIP and hostname fields of the master.
|
||||
// If any of these is unavailable, it is set to "".
|
||||
func getMasterAddresses(c clientset.Interface) (string, string, string) {
|
||||
var externalIP, internalIP, hostname string
|
||||
// getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes.
|
||||
// If any of these is unavailable, empty slices are returned.
|
||||
func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []string) {
|
||||
var externalIPs, internalIPs, hostnames []string
|
||||
|
||||
// Populate the internal IP.
|
||||
// Populate the internal IPs.
|
||||
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("Failed to get kubernetes endpoints: %v", err)
|
||||
}
|
||||
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
|
||||
Failf("There are more than 1 endpoints for kubernetes service: %+v", eps)
|
||||
for _, subset := range eps.Subsets {
|
||||
for _, address := range subset.Addresses {
|
||||
if address.IP != "" {
|
||||
internalIPs = append(internalIPs, address.IP)
|
||||
}
|
||||
}
|
||||
}
|
||||
internalIP = eps.Subsets[0].Addresses[0].IP
|
||||
|
||||
// Populate the external IP/hostname.
|
||||
hostURL, err := url.Parse(TestContext.Host)
|
||||
@ -1251,12 +1266,12 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
|
||||
Failf("Failed to parse hostname: %v", err)
|
||||
}
|
||||
if net.ParseIP(hostURL.Host) != nil {
|
||||
externalIP = hostURL.Host
|
||||
externalIPs = append(externalIPs, hostURL.Host)
|
||||
} else {
|
||||
hostname = hostURL.Host
|
||||
hostnames = append(hostnames, hostURL.Host)
|
||||
}
|
||||
|
||||
return externalIP, internalIP, hostname
|
||||
return externalIPs, internalIPs, hostnames
|
||||
}
|
||||
|
||||
// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
|
||||
@ -1264,16 +1279,16 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
|
||||
// e.g. internal IPs to be used (issue #56787), so that we can be
|
||||
// sure to block the control plane fully during tests.
|
||||
func GetControlPlaneAddresses(c clientset.Interface) []string {
|
||||
externalIP, internalIP, _ := getMasterAddresses(c)
|
||||
externalIPs, internalIPs, _ := getControlPlaneAddresses(c)
|
||||
|
||||
ips := sets.NewString()
|
||||
switch TestContext.Provider {
|
||||
case "gce", "gke":
|
||||
if externalIP != "" {
|
||||
ips.Insert(externalIP)
|
||||
for _, ip := range externalIPs {
|
||||
ips.Insert(ip)
|
||||
}
|
||||
if internalIP != "" {
|
||||
ips.Insert(internalIP)
|
||||
for _, ip := range internalIPs {
|
||||
ips.Insert(ip)
|
||||
}
|
||||
case "aws":
|
||||
ips.Insert(awsMasterIP)
|
||||
|
20
vendor/k8s.io/kubernetes/test/e2e/framework/volume/OWNERS
generated
vendored
Normal file
20
vendor/k8s.io/kubernetes/test/e2e/framework/volume/OWNERS
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
approvers:
|
||||
- saad-ali
|
||||
- rootfs
|
||||
- gnufied
|
||||
- jingxu97
|
||||
- jsafrane
|
||||
- msau42
|
||||
reviewers:
|
||||
- saad-ali
|
||||
- rootfs
|
||||
- gnufied
|
||||
- jingxu97
|
||||
- jsafrane
|
||||
- msau42
|
||||
- jeffvance
|
||||
- copejon
|
||||
- verult
|
||||
- davidz627
|
685
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
Normal file
685
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
Normal file
@ -0,0 +1,685 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This test checks that various VolumeSources are working.
|
||||
*
|
||||
* There are two ways, how to test the volumes:
|
||||
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
|
||||
* The test creates a server pod, exporting simple 'index.html' file.
|
||||
* Then it uses appropriate VolumeSource to import this file into a client pod
|
||||
* and checks that the pod can see the file. It does so by importing the file
|
||||
* into web server root and loadind the index.html from it.
|
||||
*
|
||||
* These tests work only when privileged containers are allowed, exporting
|
||||
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
|
||||
* other privileged magic in the server pod.
|
||||
*
|
||||
* Note that the server containers are for testing purposes only and should not
|
||||
* be used in production.
|
||||
*
|
||||
* 2) With server outside of Kubernetes (Cinder, ...)
|
||||
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
|
||||
* the tested Kubernetes cluster. The test itself creates a new volume,
|
||||
* and checks, that Kubernetes can use it as a volume.
|
||||
*/
|
||||
|
||||
package volume
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientexec "k8s.io/client-go/util/exec"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
uexec "k8s.io/utils/exec"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// Kb is byte size of kilobyte
|
||||
Kb int64 = 1000
|
||||
// Mb is byte size of megabyte
|
||||
Mb int64 = 1000 * Kb
|
||||
// Gb is byte size of gigabyte
|
||||
Gb int64 = 1000 * Mb
|
||||
// Tb is byte size of terabyte
|
||||
Tb int64 = 1000 * Gb
|
||||
// KiB is byte size of kibibyte
|
||||
KiB int64 = 1024
|
||||
// MiB is byte size of mebibyte
|
||||
MiB int64 = 1024 * KiB
|
||||
// GiB is byte size of gibibyte
|
||||
GiB int64 = 1024 * MiB
|
||||
// TiB is byte size of tebibyte
|
||||
TiB int64 = 1024 * GiB
|
||||
|
||||
// VolumeServerPodStartupTimeout is a waiting period for volume server (Ceph, ...) to initialize itself.
|
||||
VolumeServerPodStartupTimeout = 3 * time.Minute
|
||||
|
||||
// PodCleanupTimeout is a waiting period for pod to be cleaned up and unmount its volumes so we
|
||||
// don't tear down containers with NFS/Ceph/Gluster server too early.
|
||||
PodCleanupTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
// SizeRange encapsulates a range of sizes specified as minimum and maximum quantity strings
|
||||
// Both values are optional.
|
||||
// If size is not set, it will assume there's not limitation and it may set a very small size (E.g. 1ki)
|
||||
// as Min and set a considerable big size(E.g. 10Ei) as Max, which make it possible to calculate
|
||||
// the intersection of given intervals (if it exists)
|
||||
type SizeRange struct {
|
||||
// Max quantity specified as a string including units. E.g "3Gi".
|
||||
// If the Max size is unset, It will be assign a default valid maximum size 10Ei,
|
||||
// which is defined in test/e2e/storage/testsuites/base.go
|
||||
Max string
|
||||
// Min quantity specified as a string including units. E.g "1Gi"
|
||||
// If the Min size is unset, It will be assign a default valid minimum size 1Ki,
|
||||
// which is defined in test/e2e/storage/testsuites/base.go
|
||||
Min string
|
||||
}
|
||||
|
||||
// TestConfig is a struct for configuration of one tests. The test consist of:
|
||||
// - server pod - runs serverImage, exports ports[]
|
||||
// - client pod - does not need any special configuration
|
||||
type TestConfig struct {
|
||||
Namespace string
|
||||
// Prefix of all pods. Typically the test name.
|
||||
Prefix string
|
||||
// Name of container image for the server pod.
|
||||
ServerImage string
|
||||
// Ports to export from the server pod. TCP only.
|
||||
ServerPorts []int
|
||||
// Commands to run in the container image.
|
||||
ServerCmds []string
|
||||
// Arguments to pass to the container image.
|
||||
ServerArgs []string
|
||||
// Volumes needed to be mounted to the server container from the host
|
||||
// map <host (source) path> -> <container (dst.) path>
|
||||
// if <host (source) path> is empty, mount a tmpfs emptydir
|
||||
ServerVolumes map[string]string
|
||||
// Message to wait for before starting clients
|
||||
ServerReadyMessage string
|
||||
// Use HostNetwork for the server
|
||||
ServerHostNetwork bool
|
||||
// Wait for the pod to terminate successfully
|
||||
// False indicates that the pod is long running
|
||||
WaitForCompletion bool
|
||||
// ClientNodeSelection restricts where the client pod runs on. Default is any node.
|
||||
ClientNodeSelection e2epod.NodeSelection
|
||||
}
|
||||
|
||||
// Test contains a volume to mount into a client pod and its
|
||||
// expected content.
|
||||
type Test struct {
|
||||
Volume v1.VolumeSource
|
||||
Mode v1.PersistentVolumeMode
|
||||
// Name of file to read/write in FileSystem mode
|
||||
File string
|
||||
ExpectedContent string
|
||||
}
|
||||
|
||||
// NewNFSServer is a NFS-specific wrapper for CreateStorageServer.
|
||||
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
|
||||
config = TestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "nfs",
|
||||
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
|
||||
ServerPorts: []int{2049},
|
||||
ServerVolumes: map[string]string{"": "/exports"},
|
||||
ServerReadyMessage: "NFS started",
|
||||
}
|
||||
if len(args) > 0 {
|
||||
config.ServerArgs = args
|
||||
}
|
||||
pod, host = CreateStorageServer(cs, config)
|
||||
if strings.Contains(host, ":") {
|
||||
host = "[" + host + "]"
|
||||
}
|
||||
return config, pod, host
|
||||
}
|
||||
|
||||
// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
|
||||
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
|
||||
config = TestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "gluster",
|
||||
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
|
||||
ServerPorts: []int{24007, 24008, 49152},
|
||||
}
|
||||
pod, ip = CreateStorageServer(cs, config)
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.Prefix + "-server",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Protocol: v1.ProtocolTCP,
|
||||
Port: 24007,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "failed to create service for Gluster server")
|
||||
|
||||
ginkgo.By("creating Gluster endpoints")
|
||||
endpoints := &v1.Endpoints{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Endpoints",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.Prefix + "-server",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
Addresses: []v1.EndpointAddress{
|
||||
{
|
||||
IP: ip,
|
||||
},
|
||||
},
|
||||
Ports: []v1.EndpointPort{
|
||||
{
|
||||
Name: "gluster",
|
||||
Port: 24007,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "failed to create endpoints for Gluster server")
|
||||
|
||||
return config, pod, ip
|
||||
}
|
||||
|
||||
// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
|
||||
// and ip address string are returned.
|
||||
// Note: Expect() is called so no error is returned.
|
||||
func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
|
||||
pod = startVolumeServer(cs, config)
|
||||
gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
|
||||
ip = pod.Status.PodIP
|
||||
gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
|
||||
framework.Logf("%s server pod IP address: %s", config.Prefix, ip)
|
||||
return pod, ip
|
||||
}
|
||||
|
||||
// startVolumeServer starts a container specified by config.serverImage and exports all
|
||||
// config.serverPorts from it. The returned pod should be used to get the server
|
||||
// IP address and create appropriate VolumeSource.
|
||||
func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
|
||||
podClient := client.CoreV1().Pods(config.Namespace)
|
||||
|
||||
portCount := len(config.ServerPorts)
|
||||
serverPodPorts := make([]v1.ContainerPort, portCount)
|
||||
|
||||
for i := 0; i < portCount; i++ {
|
||||
portName := fmt.Sprintf("%s-%d", config.Prefix, i)
|
||||
|
||||
serverPodPorts[i] = v1.ContainerPort{
|
||||
Name: portName,
|
||||
ContainerPort: int32(config.ServerPorts[i]),
|
||||
Protocol: v1.ProtocolTCP,
|
||||
}
|
||||
}
|
||||
|
||||
volumeCount := len(config.ServerVolumes)
|
||||
volumes := make([]v1.Volume, volumeCount)
|
||||
mounts := make([]v1.VolumeMount, volumeCount)
|
||||
|
||||
i := 0
|
||||
for src, dst := range config.ServerVolumes {
|
||||
mountName := fmt.Sprintf("path%d", i)
|
||||
volumes[i].Name = mountName
|
||||
if src == "" {
|
||||
volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
|
||||
} else {
|
||||
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
|
||||
Path: src,
|
||||
}
|
||||
}
|
||||
|
||||
mounts[i].Name = mountName
|
||||
mounts[i].ReadOnly = false
|
||||
mounts[i].MountPath = dst
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
serverPodName := fmt.Sprintf("%s-server", config.Prefix)
|
||||
ginkgo.By(fmt.Sprint("creating ", serverPodName, " pod"))
|
||||
privileged := new(bool)
|
||||
*privileged = true
|
||||
|
||||
restartPolicy := v1.RestartPolicyAlways
|
||||
if config.WaitForCompletion {
|
||||
restartPolicy = v1.RestartPolicyNever
|
||||
}
|
||||
serverPod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serverPodName,
|
||||
Labels: map[string]string{
|
||||
"role": serverPodName,
|
||||
},
|
||||
},
|
||||
|
||||
Spec: v1.PodSpec{
|
||||
HostNetwork: config.ServerHostNetwork,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: serverPodName,
|
||||
Image: config.ServerImage,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: privileged,
|
||||
},
|
||||
Command: config.ServerCmds,
|
||||
Args: config.ServerArgs,
|
||||
Ports: serverPodPorts,
|
||||
VolumeMounts: mounts,
|
||||
},
|
||||
},
|
||||
Volumes: volumes,
|
||||
RestartPolicy: restartPolicy,
|
||||
},
|
||||
}
|
||||
|
||||
var pod *v1.Pod
|
||||
serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{})
|
||||
// ok if the server pod already exists. TODO: make this controllable by callers
|
||||
if err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
|
||||
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
|
||||
serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
|
||||
pod = serverPod
|
||||
} else {
|
||||
framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
|
||||
}
|
||||
}
|
||||
if config.WaitForCompletion {
|
||||
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
|
||||
framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, metav1.DeleteOptions{}))
|
||||
} else {
|
||||
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
|
||||
if pod == nil {
|
||||
ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
|
||||
pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
|
||||
}
|
||||
}
|
||||
if config.ServerReadyMessage != "" {
|
||||
_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
|
||||
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
// TestServerCleanup cleans server pod.
|
||||
func TestServerCleanup(f *framework.Framework, config TestConfig) {
|
||||
ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
|
||||
defer ginkgo.GinkgoRecover()
|
||||
|
||||
if config.ServerImage == "" {
|
||||
return
|
||||
}
|
||||
|
||||
err := e2epod.DeletePodWithWaitByName(f.ClientSet, config.Prefix+"-server", config.Namespace)
|
||||
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
|
||||
}
|
||||
|
||||
func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
|
||||
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
|
||||
var gracePeriod int64 = 1
|
||||
var command string
|
||||
|
||||
if !framework.NodeOSDistroIs("windows") {
|
||||
command = "while true ; do sleep 2; done "
|
||||
} else {
|
||||
command = "while(1) {sleep 2}"
|
||||
}
|
||||
seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
|
||||
clientPod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.Prefix + "-" + podSuffix,
|
||||
Labels: map[string]string{
|
||||
"role": config.Prefix + "-" + podSuffix,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: config.Prefix + "-" + podSuffix,
|
||||
Image: e2epod.GetDefaultTestImage(),
|
||||
WorkingDir: "/opt",
|
||||
// An imperative and easily debuggable container which reads/writes vol contents for
|
||||
// us to scan in the tests or by eye.
|
||||
// We expect that /opt is empty in the minimal containers which we use in this test.
|
||||
Command: e2epod.GenerateScriptCmd(command),
|
||||
VolumeMounts: []v1.VolumeMount{},
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: &gracePeriod,
|
||||
SecurityContext: e2epod.GeneratePodSecurityContext(fsGroup, seLinuxOptions),
|
||||
Volumes: []v1.Volume{},
|
||||
},
|
||||
}
|
||||
e2epod.SetNodeSelection(&clientPod.Spec, config.ClientNodeSelection)
|
||||
|
||||
for i, test := range tests {
|
||||
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
|
||||
|
||||
// We need to make the container privileged when SELinux is enabled on the
|
||||
// host, so the test can write data to a location like /tmp. Also, due to
|
||||
// the Docker bug below, it's not currently possible to map a device with
|
||||
// a privileged container, so we don't go privileged for block volumes.
|
||||
// https://github.com/moby/moby/issues/35991
|
||||
if privileged && test.Mode == v1.PersistentVolumeBlock {
|
||||
privileged = false
|
||||
}
|
||||
clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(privileged)
|
||||
|
||||
if test.Mode == v1.PersistentVolumeBlock {
|
||||
clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
|
||||
Name: volumeName,
|
||||
DevicePath: fmt.Sprintf("/opt/%d", i),
|
||||
})
|
||||
} else {
|
||||
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
|
||||
Name: volumeName,
|
||||
MountPath: fmt.Sprintf("/opt/%d", i),
|
||||
})
|
||||
}
|
||||
clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: test.Volume,
|
||||
})
|
||||
}
|
||||
podsNamespacer := client.CoreV1().Pods(config.Namespace)
|
||||
clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if slow {
|
||||
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
|
||||
} else {
|
||||
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
|
||||
}
|
||||
if err != nil {
|
||||
e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
|
||||
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
|
||||
return nil, err
|
||||
}
|
||||
return clientPod, nil
|
||||
}
|
||||
|
||||
func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
|
||||
ginkgo.By("Checking that text file contents are perfect.")
|
||||
for i, test := range tests {
|
||||
if test.Mode == v1.PersistentVolumeBlock {
|
||||
// Block: check content
|
||||
deviceName := fmt.Sprintf("/opt/%d", i)
|
||||
commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
|
||||
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
|
||||
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
|
||||
|
||||
// Check that it's a real block device
|
||||
CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
|
||||
} else {
|
||||
// Filesystem: check content
|
||||
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
|
||||
commands := GenerateReadFileCmd(fileName)
|
||||
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
|
||||
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
|
||||
|
||||
// Check that a directory has been mounted
|
||||
dirName := filepath.Dir(fileName)
|
||||
CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
|
||||
|
||||
if !framework.NodeOSDistroIs("windows") {
|
||||
// Filesystem: check fsgroup
|
||||
if fsGroup != nil {
|
||||
ginkgo.By("Checking fsGroup is correct.")
|
||||
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
|
||||
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
|
||||
}
|
||||
|
||||
// Filesystem: check fsType
|
||||
if fsType != "" {
|
||||
ginkgo.By("Checking fsType is correct.")
|
||||
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
|
||||
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVolumeClient start a client pod using given VolumeSource (exported by startVolumeServer())
|
||||
// and check that the pod sees expected data, e.g. from the server pod.
|
||||
// Multiple Tests can be specified to mount multiple volumes to a single
|
||||
// pod.
|
||||
// Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
|
||||
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.podStartTimeout.
|
||||
// It should be used for cases where "regular" dynamic provisioning of an empty volume is requested.
|
||||
func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
|
||||
testVolumeClient(f, config, fsGroup, fsType, tests, false)
|
||||
}
|
||||
|
||||
// TestVolumeClientSlow is the same as TestVolumeClient except for its timeout.
|
||||
// Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
|
||||
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.slowPodStartTimeout.
|
||||
// It should be used for cases where "special" dynamic provisioning is requested, such as volume cloning
|
||||
// or snapshot restore.
|
||||
func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
|
||||
testVolumeClient(f, config, fsGroup, fsType, tests, true)
|
||||
}
|
||||
|
||||
func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
|
||||
timeouts := f.Timeouts
|
||||
clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create client pod: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
|
||||
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
|
||||
}()
|
||||
|
||||
testVolumeContent(f, clientPod, fsGroup, fsType, tests)
|
||||
}
|
||||
|
||||
// InjectContent inserts index.html with given content into given volume. It does so by
|
||||
// starting and auxiliary pod which writes the file there.
|
||||
// The volume must be writable.
|
||||
func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
|
||||
privileged := true
|
||||
timeouts := f.Timeouts
|
||||
if framework.NodeOSDistroIs("windows") {
|
||||
privileged = false
|
||||
}
|
||||
injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create injector pod: %v", err)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
|
||||
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
|
||||
}()
|
||||
|
||||
ginkgo.By("Writing text file contents in the container.")
|
||||
for i, test := range tests {
|
||||
commands := []string{"exec", injectorPod.Name, fmt.Sprintf("--namespace=%v", injectorPod.Namespace), "--"}
|
||||
if test.Mode == v1.PersistentVolumeBlock {
|
||||
// Block: write content
|
||||
deviceName := fmt.Sprintf("/opt/%d", i)
|
||||
commands = append(commands, generateWriteBlockCmd(test.ExpectedContent, deviceName)...)
|
||||
|
||||
} else {
|
||||
// Filesystem: write content
|
||||
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
|
||||
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
|
||||
}
|
||||
out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
|
||||
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
|
||||
}
|
||||
|
||||
// Check that the data have been really written in this pod.
|
||||
// This tests non-persistent volume types
|
||||
testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
|
||||
}
|
||||
|
||||
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
|
||||
func generateWriteCmd(content, path string) []string {
|
||||
var commands []string
|
||||
if !framework.NodeOSDistroIs("windows") {
|
||||
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
|
||||
} else {
|
||||
commands = []string{"powershell", "/c", "echo '" + content + "' > " + path}
|
||||
}
|
||||
return commands
|
||||
}
|
||||
|
||||
// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
|
||||
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
|
||||
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
|
||||
var commands []string
|
||||
if !framework.NodeOSDistroIs("windows") {
|
||||
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
|
||||
} else {
|
||||
// TODO: is there a way on windows to get the first X bytes from a device?
|
||||
commands = []string{"powershell", "/c", "type " + fullPath}
|
||||
}
|
||||
return commands
|
||||
}
|
||||
|
||||
// generateWriteBlockCmd generates the corresponding command lines to write to a block device the given content.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateWriteBlockCmd(content, fullPath string) []string {
	// Writing to a block device uses the same shell redirection as writing
	// to a regular file, so delegate to the shared helper.
	return generateWriteCmd(content, fullPath)
}
|
||||
|
||||
// GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
|
||||
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
|
||||
func GenerateReadFileCmd(fullPath string) []string {
|
||||
var commands []string
|
||||
if !framework.NodeOSDistroIs("windows") {
|
||||
commands = []string{"cat", fullPath}
|
||||
} else {
|
||||
commands = []string{"powershell", "/c", "type " + fullPath}
|
||||
}
|
||||
return commands
|
||||
}
|
||||
|
||||
// generateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateWriteFileCmd(content, fullPath string) []string {
	// Same shell redirection as block-device writes; delegate to the shared helper.
	return generateWriteCmd(content, fullPath)
}
|
||||
|
||||
// CheckVolumeModeOfPath check mode of volume
|
||||
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
|
||||
if volMode == v1.PersistentVolumeBlock {
|
||||
// Check if block exists
|
||||
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
|
||||
|
||||
// Double check that it's not directory
|
||||
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
|
||||
} else {
|
||||
// Check if directory exists
|
||||
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
|
||||
|
||||
// Double check that it's not block
|
||||
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
|
||||
}
|
||||
}
|
||||
|
||||
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
|
||||
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
|
||||
// is resolved. Otherwise there will be dependency issue.
|
||||
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
|
||||
if framework.NodeOSDistroIs("windows") {
|
||||
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
|
||||
}
|
||||
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
|
||||
}
|
||||
|
||||
// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
|
||||
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
|
||||
// is resolved. Otherwise there will be dependency issue.
|
||||
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
|
||||
stdout, stderr, err := PodExec(f, pod, shExec)
|
||||
if err != nil {
|
||||
|
||||
if exiterr, ok := err.(uexec.CodeExitError); ok {
|
||||
exitCode := exiterr.ExitStatus()
|
||||
framework.ExpectNoError(err,
|
||||
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
|
||||
shExec, exitCode, exiterr, stdout, stderr)
|
||||
} else {
|
||||
framework.ExpectNoError(err,
|
||||
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
|
||||
shExec, err, stdout, stderr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
|
||||
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
|
||||
// is resolved. Otherwise there will be dependency issue.
|
||||
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
|
||||
stdout, stderr, err := PodExec(f, pod, shExec)
|
||||
if err != nil {
|
||||
if exiterr, ok := err.(clientexec.ExitError); ok {
|
||||
actualExitCode := exiterr.ExitStatus()
|
||||
framework.ExpectEqual(actualExitCode, exitCode,
|
||||
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
|
||||
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
|
||||
} else {
|
||||
framework.ExpectNoError(err,
|
||||
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
|
||||
shExec, exitCode, err, stdout, stderr)
|
||||
}
|
||||
}
|
||||
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
|
||||
}
|
Reference in New Issue
Block a user