mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00
vendor files
This commit is contained in:
830
vendor/k8s.io/kubernetes/hack/.golint_failures
generated
vendored
Normal file
830
vendor/k8s.io/kubernetes/hack/.golint_failures
generated
vendored
Normal file
@ -0,0 +1,830 @@
|
||||
cluster/images/etcd-version-monitor
|
||||
cmd/gke-certificates-controller/app
|
||||
cmd/hyperkube
|
||||
cmd/kube-controller-manager/app
|
||||
cmd/kube-proxy/app
|
||||
cmd/kubeadm/app
|
||||
cmd/kubeadm/app/apis/kubeadm
|
||||
cmd/kubeadm/app/apis/kubeadm/v1alpha1
|
||||
cmd/kubeadm/app/phases/etcd/spec
|
||||
cmd/kubelet/app
|
||||
cmd/kubelet/app/options
|
||||
cmd/kubemark
|
||||
examples/guestbook-go
|
||||
pkg/api/endpoints
|
||||
pkg/api/ref
|
||||
pkg/api/testapi
|
||||
pkg/api/testing
|
||||
pkg/api/testing/compat
|
||||
pkg/api/unversioned
|
||||
pkg/api/v1/endpoints
|
||||
pkg/api/v1/pod
|
||||
pkg/api/v1/resource
|
||||
pkg/apis/abac
|
||||
pkg/apis/abac/latest
|
||||
pkg/apis/admission
|
||||
pkg/apis/admission/v1beta1
|
||||
pkg/apis/admissionregistration
|
||||
pkg/apis/admissionregistration/v1alpha1
|
||||
pkg/apis/admissionregistration/v1beta1
|
||||
pkg/apis/admissionregistration/validation
|
||||
pkg/apis/apps
|
||||
pkg/apis/apps/validation
|
||||
pkg/apis/authentication
|
||||
pkg/apis/authentication/v1
|
||||
pkg/apis/authentication/v1beta1
|
||||
pkg/apis/authorization
|
||||
pkg/apis/authorization/v1
|
||||
pkg/apis/authorization/v1beta1
|
||||
pkg/apis/authorization/validation
|
||||
pkg/apis/autoscaling
|
||||
pkg/apis/autoscaling/validation
|
||||
pkg/apis/batch
|
||||
pkg/apis/batch/validation
|
||||
pkg/apis/certificates
|
||||
pkg/apis/certificates/v1beta1
|
||||
pkg/apis/certificates/validation
|
||||
pkg/apis/componentconfig
|
||||
pkg/apis/componentconfig/v1alpha1
|
||||
pkg/apis/core
|
||||
pkg/apis/core/helper
|
||||
pkg/apis/core/helper/qos
|
||||
pkg/apis/core/v1/helper
|
||||
pkg/apis/core/v1/helper/qos
|
||||
pkg/apis/core/v1/validation
|
||||
pkg/apis/core/validation
|
||||
pkg/apis/events
|
||||
pkg/apis/events/v1beta1
|
||||
pkg/apis/extensions
|
||||
pkg/apis/extensions/validation
|
||||
pkg/apis/imagepolicy
|
||||
pkg/apis/imagepolicy/v1alpha1
|
||||
pkg/apis/networking
|
||||
pkg/apis/policy
|
||||
pkg/apis/policy/v1beta1
|
||||
pkg/apis/policy/validation
|
||||
pkg/apis/rbac/v1
|
||||
pkg/apis/rbac/v1beta1
|
||||
pkg/apis/rbac/validation
|
||||
pkg/apis/scheduling
|
||||
pkg/apis/scheduling/v1alpha1
|
||||
pkg/apis/settings
|
||||
pkg/apis/settings/v1alpha1
|
||||
pkg/apis/storage
|
||||
pkg/apis/storage/util
|
||||
pkg/apis/storage/v1/util
|
||||
pkg/apis/storage/v1alpha1
|
||||
pkg/apis/storage/v1beta1/util
|
||||
pkg/auth/authorizer/abac
|
||||
pkg/capabilities
|
||||
pkg/client/chaosclient
|
||||
pkg/client/informers/informers_generated/internalversion/internalinterfaces
|
||||
pkg/client/leaderelectionconfig
|
||||
pkg/client/tests
|
||||
pkg/client/unversioned/testclient/simple
|
||||
pkg/cloudprovider
|
||||
pkg/cloudprovider/providers/aws
|
||||
pkg/cloudprovider/providers/fake
|
||||
pkg/cloudprovider/providers/gce
|
||||
pkg/cloudprovider/providers/openstack
|
||||
pkg/cloudprovider/providers/ovirt
|
||||
pkg/cloudprovider/providers/photon
|
||||
pkg/cloudprovider/providers/vsphere
|
||||
pkg/controller
|
||||
pkg/controller/bootstrap
|
||||
pkg/controller/certificates
|
||||
pkg/controller/certificates/approver
|
||||
pkg/controller/certificates/signer
|
||||
pkg/controller/cloud
|
||||
pkg/controller/clusterroleaggregation
|
||||
pkg/controller/cronjob
|
||||
pkg/controller/daemon
|
||||
pkg/controller/daemon/util
|
||||
pkg/controller/deployment
|
||||
pkg/controller/deployment/util
|
||||
pkg/controller/disruption
|
||||
pkg/controller/endpoint
|
||||
pkg/controller/garbagecollector
|
||||
pkg/controller/garbagecollector/metaonly
|
||||
pkg/controller/job
|
||||
pkg/controller/namespace
|
||||
pkg/controller/namespace/deletion
|
||||
pkg/controller/podautoscaler
|
||||
pkg/controller/podautoscaler/metrics
|
||||
pkg/controller/podgc
|
||||
pkg/controller/replicaset
|
||||
pkg/controller/replicaset/options
|
||||
pkg/controller/replication
|
||||
pkg/controller/resourcequota
|
||||
pkg/controller/route
|
||||
pkg/controller/service
|
||||
pkg/controller/serviceaccount
|
||||
pkg/controller/statefulset
|
||||
pkg/controller/ttl
|
||||
pkg/controller/volume/attachdetach
|
||||
pkg/controller/volume/attachdetach/statusupdater
|
||||
pkg/controller/volume/attachdetach/testing
|
||||
pkg/controller/volume/events
|
||||
pkg/controller/volume/expand
|
||||
pkg/controller/volume/persistentvolume
|
||||
pkg/controller/volume/persistentvolume/options
|
||||
pkg/credentialprovider
|
||||
pkg/credentialprovider/gcp
|
||||
pkg/credentialprovider/rancher
|
||||
pkg/features
|
||||
pkg/kubeapiserver
|
||||
pkg/kubeapiserver/admission
|
||||
pkg/kubeapiserver/authenticator
|
||||
pkg/kubeapiserver/authorizer
|
||||
pkg/kubeapiserver/authorizer/modes
|
||||
pkg/kubeapiserver/options
|
||||
pkg/kubeapiserver/server
|
||||
pkg/kubectl
|
||||
pkg/kubectl/categories
|
||||
pkg/kubectl/cmd
|
||||
pkg/kubectl/cmd/auth
|
||||
pkg/kubectl/cmd/config
|
||||
pkg/kubectl/cmd/rollout
|
||||
pkg/kubectl/cmd/set
|
||||
pkg/kubectl/cmd/templates
|
||||
pkg/kubectl/cmd/testing
|
||||
pkg/kubectl/cmd/util
|
||||
pkg/kubectl/cmd/util/editor
|
||||
pkg/kubectl/cmd/util/jsonmerge
|
||||
pkg/kubectl/cmd/util/sanity
|
||||
pkg/kubectl/metricsutil
|
||||
pkg/kubectl/resource
|
||||
pkg/kubectl/testing
|
||||
pkg/kubectl/util
|
||||
pkg/kubectl/util/crlf
|
||||
pkg/kubectl/util/slice
|
||||
pkg/kubelet
|
||||
pkg/kubelet/apis
|
||||
pkg/kubelet/apis/cri/testing
|
||||
pkg/kubelet/apis/cri/v1alpha1/runtime
|
||||
pkg/kubelet/apis/deviceplugin/v1alpha
|
||||
pkg/kubelet/apis/kubeletconfig
|
||||
pkg/kubelet/apis/kubeletconfig/v1alpha1
|
||||
pkg/kubelet/cadvisor
|
||||
pkg/kubelet/cadvisor/testing
|
||||
pkg/kubelet/client
|
||||
pkg/kubelet/cm
|
||||
pkg/kubelet/cm/util
|
||||
pkg/kubelet/config
|
||||
pkg/kubelet/configmap
|
||||
pkg/kubelet/container/testing
|
||||
pkg/kubelet/custommetrics
|
||||
pkg/kubelet/dockershim
|
||||
pkg/kubelet/dockershim/cm
|
||||
pkg/kubelet/dockershim/libdocker
|
||||
pkg/kubelet/dockershim/remote
|
||||
pkg/kubelet/dockershim/testing
|
||||
pkg/kubelet/events
|
||||
pkg/kubelet/gpu
|
||||
pkg/kubelet/images
|
||||
pkg/kubelet/kuberuntime
|
||||
pkg/kubelet/leaky
|
||||
pkg/kubelet/lifecycle
|
||||
pkg/kubelet/metrics
|
||||
pkg/kubelet/network
|
||||
pkg/kubelet/network/cni
|
||||
pkg/kubelet/network/cni/testing
|
||||
pkg/kubelet/network/hairpin
|
||||
pkg/kubelet/network/hostport
|
||||
pkg/kubelet/network/hostport/testing
|
||||
pkg/kubelet/network/kubenet
|
||||
pkg/kubelet/network/testing
|
||||
pkg/kubelet/pleg
|
||||
pkg/kubelet/pod
|
||||
pkg/kubelet/pod/testing
|
||||
pkg/kubelet/preemption
|
||||
pkg/kubelet/prober
|
||||
pkg/kubelet/prober/results
|
||||
pkg/kubelet/prober/testing
|
||||
pkg/kubelet/qos
|
||||
pkg/kubelet/remote
|
||||
pkg/kubelet/rkt
|
||||
pkg/kubelet/rktshim
|
||||
pkg/kubelet/secret
|
||||
pkg/kubelet/server
|
||||
pkg/kubelet/server/portforward
|
||||
pkg/kubelet/server/remotecommand
|
||||
pkg/kubelet/server/stats
|
||||
pkg/kubelet/server/streaming
|
||||
pkg/kubelet/stats
|
||||
pkg/kubelet/status
|
||||
pkg/kubelet/status/testing
|
||||
pkg/kubelet/sysctl
|
||||
pkg/kubelet/types
|
||||
pkg/kubelet/util
|
||||
pkg/kubelet/util/cache
|
||||
pkg/kubelet/util/queue
|
||||
pkg/kubelet/util/sliceutils
|
||||
pkg/kubemark
|
||||
pkg/master
|
||||
pkg/master/controller/crdregistration
|
||||
pkg/master/tunneler
|
||||
pkg/printers/internalversion
|
||||
pkg/printers/storage
|
||||
pkg/probe
|
||||
pkg/probe/exec
|
||||
pkg/probe/http
|
||||
pkg/probe/tcp
|
||||
pkg/proxy
|
||||
pkg/proxy/apis/kubeproxyconfig
|
||||
pkg/proxy/apis/kubeproxyconfig/v1alpha1
|
||||
pkg/proxy/iptables
|
||||
pkg/proxy/userspace
|
||||
pkg/proxy/util
|
||||
pkg/proxy/winkernel
|
||||
pkg/proxy/winuserspace
|
||||
pkg/quota/evaluator/core
|
||||
pkg/registry/admissionregistration/initializerconfiguration/storage
|
||||
pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage
|
||||
pkg/registry/admissionregistration/rest
|
||||
pkg/registry/admissionregistration/validatingwebhookconfiguration/storage
|
||||
pkg/registry/apps/rest
|
||||
pkg/registry/apps/statefulset
|
||||
pkg/registry/apps/statefulset/storage
|
||||
pkg/registry/authentication/rest
|
||||
pkg/registry/authentication/tokenreview
|
||||
pkg/registry/authorization/localsubjectaccessreview
|
||||
pkg/registry/authorization/rest
|
||||
pkg/registry/authorization/selfsubjectaccessreview
|
||||
pkg/registry/authorization/subjectaccessreview
|
||||
pkg/registry/autoscaling/horizontalpodautoscaler
|
||||
pkg/registry/autoscaling/horizontalpodautoscaler/storage
|
||||
pkg/registry/autoscaling/rest
|
||||
pkg/registry/batch/cronjob
|
||||
pkg/registry/batch/cronjob/storage
|
||||
pkg/registry/batch/job
|
||||
pkg/registry/batch/job/storage
|
||||
pkg/registry/batch/rest
|
||||
pkg/registry/certificates/certificates
|
||||
pkg/registry/certificates/certificates/storage
|
||||
pkg/registry/certificates/rest
|
||||
pkg/registry/core/componentstatus
|
||||
pkg/registry/core/endpoint/storage
|
||||
pkg/registry/core/event
|
||||
pkg/registry/core/event/storage
|
||||
pkg/registry/core/limitrange/storage
|
||||
pkg/registry/core/namespace
|
||||
pkg/registry/core/namespace/storage
|
||||
pkg/registry/core/node
|
||||
pkg/registry/core/node/storage
|
||||
pkg/registry/core/persistentvolume
|
||||
pkg/registry/core/persistentvolume/storage
|
||||
pkg/registry/core/persistentvolumeclaim
|
||||
pkg/registry/core/persistentvolumeclaim/storage
|
||||
pkg/registry/core/pod
|
||||
pkg/registry/core/pod/rest
|
||||
pkg/registry/core/podtemplate/storage
|
||||
pkg/registry/core/replicationcontroller
|
||||
pkg/registry/core/replicationcontroller/storage
|
||||
pkg/registry/core/resourcequota
|
||||
pkg/registry/core/resourcequota/storage
|
||||
pkg/registry/core/rest
|
||||
pkg/registry/core/secret
|
||||
pkg/registry/core/secret/storage
|
||||
pkg/registry/core/service
|
||||
pkg/registry/core/service/allocator
|
||||
pkg/registry/core/service/allocator/storage
|
||||
pkg/registry/core/service/ipallocator
|
||||
pkg/registry/core/service/portallocator
|
||||
pkg/registry/core/service/portallocator/controller
|
||||
pkg/registry/core/service/storage
|
||||
pkg/registry/core/serviceaccount/storage
|
||||
pkg/registry/events/rest
|
||||
pkg/registry/extensions/controller/storage
|
||||
pkg/registry/extensions/daemonset
|
||||
pkg/registry/extensions/daemonset/storage
|
||||
pkg/registry/extensions/deployment
|
||||
pkg/registry/extensions/deployment/storage
|
||||
pkg/registry/extensions/ingress
|
||||
pkg/registry/extensions/ingress/storage
|
||||
pkg/registry/extensions/replicaset
|
||||
pkg/registry/extensions/replicaset/storage
|
||||
pkg/registry/extensions/rest
|
||||
pkg/registry/networking/networkpolicy/storage
|
||||
pkg/registry/networking/rest
|
||||
pkg/registry/policy/poddisruptionbudget
|
||||
pkg/registry/policy/poddisruptionbudget/storage
|
||||
pkg/registry/policy/rest
|
||||
pkg/registry/rbac
|
||||
pkg/registry/rbac/clusterrole
|
||||
pkg/registry/rbac/clusterrole/policybased
|
||||
pkg/registry/rbac/clusterrolebinding
|
||||
pkg/registry/rbac/clusterrolebinding/policybased
|
||||
pkg/registry/rbac/reconciliation
|
||||
pkg/registry/rbac/rest
|
||||
pkg/registry/rbac/role
|
||||
pkg/registry/rbac/role/policybased
|
||||
pkg/registry/rbac/rolebinding
|
||||
pkg/registry/rbac/rolebinding/policybased
|
||||
pkg/registry/rbac/validation
|
||||
pkg/registry/registrytest
|
||||
pkg/registry/scheduling/priorityclass/storage
|
||||
pkg/registry/scheduling/rest
|
||||
pkg/registry/settings/podpreset/storage
|
||||
pkg/registry/settings/rest
|
||||
pkg/registry/storage/rest
|
||||
pkg/registry/storage/storageclass
|
||||
pkg/registry/storage/storageclass/storage
|
||||
pkg/routes
|
||||
pkg/security/apparmor
|
||||
pkg/security/podsecuritypolicy
|
||||
pkg/security/podsecuritypolicy/group
|
||||
pkg/security/podsecuritypolicy/seccomp
|
||||
pkg/security/podsecuritypolicy/selinux
|
||||
pkg/security/podsecuritypolicy/user
|
||||
pkg/security/podsecuritypolicy/util
|
||||
pkg/securitycontext
|
||||
pkg/ssh
|
||||
pkg/util/bandwidth
|
||||
pkg/util/config
|
||||
pkg/util/configz
|
||||
pkg/util/dbus
|
||||
pkg/util/ebtables
|
||||
pkg/util/env
|
||||
pkg/util/file
|
||||
pkg/util/goroutinemap/exponentialbackoff
|
||||
pkg/util/initsystem
|
||||
pkg/util/ipconfig
|
||||
pkg/util/iptables
|
||||
pkg/util/iptables/testing
|
||||
pkg/util/keymutex
|
||||
pkg/util/labels
|
||||
pkg/util/mount
|
||||
pkg/util/netsh/testing
|
||||
pkg/util/node
|
||||
pkg/util/normalizer
|
||||
pkg/util/oom
|
||||
pkg/util/parsers
|
||||
pkg/util/procfs
|
||||
pkg/util/removeall
|
||||
pkg/util/resourcecontainer
|
||||
pkg/util/rlimit
|
||||
pkg/util/selinux
|
||||
pkg/util/strings
|
||||
pkg/util/sysctl
|
||||
pkg/util/sysctl/testing
|
||||
pkg/util/system
|
||||
pkg/util/taints
|
||||
pkg/util/term
|
||||
pkg/util/threading
|
||||
pkg/util/tolerations
|
||||
pkg/util/workqueue/prometheus
|
||||
pkg/version/verflag
|
||||
pkg/volume/aws_ebs
|
||||
pkg/volume/azure_dd
|
||||
pkg/volume/azure_file
|
||||
pkg/volume/cephfs
|
||||
pkg/volume/cinder
|
||||
pkg/volume/configmap
|
||||
pkg/volume/empty_dir
|
||||
pkg/volume/fc
|
||||
pkg/volume/flexvolume
|
||||
pkg/volume/flocker
|
||||
pkg/volume/gce_pd
|
||||
pkg/volume/git_repo
|
||||
pkg/volume/host_path
|
||||
pkg/volume/iscsi
|
||||
pkg/volume/local
|
||||
pkg/volume/nfs
|
||||
pkg/volume/photon_pd
|
||||
pkg/volume/portworx
|
||||
pkg/volume/rbd
|
||||
pkg/volume/scaleio
|
||||
pkg/volume/secret
|
||||
pkg/volume/storageos
|
||||
pkg/volume/testing
|
||||
pkg/volume/util
|
||||
pkg/volume/vsphere_volume
|
||||
plugin/cmd/kube-scheduler/app
|
||||
plugin/pkg/admission/antiaffinity
|
||||
plugin/pkg/admission/eventratelimit/apis/eventratelimit
|
||||
plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1
|
||||
plugin/pkg/admission/initialresources
|
||||
plugin/pkg/admission/limitranger
|
||||
plugin/pkg/admission/noderestriction
|
||||
plugin/pkg/admission/persistentvolume/label
|
||||
plugin/pkg/admission/podnodeselector
|
||||
plugin/pkg/admission/podpreset
|
||||
plugin/pkg/admission/podtolerationrestriction
|
||||
plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction
|
||||
plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1
|
||||
plugin/pkg/admission/resourcequota
|
||||
plugin/pkg/admission/resourcequota/apis/resourcequota
|
||||
plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1
|
||||
plugin/pkg/admission/security
|
||||
plugin/pkg/admission/security/podsecuritypolicy
|
||||
plugin/pkg/admission/serviceaccount
|
||||
plugin/pkg/admission/storageclass/setdefault
|
||||
plugin/pkg/auth/authorizer/node
|
||||
plugin/pkg/auth/authorizer/rbac
|
||||
plugin/pkg/scheduler/algorithm
|
||||
plugin/pkg/scheduler/algorithm/predicates
|
||||
plugin/pkg/scheduler/algorithm/priorities
|
||||
plugin/pkg/scheduler/algorithm/priorities/util
|
||||
plugin/pkg/scheduler/api
|
||||
plugin/pkg/scheduler/api/latest
|
||||
plugin/pkg/scheduler/api/v1
|
||||
plugin/pkg/scheduler/core
|
||||
plugin/pkg/scheduler/factory
|
||||
plugin/pkg/scheduler/metrics
|
||||
plugin/pkg/scheduler/schedulercache
|
||||
plugin/pkg/scheduler/testing
|
||||
plugin/pkg/scheduler/util
|
||||
staging/src/k8s.io/api/admission/v1beta1
|
||||
staging/src/k8s.io/api/admissionregistration/v1alpha1
|
||||
staging/src/k8s.io/api/admissionregistration/v1beta1
|
||||
staging/src/k8s.io/api/apps/v1
|
||||
staging/src/k8s.io/api/apps/v1beta1
|
||||
staging/src/k8s.io/api/apps/v1beta2
|
||||
staging/src/k8s.io/api/authentication/v1
|
||||
staging/src/k8s.io/api/authentication/v1beta1
|
||||
staging/src/k8s.io/api/authorization/v1
|
||||
staging/src/k8s.io/api/authorization/v1beta1
|
||||
staging/src/k8s.io/api/autoscaling/v1
|
||||
staging/src/k8s.io/api/autoscaling/v2beta1
|
||||
staging/src/k8s.io/api/batch/v1
|
||||
staging/src/k8s.io/api/batch/v1beta1
|
||||
staging/src/k8s.io/api/batch/v2alpha1
|
||||
staging/src/k8s.io/api/certificates/v1beta1
|
||||
staging/src/k8s.io/api/core/v1
|
||||
staging/src/k8s.io/api/events/v1beta1
|
||||
staging/src/k8s.io/api/extensions/v1beta1
|
||||
staging/src/k8s.io/api/imagepolicy/v1alpha1
|
||||
staging/src/k8s.io/api/networking/v1
|
||||
staging/src/k8s.io/api/policy/v1beta1
|
||||
staging/src/k8s.io/api/rbac/v1
|
||||
staging/src/k8s.io/api/rbac/v1alpha1
|
||||
staging/src/k8s.io/api/rbac/v1beta1
|
||||
staging/src/k8s.io/api/scheduling/v1alpha1
|
||||
staging/src/k8s.io/api/settings/v1alpha1
|
||||
staging/src/k8s.io/api/storage/v1
|
||||
staging/src/k8s.io/api/storage/v1alpha1
|
||||
staging/src/k8s.io/api/storage/v1beta1
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake
|
||||
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/fake
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/fake
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/features
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource
|
||||
staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
|
||||
staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver
|
||||
staging/src/k8s.io/apimachinery/pkg/api/meta
|
||||
staging/src/k8s.io/apimachinery/pkg/api/testing/fuzzer
|
||||
staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip
|
||||
staging/src/k8s.io/apimachinery/pkg/api/validation
|
||||
staging/src/k8s.io/apimachinery/pkg/api/validation/path
|
||||
staging/src/k8s.io/apimachinery/pkg/apimachinery/announced
|
||||
staging/src/k8s.io/apimachinery/pkg/apimachinery/registered
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/meta/fuzzer
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/testapigroup
|
||||
staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1
|
||||
staging/src/k8s.io/apimachinery/pkg/conversion
|
||||
staging/src/k8s.io/apimachinery/pkg/labels
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/schema
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/serializer
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/recognizer
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing
|
||||
staging/src/k8s.io/apimachinery/pkg/runtime/testing
|
||||
staging/src/k8s.io/apimachinery/pkg/selection
|
||||
staging/src/k8s.io/apimachinery/pkg/test
|
||||
staging/src/k8s.io/apimachinery/pkg/types
|
||||
staging/src/k8s.io/apimachinery/pkg/util/cache
|
||||
staging/src/k8s.io/apimachinery/pkg/util/clock
|
||||
staging/src/k8s.io/apimachinery/pkg/util/diff
|
||||
staging/src/k8s.io/apimachinery/pkg/util/errors
|
||||
staging/src/k8s.io/apimachinery/pkg/util/framer
|
||||
staging/src/k8s.io/apimachinery/pkg/util/httpstream
|
||||
staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy
|
||||
staging/src/k8s.io/apimachinery/pkg/util/intstr
|
||||
staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch
|
||||
staging/src/k8s.io/apimachinery/pkg/util/mergepatch
|
||||
staging/src/k8s.io/apimachinery/pkg/util/net
|
||||
staging/src/k8s.io/apimachinery/pkg/util/proxy
|
||||
staging/src/k8s.io/apimachinery/pkg/util/rand
|
||||
staging/src/k8s.io/apimachinery/pkg/util/remotecommand
|
||||
staging/src/k8s.io/apimachinery/pkg/util/runtime
|
||||
staging/src/k8s.io/apimachinery/pkg/util/sets
|
||||
staging/src/k8s.io/apimachinery/pkg/util/sets/types
|
||||
staging/src/k8s.io/apimachinery/pkg/util/strategicpatch
|
||||
staging/src/k8s.io/apimachinery/pkg/util/uuid
|
||||
staging/src/k8s.io/apimachinery/pkg/util/validation
|
||||
staging/src/k8s.io/apimachinery/pkg/util/wait
|
||||
staging/src/k8s.io/apimachinery/pkg/util/yaml
|
||||
staging/src/k8s.io/apiserver/pkg/admission
|
||||
staging/src/k8s.io/apiserver/pkg/admission/configuration
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts
|
||||
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating
|
||||
staging/src/k8s.io/apiserver/pkg/apis/apiserver
|
||||
staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1
|
||||
staging/src/k8s.io/apiserver/pkg/apis/audit
|
||||
staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1
|
||||
staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1
|
||||
staging/src/k8s.io/apiserver/pkg/apis/audit/validation
|
||||
staging/src/k8s.io/apiserver/pkg/apis/example
|
||||
staging/src/k8s.io/apiserver/pkg/apis/example/v1
|
||||
staging/src/k8s.io/apiserver/pkg/apis/example2
|
||||
staging/src/k8s.io/apiserver/pkg/apis/example2/v1
|
||||
staging/src/k8s.io/apiserver/pkg/audit
|
||||
staging/src/k8s.io/apiserver/pkg/audit/policy
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/group
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/request/websocket
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile
|
||||
staging/src/k8s.io/apiserver/pkg/authentication/user
|
||||
staging/src/k8s.io/apiserver/pkg/authorization/authorizer
|
||||
staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/discovery
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/filters
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/handlers
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/metrics
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/openapi/testing
|
||||
staging/src/k8s.io/apiserver/pkg/endpoints/testing
|
||||
staging/src/k8s.io/apiserver/pkg/features
|
||||
staging/src/k8s.io/apiserver/pkg/registry/generic
|
||||
staging/src/k8s.io/apiserver/pkg/registry/generic/registry
|
||||
staging/src/k8s.io/apiserver/pkg/registry/generic/rest
|
||||
staging/src/k8s.io/apiserver/pkg/registry/generic/testing
|
||||
staging/src/k8s.io/apiserver/pkg/registry/rest
|
||||
staging/src/k8s.io/apiserver/pkg/registry/rest/resttest
|
||||
staging/src/k8s.io/apiserver/pkg/server
|
||||
staging/src/k8s.io/apiserver/pkg/server/healthz
|
||||
staging/src/k8s.io/apiserver/pkg/server/httplog
|
||||
staging/src/k8s.io/apiserver/pkg/server/options
|
||||
staging/src/k8s.io/apiserver/pkg/server/routes/data/swagger
|
||||
staging/src/k8s.io/apiserver/pkg/server/storage
|
||||
staging/src/k8s.io/apiserver/pkg/storage
|
||||
staging/src/k8s.io/apiserver/pkg/storage/errors
|
||||
staging/src/k8s.io/apiserver/pkg/storage/etcd
|
||||
staging/src/k8s.io/apiserver/pkg/storage/etcd/etcdtest
|
||||
staging/src/k8s.io/apiserver/pkg/storage/etcd/metrics
|
||||
staging/src/k8s.io/apiserver/pkg/storage/etcd/testing
|
||||
staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/testingcert
|
||||
staging/src/k8s.io/apiserver/pkg/storage/etcd/util
|
||||
staging/src/k8s.io/apiserver/pkg/storage/storagebackend
|
||||
staging/src/k8s.io/apiserver/pkg/storage/testing
|
||||
staging/src/k8s.io/apiserver/pkg/storage/tests
|
||||
staging/src/k8s.io/apiserver/pkg/storage/value
|
||||
staging/src/k8s.io/apiserver/pkg/util/feature
|
||||
staging/src/k8s.io/apiserver/pkg/util/flag
|
||||
staging/src/k8s.io/apiserver/pkg/util/proxy
|
||||
staging/src/k8s.io/apiserver/pkg/util/trace
|
||||
staging/src/k8s.io/apiserver/pkg/util/webhook
|
||||
staging/src/k8s.io/apiserver/pkg/util/wsstream
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/audit/log
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/keystone
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/testing
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
|
||||
staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook
|
||||
staging/src/k8s.io/client-go/discovery/cached
|
||||
staging/src/k8s.io/client-go/dynamic
|
||||
staging/src/k8s.io/client-go/dynamic/fake
|
||||
staging/src/k8s.io/client-go/examples/workqueue
|
||||
staging/src/k8s.io/client-go/informers/internalinterfaces
|
||||
staging/src/k8s.io/client-go/kubernetes
|
||||
staging/src/k8s.io/client-go/kubernetes/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/scheme
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/apps/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/batch/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/core/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/networking/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1
|
||||
staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake
|
||||
staging/src/k8s.io/client-go/plugin/pkg/auth/authenticator/token/oidc/testing
|
||||
staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc
|
||||
staging/src/k8s.io/client-go/rest
|
||||
staging/src/k8s.io/client-go/rest/fake
|
||||
staging/src/k8s.io/client-go/scale
|
||||
staging/src/k8s.io/client-go/scale/fake
|
||||
staging/src/k8s.io/client-go/scale/scheme
|
||||
staging/src/k8s.io/client-go/scale/scheme/appsint
|
||||
staging/src/k8s.io/client-go/scale/scheme/appsv1beta1
|
||||
staging/src/k8s.io/client-go/scale/scheme/appsv1beta2
|
||||
staging/src/k8s.io/client-go/scale/scheme/autoscalingv1
|
||||
staging/src/k8s.io/client-go/scale/scheme/extensionsint
|
||||
staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1
|
||||
staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1
|
||||
staging/src/k8s.io/client-go/testing
|
||||
staging/src/k8s.io/client-go/tools/cache
|
||||
staging/src/k8s.io/client-go/tools/cache/testing
|
||||
staging/src/k8s.io/client-go/tools/clientcmd
|
||||
staging/src/k8s.io/client-go/tools/clientcmd/api
|
||||
staging/src/k8s.io/client-go/tools/clientcmd/api/latest
|
||||
staging/src/k8s.io/client-go/tools/clientcmd/api/v1
|
||||
staging/src/k8s.io/client-go/tools/leaderelection
|
||||
staging/src/k8s.io/client-go/tools/leaderelection/resourcelock
|
||||
staging/src/k8s.io/client-go/tools/portforward
|
||||
staging/src/k8s.io/client-go/tools/record
|
||||
staging/src/k8s.io/client-go/tools/reference
|
||||
staging/src/k8s.io/client-go/transport
|
||||
staging/src/k8s.io/client-go/util/cert/triple
|
||||
staging/src/k8s.io/client-go/util/exec
|
||||
staging/src/k8s.io/client-go/util/flowcontrol
|
||||
staging/src/k8s.io/client-go/util/integer
|
||||
staging/src/k8s.io/client-go/util/jsonpath
|
||||
staging/src/k8s.io/client-go/util/retry
|
||||
staging/src/k8s.io/client-go/util/testing
|
||||
staging/src/k8s.io/code-generator/cmd/client-gen/args
|
||||
staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake
|
||||
staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme
|
||||
staging/src/k8s.io/code-generator/cmd/client-gen/types
|
||||
staging/src/k8s.io/code-generator/cmd/conversion-gen/generators
|
||||
staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf
|
||||
staging/src/k8s.io/code-generator/cmd/informer-gen/generators
|
||||
staging/src/k8s.io/code-generator/cmd/lister-gen/generators
|
||||
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration
|
||||
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
|
||||
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation
|
||||
staging/src/k8s.io/kube-aggregator/pkg/apiserver
|
||||
staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/internalinterfaces
|
||||
staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces
|
||||
staging/src/k8s.io/kube-aggregator/pkg/cmd/server
|
||||
staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister
|
||||
staging/src/k8s.io/kube-aggregator/pkg/controllers/status
|
||||
staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice
|
||||
staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
|
||||
staging/src/k8s.io/metrics/pkg/apis/custom_metrics
|
||||
staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
|
||||
staging/src/k8s.io/metrics/pkg/apis/metrics
|
||||
staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1
|
||||
staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1
|
||||
staging/src/k8s.io/metrics/pkg/client/custom_metrics
|
||||
staging/src/k8s.io/metrics/pkg/client/custom_metrics/fake
|
||||
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle
|
||||
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
|
||||
staging/src/k8s.io/sample-apiserver/pkg/apiserver
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/fake
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/scheme
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/internalversion/typed/wardle/internalversion/fake
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/fake
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/scheme
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/clientset/versioned/typed/wardle/v1alpha1/fake
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/internalinterfaces
|
||||
staging/src/k8s.io/sample-apiserver/pkg/client/informers/internalversion/internalinterfaces
|
||||
staging/src/k8s.io/sample-apiserver/pkg/cmd/server
|
||||
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
|
||||
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
|
||||
staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller
|
||||
staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1
|
||||
staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned
|
||||
staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake
|
||||
staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme
|
||||
staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1
|
||||
staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake
|
||||
staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces
|
||||
test/e2e
|
||||
test/e2e/apimachinery
|
||||
test/e2e/apps
|
||||
test/e2e/auth
|
||||
test/e2e/autoscaling
|
||||
test/e2e/chaosmonkey
|
||||
test/e2e/common
|
||||
test/e2e/framework
|
||||
test/e2e/framework/metrics
|
||||
test/e2e/framework/timer
|
||||
test/e2e/instrumentation
|
||||
test/e2e/instrumentation/logging
|
||||
test/e2e/instrumentation/monitoring
|
||||
test/e2e/kubectl
|
||||
test/e2e/lifecycle
|
||||
test/e2e/lifecycle/bootstrap
|
||||
test/e2e/multicluster
|
||||
test/e2e/network
|
||||
test/e2e/node
|
||||
test/e2e/scalability
|
||||
test/e2e/scheduling
|
||||
test/e2e/servicecatalog
|
||||
test/e2e/storage
|
||||
test/e2e/ui
|
||||
test/e2e/upgrades
|
||||
test/e2e/upgrades/apps
|
||||
test/e2e/upgrades/storage
|
||||
test/e2e_node
|
||||
test/e2e_node/builder
|
||||
test/e2e_node/environment
|
||||
test/e2e_node/remote
|
||||
test/e2e_node/runner/remote
|
||||
test/e2e_node/services
|
||||
test/e2e_node/system
|
||||
test/images/net/nat
|
||||
test/images/netexec
|
||||
test/images/nettest
|
||||
test/images/no-snat-test
|
||||
test/images/no-snat-test-proxy
|
||||
test/images/resource-consumer
|
||||
test/images/resource-consumer/common
|
||||
test/images/resource-consumer/controller
|
||||
test/integration
|
||||
test/integration/auth
|
||||
test/integration/evictions
|
||||
test/integration/framework
|
||||
test/integration/master
|
||||
test/integration/replicaset
|
||||
test/integration/replicationcontroller
|
||||
test/integration/scheduler
|
||||
test/integration/scheduler_perf
|
||||
test/integration/volume
|
||||
test/list
|
||||
test/utils
|
||||
test/utils/image
|
76
vendor/k8s.io/kubernetes/hack/BUILD
generated
vendored
Normal file
76
vendor/k8s.io/kubernetes/hack/BUILD
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//hack/boilerplate:all-srcs",
|
||||
"//hack/cmd/teststale:all-srcs",
|
||||
"//hack/e2e-internal:all-srcs",
|
||||
"//hack/lib:all-srcs",
|
||||
"//hack/make-rules:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
sh_test(
|
||||
name = "verify-boilerplate",
|
||||
srcs = ["verify-boilerplate.sh"],
|
||||
data = ["//:all-srcs"],
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
# Disable gofmt test until we can figure out how to access the gofmt binary.
|
||||
# https://github.com/bazelbuild/rules_go/issues/511
|
||||
#sh_test(
|
||||
# name = "verify-gofmt",
|
||||
# srcs = ["verify-gofmt.sh"],
|
||||
# data = [
|
||||
# "//:all-srcs",
|
||||
# "@io_bazel_rules_go_toolchain//:toolchain",
|
||||
# ],
|
||||
# tags = ["manual"],
|
||||
#)
|
||||
|
||||
test_suite(
|
||||
name = "verify-all",
|
||||
tags = ["manual"],
|
||||
tests = [
|
||||
"verify-boilerplate",
|
||||
# "verify-gofmt",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "hack",
|
||||
importpath = "k8s.io/kubernetes/hack",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["e2e_test.go"],
|
||||
data = glob(["testdata/**"]),
|
||||
importpath = "k8s.io/kubernetes/hack",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["e2e.go"],
|
||||
importpath = "k8s.io/kubernetes/hack",
|
||||
)
|
29
vendor/k8s.io/kubernetes/hack/OWNERS
generated
vendored
Normal file
29
vendor/k8s.io/kubernetes/hack/OWNERS
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
reviewers:
|
||||
- cblecker
|
||||
- eparis
|
||||
- fejta
|
||||
- ixdy
|
||||
- jbeda
|
||||
- lavalamp
|
||||
- spxtr
|
||||
- zmerlynn
|
||||
- sttts
|
||||
- gmarek
|
||||
- vishh
|
||||
approvers:
|
||||
- cblecker
|
||||
- deads2k
|
||||
- eparis
|
||||
- fabianofranz
|
||||
- fejta
|
||||
- ixdy
|
||||
- jbeda
|
||||
- lavalamp
|
||||
- madhusudancs
|
||||
- pwittrock
|
||||
- shashidharatd
|
||||
- spxtr
|
||||
- zmerlynn
|
||||
- sttts
|
||||
- gmarek
|
||||
- vishh
|
3
vendor/k8s.io/kubernetes/hack/autogenerated_placeholder.txt
generated
vendored
Normal file
3
vendor/k8s.io/kubernetes/hack/autogenerated_placeholder.txt
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
This file is autogenerated, but we've stopped checking such files into the
|
||||
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
|
||||
populate this file.
|
27
vendor/k8s.io/kubernetes/hack/benchmark-go.sh
generated
vendored
Executable file
27
vendor/k8s.io/kubernetes/hack/benchmark-go.sh
generated
vendored
Executable file
@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
make test \
|
||||
WHAT="$*" \
|
||||
KUBE_COVER="" \
|
||||
KUBE_RACE=" " \
|
||||
KUBE_TEST_ARGS="-- -test.run='^X' -benchtime=1s -bench=. -benchmem" \
|
31
vendor/k8s.io/kubernetes/hack/boilerplate/BUILD
generated
vendored
Normal file
31
vendor/k8s.io/kubernetes/hack/boilerplate/BUILD
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
exports_files(glob(["*.txt"]))
|
||||
|
||||
py_test(
|
||||
name = "boilerplate_test",
|
||||
srcs = [
|
||||
"boilerplate.py",
|
||||
"boilerplate_test.py",
|
||||
],
|
||||
data = glob([
|
||||
"*.txt",
|
||||
"test/*",
|
||||
]),
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//hack/boilerplate/test:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
14
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.Dockerfile.txt
generated
vendored
Normal file
14
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.Dockerfile.txt
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
14
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.Makefile.txt
generated
vendored
Normal file
14
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.Makefile.txt
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
16
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt
generated
vendored
Normal file
16
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
/*
|
||||
Copyright YEAR The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
198
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py
generated
vendored
Executable file
198
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py
generated
vendored
Executable file
@ -0,0 +1,198 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import difflib
|
||||
import glob
|
||||
import json
|
||||
import mmap
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"filenames",
|
||||
help="list of files to check, all files if unspecified",
|
||||
nargs='*')
|
||||
|
||||
rootdir = os.path.dirname(__file__) + "/../../"
|
||||
rootdir = os.path.abspath(rootdir)
|
||||
parser.add_argument(
|
||||
"--rootdir", default=rootdir, help="root directory to examine")
|
||||
|
||||
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
|
||||
parser.add_argument(
|
||||
"--boilerplate-dir", default=default_boilerplate_dir)
|
||||
|
||||
parser.add_argument(
|
||||
"-v", "--verbose",
|
||||
help="give verbose output regarding why a file does not pass",
|
||||
action="store_true")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
|
||||
|
||||
def get_refs():
|
||||
refs = {}
|
||||
|
||||
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
|
||||
extension = os.path.basename(path).split(".")[1]
|
||||
|
||||
ref_file = open(path, 'r')
|
||||
ref = ref_file.read().splitlines()
|
||||
ref_file.close()
|
||||
refs[extension] = ref
|
||||
|
||||
return refs
|
||||
|
||||
def file_passes(filename, refs, regexs):
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except Exception as exc:
|
||||
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
|
||||
return False
|
||||
|
||||
data = f.read()
|
||||
f.close()
|
||||
|
||||
basename = os.path.basename(filename)
|
||||
extension = file_extension(filename)
|
||||
if extension != "":
|
||||
ref = refs[extension]
|
||||
else:
|
||||
ref = refs[basename]
|
||||
|
||||
# remove build tags from the top of Go files
|
||||
if extension == "go":
|
||||
p = regexs["go_build_constraints"]
|
||||
(data, found) = p.subn("", data, 1)
|
||||
|
||||
# remove shebang from the top of shell files
|
||||
if extension == "sh":
|
||||
p = regexs["shebang"]
|
||||
(data, found) = p.subn("", data, 1)
|
||||
|
||||
data = data.splitlines()
|
||||
|
||||
# if our test file is smaller than the reference it surely fails!
|
||||
if len(ref) > len(data):
|
||||
print('File %s smaller than reference (%d < %d)' %
|
||||
(filename, len(data), len(ref)),
|
||||
file=verbose_out)
|
||||
return False
|
||||
|
||||
# trim our file to the same number of lines as the reference file
|
||||
data = data[:len(ref)]
|
||||
|
||||
p = regexs["year"]
|
||||
for d in data:
|
||||
if p.search(d):
|
||||
print('File %s is missing the year' % filename, file=verbose_out)
|
||||
return False
|
||||
|
||||
# Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
|
||||
p = regexs["date"]
|
||||
for i, d in enumerate(data):
|
||||
(data[i], found) = p.subn('YEAR', d)
|
||||
if found != 0:
|
||||
break
|
||||
|
||||
# if we don't match the reference at this point, fail
|
||||
if ref != data:
|
||||
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
|
||||
if args.verbose:
|
||||
print(file=verbose_out)
|
||||
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
|
||||
print(line, file=verbose_out)
|
||||
print(file=verbose_out)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def file_extension(filename):
|
||||
return os.path.splitext(filename)[1].split(".")[-1].lower()
|
||||
|
||||
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
|
||||
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
|
||||
"pkg/generated/bindata.go"]
|
||||
|
||||
def normalize_files(files):
|
||||
newfiles = []
|
||||
for pathname in files:
|
||||
if any(x in pathname for x in skipped_dirs):
|
||||
continue
|
||||
newfiles.append(pathname)
|
||||
for i, pathname in enumerate(newfiles):
|
||||
if not os.path.isabs(pathname):
|
||||
newfiles[i] = os.path.join(args.rootdir, pathname)
|
||||
return newfiles
|
||||
|
||||
def get_files(extensions):
|
||||
files = []
|
||||
if len(args.filenames) > 0:
|
||||
files = args.filenames
|
||||
else:
|
||||
for root, dirs, walkfiles in os.walk(args.rootdir):
|
||||
# don't visit certain dirs. This is just a performance improvement
|
||||
# as we would prune these later in normalize_files(). But doing it
|
||||
# cuts down the amount of filesystem walking we do and cuts down
|
||||
# the size of the file list
|
||||
for d in skipped_dirs:
|
||||
if d in dirs:
|
||||
dirs.remove(d)
|
||||
|
||||
for name in walkfiles:
|
||||
pathname = os.path.join(root, name)
|
||||
files.append(pathname)
|
||||
|
||||
files = normalize_files(files)
|
||||
outfiles = []
|
||||
for pathname in files:
|
||||
basename = os.path.basename(pathname)
|
||||
extension = file_extension(pathname)
|
||||
if extension in extensions or basename in extensions:
|
||||
outfiles.append(pathname)
|
||||
return outfiles
|
||||
|
||||
def get_regexs():
|
||||
regexs = {}
|
||||
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
|
||||
regexs["year"] = re.compile( 'YEAR' )
|
||||
# dates can be 2014, 2015, 2016, 2017, or 2018; company holder names can be anything
|
||||
regexs["date"] = re.compile( '(2014|2015|2016|2017|2018)' )
|
||||
# strip // +build \n\n build constraints
|
||||
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
|
||||
# strip #!.* from shell scripts
|
||||
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
|
||||
return regexs
|
||||
|
||||
def main():
|
||||
regexs = get_regexs()
|
||||
refs = get_refs()
|
||||
filenames = get_files(refs.keys())
|
||||
|
||||
for filename in filenames:
|
||||
if not file_passes(filename, refs, regexs):
|
||||
print(filename, file=sys.stdout)
|
||||
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
16
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py.txt
generated
vendored
Normal file
16
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py.txt
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
14
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.sh.txt
generated
vendored
Normal file
14
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.sh.txt
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
52
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate_test.py
generated
vendored
Normal file
52
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate_test.py
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import boilerplate
|
||||
import unittest
|
||||
import StringIO
|
||||
import os
|
||||
import sys
|
||||
|
||||
class TestBoilerplate(unittest.TestCase):
|
||||
"""
|
||||
Note: run this test from the hack/boilerplate directory.
|
||||
|
||||
$ python -m unittest boilerplate_test
|
||||
"""
|
||||
|
||||
def test_boilerplate(self):
|
||||
os.chdir("test/")
|
||||
|
||||
class Args(object):
|
||||
def __init__(self):
|
||||
self.filenames = []
|
||||
self.rootdir = "."
|
||||
self.boilerplate_dir = "../"
|
||||
self.verbose = True
|
||||
|
||||
# capture stdout
|
||||
old_stdout = sys.stdout
|
||||
sys.stdout = StringIO.StringIO()
|
||||
|
||||
boilerplate.args = Args()
|
||||
ret = boilerplate.main()
|
||||
|
||||
output = sorted(sys.stdout.getvalue().split())
|
||||
|
||||
sys.stdout = old_stdout
|
||||
|
||||
self.assertEquals(
|
||||
output, ['././fail.go', '././fail.py'])
|
28
vendor/k8s.io/kubernetes/hack/boilerplate/test/BUILD
generated
vendored
Normal file
28
vendor/k8s.io/kubernetes/hack/boilerplate/test/BUILD
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"fail.go",
|
||||
"pass.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/hack/boilerplate/test",
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
19
vendor/k8s.io/kubernetes/hack/boilerplate/test/fail.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/hack/boilerplate/test/fail.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
fail
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package test
|
17
vendor/k8s.io/kubernetes/hack/boilerplate/test/fail.py
generated
vendored
Normal file
17
vendor/k8s.io/kubernetes/hack/boilerplate/test/fail.py
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# failed
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
17
vendor/k8s.io/kubernetes/hack/boilerplate/test/pass.go
generated
vendored
Normal file
17
vendor/k8s.io/kubernetes/hack/boilerplate/test/pass.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package test
|
17
vendor/k8s.io/kubernetes/hack/boilerplate/test/pass.py
generated
vendored
Normal file
17
vendor/k8s.io/kubernetes/hack/boilerplate/test/pass.py
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
True
|
31
vendor/k8s.io/kubernetes/hack/build-cross.sh
generated
vendored
Executable file
31
vendor/k8s.io/kubernetes/hack/build-cross.sh
generated
vendored
Executable file
@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is a vestigial redirection. Please do not add "real" logic.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
echo "NOTE: $0 has been replaced by 'make cross'"
|
||||
echo
|
||||
echo "The equivalent of this invocation is: "
|
||||
echo " make cross"
|
||||
echo
|
||||
echo
|
||||
make --no-print-directory -C "${KUBE_ROOT}" cross
|
37
vendor/k8s.io/kubernetes/hack/build-go.sh
generated
vendored
Executable file
37
vendor/k8s.io/kubernetes/hack/build-go.sh
generated
vendored
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is a vestigial redirection. Please do not add "real" logic.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
# For help output
|
||||
ARGHELP=""
|
||||
if [[ "$#" -gt 0 ]]; then
|
||||
ARGHELP="WHAT='$@'"
|
||||
fi
|
||||
|
||||
echo "NOTE: $0 has been replaced by 'make' or 'make all'"
|
||||
echo
|
||||
echo "The equivalent of this invocation is: "
|
||||
echo " make ${ARGHELP}"
|
||||
echo
|
||||
echo
|
||||
make --no-print-directory -C "${KUBE_ROOT}" all WHAT="$*"
|
54
vendor/k8s.io/kubernetes/hack/build-ui.sh
generated
vendored
Executable file
54
vendor/k8s.io/kubernetes/hack/build-ui.sh
generated
vendored
Executable file
@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script builds ui assets into a single go datafile
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
cd "${KUBE_ROOT}"
|
||||
|
||||
if ! which go-bindata > /dev/null 2>&1 ; then
|
||||
echo "Cannot find go-bindata. Install with \"go get github.com/jteeuwen/go-bindata/...\""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
readonly TMP_DATAFILE="/tmp/datafile.go"
|
||||
readonly SWAGGER_SRC="third_party/swagger-ui/..."
|
||||
readonly SWAGGER_PKG="swagger"
|
||||
|
||||
function kube::hack::build_ui() {
|
||||
local pkg="$1"
|
||||
local src="$2"
|
||||
local output_file="staging/src/k8s.io/apiserver/pkg/server/routes/data/${pkg}/datafile.go"
|
||||
|
||||
go-bindata -nocompress -o "${output_file}" -prefix ${PWD} -pkg "${pkg}" "${src}"
|
||||
|
||||
local year=$(date +%Y)
|
||||
cat hack/boilerplate/boilerplate.go.txt | sed "s/YEAR/${year}/" > "${TMP_DATAFILE}"
|
||||
echo -e "// generated by hack/build-ui.sh; DO NOT EDIT\n" >> "${TMP_DATAFILE}"
|
||||
cat "${output_file}" >> "${TMP_DATAFILE}"
|
||||
|
||||
gofmt -s -w "${TMP_DATAFILE}"
|
||||
|
||||
mv "${TMP_DATAFILE}" "${output_file}"
|
||||
}
|
||||
|
||||
kube::hack::build_ui "${SWAGGER_PKG}" "${SWAGGER_SRC}"
|
241
vendor/k8s.io/kubernetes/hack/cherry_pick_pull.sh
generated
vendored
Executable file
241
vendor/k8s.io/kubernetes/hack/cherry_pick_pull.sh
generated
vendored
Executable file
@ -0,0 +1,241 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
|
||||
# meta.) Assumes you care about pulls from remote "upstream" and
|
||||
# checks thems out to a branch named:
|
||||
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
declare -r KUBE_ROOT="$(dirname "${BASH_SOURCE}")/.."
|
||||
cd "${KUBE_ROOT}"
|
||||
|
||||
declare -r STARTINGBRANCH=$(git symbolic-ref --short HEAD)
|
||||
declare -r REBASEMAGIC="${KUBE_ROOT}/.git/rebase-apply"
|
||||
DRY_RUN=${DRY_RUN:-""}
|
||||
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
|
||||
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
|
||||
FORK_REMOTE=${FORK_REMOTE:-origin}
|
||||
|
||||
if [[ -z ${GITHUB_USER:-} ]]; then
|
||||
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! which hub > /dev/null; then
|
||||
echo "Can't find 'hub' tool in PATH, please install from https://github.com/github/hub"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$#" -lt 2 ]]; then
|
||||
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
|
||||
echo
|
||||
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
|
||||
echo " Examples:"
|
||||
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
|
||||
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
|
||||
echo
|
||||
echo " Set the DRY_RUN environment var to skip git push and creating PR."
|
||||
echo " This is useful for creating patches to a release branch without making a PR."
|
||||
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
|
||||
echo
|
||||
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
|
||||
echo " This is useful when picking commits containing changes to API documentation."
|
||||
echo
|
||||
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
|
||||
echo " To override the default remote names to what you have locally."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
|
||||
echo "!!! Dirty tree. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -e "${REBASEMAGIC}" ]]; then
|
||||
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -r BRANCH="$1"
|
||||
shift 1
|
||||
declare -r PULLS=( "$@" )
|
||||
|
||||
function join { local IFS="$1"; shift; echo "$*"; }
|
||||
declare -r PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
|
||||
declare -r PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
|
||||
|
||||
echo "+++ Updating remotes..."
|
||||
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
|
||||
|
||||
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
|
||||
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
|
||||
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -r NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
|
||||
declare -r NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
|
||||
declare -r NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
|
||||
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
|
||||
|
||||
cleanbranch=""
|
||||
prtext=""
|
||||
gitamcleanup=false
|
||||
function return_to_kansas {
|
||||
if [[ "${gitamcleanup}" == "true" ]]; then
|
||||
echo
|
||||
echo "+++ Aborting in-progress git am."
|
||||
git am --abort >/dev/null 2>&1 || true
|
||||
fi
|
||||
|
||||
# return to the starting branch and delete the PR text file
|
||||
if [[ -z "${DRY_RUN}" ]]; then
|
||||
echo
|
||||
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
|
||||
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
|
||||
if [[ -n "${cleanbranch}" ]]; then
|
||||
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
if [[ -n "${prtext}" ]]; then
|
||||
rm "${prtext}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
trap return_to_kansas EXIT
|
||||
|
||||
SUBJECTS=()
|
||||
function make-a-pr() {
|
||||
local rel="$(basename "${BRANCH}")"
|
||||
echo
|
||||
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
|
||||
|
||||
# This looks like an unnecessary use of a tmpfile, but it avoids
|
||||
# https://github.com/github/hub/issues/976 Otherwise stdin is stolen
|
||||
# when we shove the heredoc at hub directly, tickling the ioctl
|
||||
# crash.
|
||||
prtext="$(mktemp -t prtext.XXXX)" # cleaned in return_to_kansas
|
||||
cat >"${prtext}" <<EOF
|
||||
Automated cherry pick of ${PULLSUBJ}
|
||||
|
||||
Cherry pick of ${PULLSUBJ} on ${rel}.
|
||||
|
||||
$(printf '%s\n' "${SUBJECTS[@]}")
|
||||
EOF
|
||||
|
||||
hub pull-request -F "${prtext}" -h "${GITHUB_USER}:${NEWBRANCH}" -b "kubernetes:${rel}"
|
||||
}
|
||||
|
||||
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
|
||||
cleanbranch="${NEWBRANCHUNIQ}"
|
||||
|
||||
gitamcleanup=true
|
||||
for pull in "${PULLS[@]}"; do
|
||||
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
|
||||
curl -o "/tmp/${pull}.patch" -sSL "http://pr.k8s.io/${pull}.patch"
|
||||
echo
|
||||
echo "+++ About to attempt cherry pick of PR. To reattempt:"
|
||||
echo " $ git am -3 /tmp/${pull}.patch"
|
||||
echo
|
||||
git am -3 "/tmp/${pull}.patch" || {
|
||||
conflicts=false
|
||||
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|
||||
|| [[ -e "${REBASEMAGIC}" ]]; do
|
||||
conflicts=true # <-- We should have detected conflicts once
|
||||
echo
|
||||
echo "+++ Conflicts detected:"
|
||||
echo
|
||||
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
|
||||
echo
|
||||
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
|
||||
read -p "+++ Proceed (anything but 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
echo
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${conflicts}" != "true" ]]; then
|
||||
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# set the subject
|
||||
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
|
||||
SUBJECTS+=("#${pull}: ${subject}")
|
||||
|
||||
# remove the patch file from /tmp
|
||||
rm -f "/tmp/${pull}.patch"
|
||||
done
|
||||
gitamcleanup=false
|
||||
|
||||
# Re-generate docs (if needed)
|
||||
if [[ -n "${REGENERATE_DOCS}" ]]; then
|
||||
echo
|
||||
echo "Regenerating docs..."
|
||||
if ! hack/generate-docs.sh; then
|
||||
echo
|
||||
echo "hack/generate-docs.sh FAILED to complete."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${DRY_RUN}" ]]; then
|
||||
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
|
||||
echo "To return to the branch you were in when you invoked this script:"
|
||||
echo
|
||||
echo " git checkout ${STARTINGBRANCH}"
|
||||
echo
|
||||
echo "To delete this branch:"
|
||||
echo
|
||||
echo " git branch -D ${NEWBRANCHUNIQ}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if git remote -v | grep ^${FORK_REMOTE} | grep kubernetes/kubernetes.git; then
|
||||
echo "!!! You have ${FORK_REMOTE} configured as your kubernetes/kubernetes.git"
|
||||
echo "This isn't normal. Leaving you with push instructions:"
|
||||
echo
|
||||
echo "+++ First manually push the branch this script created:"
|
||||
echo
|
||||
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
|
||||
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
|
||||
echo
|
||||
make-a-pr
|
||||
cleanbranch=""
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
|
||||
echo
|
||||
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
read -p "+++ Proceed (anything but 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
make-a-pr
|
41
vendor/k8s.io/kubernetes/hack/cmd/teststale/BUILD
generated
vendored
Normal file
41
vendor/k8s.io/kubernetes/hack/cmd/teststale/BUILD
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "teststale",
|
||||
importpath = "k8s.io/kubernetes/hack/cmd/teststale",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["teststale_test.go"],
|
||||
importpath = "k8s.io/kubernetes/hack/cmd/teststale",
|
||||
library = ":go_default_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["teststale.go"],
|
||||
importpath = "k8s.io/kubernetes/hack/cmd/teststale",
|
||||
deps = ["//vendor/github.com/golang/glog:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
209
vendor/k8s.io/kubernetes/hack/cmd/teststale/teststale.go
generated
vendored
Normal file
209
vendor/k8s.io/kubernetes/hack/cmd/teststale/teststale.go
generated
vendored
Normal file
@ -0,0 +1,209 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// teststale checks the staleness of a test binary. go test -c builds a test
|
||||
// binary but it does no staleness check. In other words, every time one runs
|
||||
// go test -c, it compiles the test packages and links the binary even when
|
||||
// nothing has changed. This program helps to mitigate that problem by allowing
|
||||
// to check the staleness of a given test package and its binary.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const usageHelp = "" +
|
||||
`This program checks the staleness of a given test package and its test
|
||||
binary so that one can make a decision about re-building the test binary.
|
||||
|
||||
Usage:
|
||||
teststale -binary=/path/to/test/binary -package=package
|
||||
|
||||
Example:
|
||||
teststale -binary="$HOME/gosrc/bin/e2e.test" -package="k8s.io/kubernetes/test/e2e"
|
||||
|
||||
`
|
||||
|
||||
var (
|
||||
binary = flag.String("binary", "", "filesystem path to the test binary file. Example: \"$HOME/gosrc/bin/e2e.test\"")
|
||||
pkgPath = flag.String("package", "", "import path of the test package in the format used while importing packages. Example: \"k8s.io/kubernetes/test/e2e\"")
|
||||
)
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintln(os.Stderr, usageHelp)
|
||||
fmt.Fprintln(os.Stderr, "Flags:")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
// golist is an interface emulating the `go list` command to get package information.
|
||||
// TODO: Evaluate using `go/build` package instead. It doesn't provide staleness
|
||||
// information, but we can probably run `go list` and `go/build.Import()` concurrently
|
||||
// in goroutines and merge the results. Evaluate if that's faster.
|
||||
type golist interface {
|
||||
pkgInfo(pkgPaths []string) ([]pkg, error)
|
||||
}
|
||||
|
||||
// execmd implements the `golist` interface.
|
||||
type execcmd struct {
|
||||
cmd string
|
||||
args []string
|
||||
env []string
|
||||
}
|
||||
|
||||
func (e *execcmd) pkgInfo(pkgPaths []string) ([]pkg, error) {
|
||||
args := append(e.args, pkgPaths...)
|
||||
cmd := exec.Command(e.cmd, args...)
|
||||
cmd.Env = e.env
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to obtain the metadata output stream: %v", err)
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(stdout)
|
||||
|
||||
// Start executing the command
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("command did not start: %v", err)
|
||||
}
|
||||
|
||||
var pkgs []pkg
|
||||
for {
|
||||
var p pkg
|
||||
if err := dec.Decode(&p); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal metadata for package %s: %v", p.ImportPath, err)
|
||||
}
|
||||
pkgs = append(pkgs, p)
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
return nil, fmt.Errorf("command did not complete: %v", err)
|
||||
}
|
||||
return pkgs, nil
|
||||
}
|
||||
|
||||
type pkg struct {
|
||||
Dir string
|
||||
ImportPath string
|
||||
Target string
|
||||
Stale bool
|
||||
TestGoFiles []string
|
||||
TestImports []string
|
||||
XTestGoFiles []string
|
||||
XTestImports []string
|
||||
}
|
||||
|
||||
func (p *pkg) isNewerThan(cmd golist, buildTime time.Time) bool {
|
||||
// If the package itself is stale, then we have to rebuild the whole thing anyway.
|
||||
if p.Stale {
|
||||
return true
|
||||
}
|
||||
|
||||
// Test for file staleness
|
||||
for _, f := range p.TestGoFiles {
|
||||
if isNewerThan(filepath.Join(p.Dir, f), buildTime) {
|
||||
glog.V(4).Infof("test Go file %s is stale", f)
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, f := range p.XTestGoFiles {
|
||||
if isNewerThan(filepath.Join(p.Dir, f), buildTime) {
|
||||
glog.V(4).Infof("external test Go file %s is stale", f)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
imps := []string{}
|
||||
imps = append(imps, p.TestImports...)
|
||||
imps = append(imps, p.XTestImports...)
|
||||
|
||||
// This calls `go list` the second time. This is required because the first
|
||||
// call to `go list` checks the staleness of the package in question by
|
||||
// looking the non-test dependencies, but it doesn't look at the test
|
||||
// dependencies. However, it returns the list of test dependencies. This
|
||||
// second call to `go list` checks the staleness of all the test
|
||||
// dependencies.
|
||||
pkgs, err := cmd.pkgInfo(imps)
|
||||
if err != nil || len(pkgs) < 1 {
|
||||
glog.V(4).Infof("failed to obtain metadata for packages %s: %v", imps, err)
|
||||
return true
|
||||
}
|
||||
|
||||
for _, p := range pkgs {
|
||||
if p.Stale {
|
||||
glog.V(4).Infof("import %q is stale", p.ImportPath)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func isNewerThan(filename string, buildTime time.Time) bool {
|
||||
stat, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
return stat.ModTime().After(buildTime)
|
||||
}
|
||||
|
||||
// isTestStale checks if the test binary is stale and needs to rebuilt.
|
||||
// Some of the ideas here are inspired by how Go does staleness checks.
|
||||
func isTestStale(cmd golist, binPath, pkgPath string) bool {
|
||||
bStat, err := os.Stat(binPath)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Couldn't obtain the modified time of the binary %s: %v", binPath, err)
|
||||
return true
|
||||
}
|
||||
buildTime := bStat.ModTime()
|
||||
|
||||
pkgs, err := cmd.pkgInfo([]string{pkgPath})
|
||||
if err != nil || len(pkgs) < 1 {
|
||||
glog.V(4).Infof("Couldn't retrieve test package information for package %s: %v", pkgPath, err)
|
||||
return false
|
||||
}
|
||||
|
||||
return pkgs[0].isNewerThan(cmd, buildTime)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
cmd := &execcmd{
|
||||
cmd: "go",
|
||||
args: []string{
|
||||
"list",
|
||||
"-json",
|
||||
},
|
||||
env: os.Environ(),
|
||||
}
|
||||
if !isTestStale(cmd, *binary, *pkgPath) {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
325
vendor/k8s.io/kubernetes/hack/cmd/teststale/teststale_test.go
generated
vendored
Normal file
325
vendor/k8s.io/kubernetes/hack/cmd/teststale/teststale_test.go
generated
vendored
Normal file
@ -0,0 +1,325 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// seed for rand.Source to generate data for files
|
||||
seed int64 = 42
|
||||
|
||||
// 1K binary file
|
||||
binLen = 1024
|
||||
|
||||
// Directory of the test package relative to $GOPATH
|
||||
testImportDir = "example.com/proj/pkg"
|
||||
)
|
||||
|
||||
var (
|
||||
pastHour = time.Now().Add(-1 * time.Hour)
|
||||
|
||||
// The test package we are testing against
|
||||
testPkg = path.Join(testImportDir, "test")
|
||||
)
|
||||
|
||||
// fakegolist implements the `golist` interface providing fake package information for testing.
|
||||
type fakegolist struct {
|
||||
dir string
|
||||
importMap map[string]pkg
|
||||
testFiles []string
|
||||
binfile string
|
||||
}
|
||||
|
||||
func newFakegolist() (*fakegolist, error) {
|
||||
dir, err := ioutil.TempDir("", "teststale")
|
||||
if err != nil {
|
||||
// test can't proceed without a temp directory.
|
||||
return nil, fmt.Errorf("failed to create a temp directory for testing: %v", err)
|
||||
}
|
||||
|
||||
// Set the temp directory as the $GOPATH
|
||||
if err := os.Setenv("GOPATH", dir); err != nil {
|
||||
// can't proceed without pointing the $GOPATH to the temp directory.
|
||||
return nil, fmt.Errorf("failed to set \"$GOPATH\" pointing to %q: %v", dir, err)
|
||||
}
|
||||
|
||||
// Setup $GOPATH directory layout.
|
||||
// Yeah! I am bored of repeatedly writing "if err != nil {}"!
|
||||
if os.MkdirAll(filepath.Join(dir, "bin"), 0750) != nil ||
|
||||
os.MkdirAll(filepath.Join(dir, "pkg", "linux_amd64"), 0750) != nil ||
|
||||
os.MkdirAll(filepath.Join(dir, "src"), 0750) != nil {
|
||||
return nil, fmt.Errorf("failed to setup the $GOPATH directory structure")
|
||||
}
|
||||
|
||||
// Create a temp file to represent the test binary.
|
||||
binfile, err := ioutil.TempFile("", "testbin")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create the temp file to represent the test binary: %v", err)
|
||||
}
|
||||
|
||||
// Could have used crypto/rand instead, but it doesn't matter.
|
||||
rr := rand.New(rand.NewSource(42))
|
||||
bin := make([]byte, binLen)
|
||||
if _, err = rr.Read(bin); err != nil {
|
||||
return nil, fmt.Errorf("couldn't read from the random source: %v", err)
|
||||
}
|
||||
if _, err := binfile.Write(bin); err != nil {
|
||||
return nil, fmt.Errorf("couldn't write to the binary file %q: %v", binfile.Name(), err)
|
||||
}
|
||||
if err := binfile.Close(); err != nil {
|
||||
// It is arguable whether this should be fatal.
|
||||
return nil, fmt.Errorf("failed to close the binary file %q: %v", binfile.Name(), err)
|
||||
}
|
||||
|
||||
if err := os.Chtimes(binfile.Name(), time.Now(), time.Now()); err != nil {
|
||||
return nil, fmt.Errorf("failed to modify the mtime of the binary file %q: %v", binfile.Name(), err)
|
||||
}
|
||||
|
||||
// Create test source files directory.
|
||||
testdir := filepath.Join(dir, "src", testPkg)
|
||||
if err := os.MkdirAll(testdir, 0750); err != nil {
|
||||
return nil, fmt.Errorf("failed to create test source directory %q: %v", testdir, err)
|
||||
}
|
||||
|
||||
fgl := &fakegolist{
|
||||
dir: dir,
|
||||
importMap: map[string]pkg{
|
||||
"example.com/proj/pkg/test": {
|
||||
Dir: path.Join(dir, "src", testPkg),
|
||||
ImportPath: testPkg,
|
||||
Target: path.Join(dir, "pkg", "linux_amd64", testImportDir, "test.a"),
|
||||
Stale: false,
|
||||
TestGoFiles: []string{
|
||||
"foo_test.go",
|
||||
"bar_test.go",
|
||||
},
|
||||
TestImports: []string{
|
||||
"example.com/proj/pkg/p1",
|
||||
"example.com/proj/pkg/p1/c11",
|
||||
"example.com/proj/pkg/p2",
|
||||
"example.com/proj/cmd/p3/c12/c23",
|
||||
"strings",
|
||||
"testing",
|
||||
},
|
||||
XTestGoFiles: []string{
|
||||
"xfoo_test.go",
|
||||
"xbar_test.go",
|
||||
"xbaz_test.go",
|
||||
},
|
||||
XTestImports: []string{
|
||||
"example.com/proj/pkg/test",
|
||||
"example.com/proj/pkg/p1",
|
||||
"example.com/proj/cmd/p3/c12/c23",
|
||||
"os",
|
||||
"testing",
|
||||
},
|
||||
},
|
||||
"example.com/proj/pkg/p1": {Stale: false},
|
||||
"example.com/proj/pkg/p1/c11": {Stale: false},
|
||||
"example.com/proj/pkg/p2": {Stale: false},
|
||||
"example.com/proj/cmd/p3/c12/c23": {Stale: false},
|
||||
"strings": {Stale: false},
|
||||
"testing": {Stale: false},
|
||||
"os": {Stale: false},
|
||||
},
|
||||
testFiles: []string{
|
||||
"foo_test.go",
|
||||
"bar_test.go",
|
||||
"xfoo_test.go",
|
||||
"xbar_test.go",
|
||||
"xbaz_test.go",
|
||||
},
|
||||
binfile: binfile.Name(),
|
||||
}
|
||||
|
||||
// Create test source files.
|
||||
for _, fn := range fgl.testFiles {
|
||||
fp := filepath.Join(testdir, fn)
|
||||
if _, err := os.Create(fp); err != nil {
|
||||
return nil, fmt.Errorf("failed to create the test file %q: %v", fp, err)
|
||||
}
|
||||
if err := os.Chtimes(fp, time.Now(), pastHour); err != nil {
|
||||
return nil, fmt.Errorf("failed to modify the mtime of the test file %q: %v", binfile.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
return fgl, nil
|
||||
}
|
||||
|
||||
func (fgl *fakegolist) pkgInfo(pkgPaths []string) ([]pkg, error) {
|
||||
var pkgs []pkg
|
||||
for _, path := range pkgPaths {
|
||||
p, ok := fgl.importMap[path]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("package %q not found", path)
|
||||
}
|
||||
pkgs = append(pkgs, p)
|
||||
}
|
||||
return pkgs, nil
|
||||
}
|
||||
|
||||
func (fgl *fakegolist) chMtime(filename string, mtime time.Time) error {
|
||||
for _, fn := range fgl.testFiles {
|
||||
if fn == filename {
|
||||
fp := filepath.Join(fgl.dir, "src", testPkg, fn)
|
||||
if err := os.Chtimes(fp, time.Now(), mtime); err != nil {
|
||||
return fmt.Errorf("failed to modify the mtime of %q: %v", filename, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("file %q not found", filename)
|
||||
}
|
||||
|
||||
func (fgl *fakegolist) chStale(pkg string, stale bool) error {
|
||||
if p, ok := fgl.importMap[pkg]; ok {
|
||||
p.Stale = stale
|
||||
fgl.importMap[pkg] = p
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("package %q not found", pkg)
|
||||
}
|
||||
|
||||
func (fgl *fakegolist) cleanup() {
|
||||
os.RemoveAll(fgl.dir)
|
||||
os.Remove(fgl.binfile)
|
||||
}
|
||||
|
||||
func TestIsTestStale(t *testing.T) {
|
||||
cases := []struct {
|
||||
fileMtime map[string]time.Time
|
||||
pkgStaleness map[string]bool
|
||||
result bool
|
||||
}{
|
||||
// Basic test: binary is fresh, all modifications were before the binary was built.
|
||||
{
|
||||
result: false,
|
||||
},
|
||||
// A local test file is new, hence binary must be stale.
|
||||
{
|
||||
fileMtime: map[string]time.Time{
|
||||
"foo_test.go": time.Now().Add(1 * time.Hour),
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
// Test package is new, so binary must be stale.
|
||||
{
|
||||
pkgStaleness: map[string]bool{
|
||||
"example.com/proj/pkg/test": true,
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
// Test package dependencies are new, so binary must be stale.
|
||||
{
|
||||
pkgStaleness: map[string]bool{
|
||||
"example.com/proj/cmd/p3/c12/c23": true,
|
||||
"strings": true,
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
// External test files are new, hence binary must be stale.
|
||||
{
|
||||
fileMtime: map[string]time.Time{
|
||||
"xfoo_test.go": time.Now().Add(1 * time.Hour),
|
||||
"xbar_test.go": time.Now().Add(2 * time.Hour),
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
// External test dependency is new, so binary must be stale.
|
||||
{
|
||||
pkgStaleness: map[string]bool{
|
||||
"os": true,
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
// Multiple source files and dependencies are new, so binary must be stale.
|
||||
{
|
||||
fileMtime: map[string]time.Time{
|
||||
"foo_test.go": time.Now().Add(1 * time.Hour),
|
||||
"xfoo_test.go": time.Now().Add(2 * time.Hour),
|
||||
"xbar_test.go": time.Now().Add(3 * time.Hour),
|
||||
},
|
||||
pkgStaleness: map[string]bool{
|
||||
"example.com/proj/pkg/p1": true,
|
||||
"example.com/proj/pkg/p1/c11": true,
|
||||
"example.com/proj/pkg/p2": true,
|
||||
"example.com/proj/cmd/p3/c12/c23": true,
|
||||
"strings": true,
|
||||
"os": true,
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
// Everything is new, so binary must be stale.
|
||||
{
|
||||
fileMtime: map[string]time.Time{
|
||||
"foo_test.go": time.Now().Add(3 * time.Hour),
|
||||
"bar_test.go": time.Now().Add(1 * time.Hour),
|
||||
"xfoo_test.go": time.Now().Add(2 * time.Hour),
|
||||
"xbar_test.go": time.Now().Add(1 * time.Hour),
|
||||
"xbaz_test.go": time.Now().Add(2 * time.Hour),
|
||||
},
|
||||
pkgStaleness: map[string]bool{
|
||||
"example.com/proj/pkg/p1": true,
|
||||
"example.com/proj/pkg/p1/c11": true,
|
||||
"example.com/proj/pkg/p2": true,
|
||||
"example.com/proj/cmd/p3/c12/c23": true,
|
||||
"example.com/proj/pkg/test": true,
|
||||
"strings": true,
|
||||
"testing": true,
|
||||
"os": true,
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
fgl, err := newFakegolist()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to setup the test: %v", err)
|
||||
}
|
||||
defer fgl.cleanup()
|
||||
|
||||
for fn, mtime := range tc.fileMtime {
|
||||
if err := fgl.chMtime(fn, mtime); err != nil {
|
||||
t.Fatalf("failed to change the mtime of %q: %v", fn, err)
|
||||
}
|
||||
}
|
||||
|
||||
for pkg, stale := range tc.pkgStaleness {
|
||||
if err := fgl.chStale(pkg, stale); err != nil {
|
||||
t.Fatalf("failed to change the staleness of %q: %v", pkg, err)
|
||||
}
|
||||
}
|
||||
|
||||
if tc.result != isTestStale(fgl, fgl.binfile, testPkg) {
|
||||
if tc.result {
|
||||
t.Errorf("Expected test package %q to be stale", testPkg)
|
||||
} else {
|
||||
t.Errorf("Expected test package %q to be not stale", testPkg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
34
vendor/k8s.io/kubernetes/hack/dev-build-and-push.sh
generated
vendored
Executable file
34
vendor/k8s.io/kubernetes/hack/dev-build-and-push.sh
generated
vendored
Executable file
@ -0,0 +1,34 @@
|
||||
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will build a dev release and push it to an existing cluster.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

# Build a dev release.
# NOTE: the original checked "$?" after the make invocation, but under
# `set -o errexit` a failing make aborts the script before that check can
# ever run, so the error message was unreachable. `if ! make ...` keeps
# the message reachable without disabling errexit.
if ! make -f "${KUBE_ROOT}/Makefile" quick-release; then
  echo "Building a release failed!"
  exit 1
fi

# Now push this out to the cluster
"${KUBE_ROOT}/cluster/kube-push.sh"
|
35
vendor/k8s.io/kubernetes/hack/dev-build-and-up.sh
generated
vendored
Executable file
35
vendor/k8s.io/kubernetes/hack/dev-build-and-up.sh
generated
vendored
Executable file
@ -0,0 +1,35 @@
|
||||
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will build a dev release and bring up a new cluster with that
# release.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

# Build a dev release.
# NOTE: the original checked "$?" after the make invocation, but under
# `set -o errexit` a failing make aborts the script before that check can
# ever run, so the error message was unreachable. `if ! make ...` keeps
# the message reachable without disabling errexit.
if ! make -f "${KUBE_ROOT}/Makefile" quick-release; then
  echo "Building the release failed!"
  exit 1
fi

# Now bring a new cluster up with that release.
"${KUBE_ROOT}/cluster/kube-up.sh"
|
49
vendor/k8s.io/kubernetes/hack/dev-push-hyperkube.sh
generated
vendored
Executable file
49
vendor/k8s.io/kubernetes/hack/dev-push-hyperkube.sh
generated
vendored
Executable file
@ -0,0 +1,49 @@
|
||||
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script builds hyperkube and then the hyperkube image.
# REGISTRY and VERSION must be set.
# Example usage:
#   $ export REGISTRY=gcr.io/someone
#   $ export VERSION=v1.4.0-testfix
#   ./hack/dev-push-hyperkube.sh
# That will build and push gcr.io/someone/hyperkube-amd64:v1.4.0-testfix

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
source "${KUBE_ROOT}/build/common.sh"

# Validate required environment up front so failures are immediate and clear.
# NOTE: `exit -1` (as originally written) is not a valid POSIX exit status;
# the shell would truncate it mod 256. Use the conventional `exit 1`.
if [[ -z "${REGISTRY:-}" ]]; then
	echo "REGISTRY must be set"
	exit 1
fi
if [[ -z "${VERSION:-}" ]]; then
	echo "VERSION must be set"
	exit 1
fi

IMAGE="${REGISTRY}/hyperkube-amd64:${VERSION}"

# Build hyperkube inside the dockerized build environment, then copy the
# binaries back out so the image build below can pick them up.
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command make WHAT=cmd/hyperkube
kube::build::copy_output

make -C "${KUBE_ROOT}/cluster/images/hyperkube" build
docker push "${IMAGE}"
|
14
vendor/k8s.io/kubernetes/hack/e2e-internal/BUILD
generated
vendored
Normal file
14
vendor/k8s.io/kubernetes/hack/e2e-internal/BUILD
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# Bazel package boilerplate for this directory: exposes its files to the
# rest of the Kubernetes build graph.
package(default_visibility = ["//visibility:public"])

# Every file in this directory; kept private and re-exported via :all-srcs.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Aggregation target consumed by parent packages' all-srcs rollups.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
|
33
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-cluster-size.sh
generated
vendored
Executable file
33
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-cluster-size.sh
generated
vendored
Executable file
@ -0,0 +1,33 @@
|
||||
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Prints the number of nodes in the e2e cluster.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..

# Default KUBECTL and KUBE_CONFIG_FILE if the caller did not provide them.
: "${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}"
: "${KUBE_CONFIG_FILE:="config-test.sh"}"

export KUBECTL KUBE_CONFIG_FILE

source "${KUBE_ROOT}/cluster/kube-util.sh"

prepare-e2e

#TODO(colhom): spec and implement federated version of this
# ${KUBECTL} is intentionally unquoted so it may expand to a command plus
# extra arguments.
${KUBECTL} get nodes --no-headers | wc -l
|
32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-down.sh
generated
vendored
Executable file
32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-down.sh
generated
vendored
Executable file
@ -0,0 +1,32 @@
|
||||
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Tears down the e2e test cluster.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..

# Default KUBECTL and KUBE_CONFIG_FILE if the caller did not provide them.
: "${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}"
: "${KUBE_CONFIG_FILE:="config-test.sh"}"

export KUBECTL KUBE_CONFIG_FILE

source "${KUBE_ROOT}/cluster/kube-util.sh"

prepare-e2e

test-teardown
|
35
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-grow-cluster.sh
generated
vendored
Executable file
35
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-grow-cluster.sh
generated
vendored
Executable file
@ -0,0 +1,35 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Grows the e2e cluster by sourcing e2e-up.sh with overridden settings.

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..

# Positional parameters optionally override the matching environment
# variables: $1 zone, $2 multizone flag, $3 replicate-existing-master,
# $4 use-existing-master. `[[ -n ... ]]` replaces the non-idiomatic
# `[[ ! -z ... ]]`.
if [[ -n "${1:-}" ]]; then
  export KUBE_GCE_ZONE="${1}"
fi
if [[ -n "${2:-}" ]]; then
  export MULTIZONE="${2}"
fi
if [[ -n "${3:-}" ]]; then
  export KUBE_REPLICATE_EXISTING_MASTER="${3}"
fi
if [[ -n "${4:-}" ]]; then
  export KUBE_USE_EXISTING_MASTER="${4}"
fi
if [[ -z "${NUM_NODES:-}" ]]; then
  export NUM_NODES=3
fi

source "${KUBE_ROOT}/hack/e2e-internal/e2e-up.sh"
|
33
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-shrink-cluster.sh
generated
vendored
Executable file
33
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-shrink-cluster.sh
generated
vendored
Executable file
@ -0,0 +1,33 @@
|
||||
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Shrinks the e2e cluster by sourcing e2e-down.sh with overridden settings.

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..

# Positional parameters optionally override the matching environment
# variables: $1 zone, $2 multizone flag, $3 delete-nodes,
# $4 use-existing-master. `[[ -n ... ]]` replaces the non-idiomatic
# `[[ ! -z ... ]]`.
if [[ -n "${1:-}" ]]; then
  export KUBE_GCE_ZONE="${1}"
fi
if [[ -n "${2:-}" ]]; then
  export MULTIZONE="${2}"
fi
if [[ -n "${3:-}" ]]; then
  export KUBE_DELETE_NODES="${3}"
fi
if [[ -n "${4:-}" ]]; then
  export KUBE_USE_EXISTING_MASTER="${4}"
fi

source "${KUBE_ROOT}/hack/e2e-internal/e2e-down.sh"
|
||||
|
32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-status.sh
generated
vendored
Executable file
32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-status.sh
generated
vendored
Executable file
@ -0,0 +1,32 @@
|
||||
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Reports the client/server version of the e2e cluster.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..

# Default KUBECTL and KUBE_CONFIG_FILE if the caller did not provide them.
: "${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}"
: "${KUBE_CONFIG_FILE:="config-test.sh"}"

export KUBECTL KUBE_CONFIG_FILE

source "${KUBE_ROOT}/cluster/kube-util.sh"

prepare-e2e

# ${KUBECTL} is intentionally unquoted so it may expand to a command plus
# extra arguments.
${KUBECTL} version
|
32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-up.sh
generated
vendored
Executable file
32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-up.sh
generated
vendored
Executable file
@ -0,0 +1,32 @@
|
||||
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Brings up the e2e test cluster.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..

# Default KUBECTL and KUBE_CONFIG_FILE if the caller did not provide them.
: "${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}"
: "${KUBE_CONFIG_FILE:="config-test.sh"}"

export KUBECTL KUBE_CONFIG_FILE

source "${KUBE_ROOT}/cluster/kube-util.sh"

prepare-e2e

test-setup
|
43
vendor/k8s.io/kubernetes/hack/e2e-node-test.sh
generated
vendored
Executable file
43
vendor/k8s.io/kubernetes/hack/e2e-node-test.sh
generated
vendored
Executable file
@ -0,0 +1,43 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is a vestigial redirection.  Please do not add "real" logic.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

# For help output
ARGHELP=""
if [[ -n "${FOCUS:-}" ]]; then
  ARGHELP="FOCUS='${FOCUS}' "
fi
if [[ -n "${SKIP:-}" ]]; then
  ARGHELP="${ARGHELP}SKIP='${SKIP}'"
fi

echo "NOTE: $0 has been replaced by 'make test-e2e-node'"
echo
echo "This script supports a number of parameters passed as environment variables."
echo "Please see the Makefile for more details."
echo
echo "The equivalent of this invocation is: "
echo "    make test-e2e-node ${ARGHELP}"
echo
echo
# Quote FOCUS/SKIP: both are Ginkgo regexes that may contain spaces; the
# original unquoted expansion would word-split them into separate make args.
make --no-print-directory -C "${KUBE_ROOT}" test-e2e-node FOCUS="${FOCUS:-}" SKIP="${SKIP:-}"
|
168
vendor/k8s.io/kubernetes/hack/e2e.go
generated
vendored
Normal file
168
vendor/k8s.io/kubernetes/hack/e2e.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// User-interface for test-infra/kubetest/e2e.go
|
||||
// Equivalent to go get -u k8s.io/test-infra/kubetest && kubetest "${@}"
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// flags holds the options understood by the e2e.go shim itself; args
// collects everything destined for the kubetest binary.
type flags struct {
	get  bool
	old  time.Duration
	args []string
}

const (
	getDefault = true
	oldDefault = 24 * time.Hour
)

// parse splits args (os.Args-shaped: program name first) into shim
// options and pass-through kubetest arguments. Unknown flags are not an
// error: for backward compatibility they are forwarded to kubetest after
// printing a notice.
func parse(args []string) (flags, error) {
	fs := flag.NewFlagSet(args[0], flag.ContinueOnError)
	get := fs.Bool("get", getDefault, "go get -u kubetest if old or not installed")
	old := fs.Duration("old", oldDefault, "Consider kubetest old if it exceeds this")

	var rest []string
	switch err := fs.Parse(args[1:]); {
	case err == flag.ErrHelp:
		// -h/--help: append a note about the -- separator and bail out.
		os.Stderr.WriteString(" -- kubetestArgs\n")
		os.Stderr.WriteString(" All flags after -- are passed to the kubetest program\n")
		return flags{}, err
	case err != nil:
		// Legacy invocation without the -- separator: warn, then forward
		// everything from the first unparsed argument onward to kubetest.
		log.Print("NOTICE: go run hack/e2e.go is now a shim for test-infra/kubetest")
		log.Printf(" Usage: go run hack/e2e.go [--get=%v] [--old=%v] -- [KUBETEST_ARGS]", getDefault, oldDefault)
		log.Print(" The separator is required to use --get or --old flags")
		log.Print(" The -- flag separator also suppresses this message")
		rest = args[len(args)-fs.NArg()-1:]
	default:
		rest = fs.Args()
	}
	return flags{*get, *old, rest}, nil
}
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||
f, err := parse(os.Args)
|
||||
if err != nil {
|
||||
os.Exit(2)
|
||||
}
|
||||
t := newTester()
|
||||
k, err := t.getKubetest(f.get, f.old)
|
||||
if err != nil {
|
||||
log.Fatalf("err: %v", err)
|
||||
}
|
||||
log.Printf("Calling kubetest %v...", strings.Join(f.args, " "))
|
||||
if err = t.wait(k, f.args...); err != nil {
|
||||
log.Fatalf("err: %v", err)
|
||||
}
|
||||
log.Print("Done")
|
||||
}
|
||||
|
||||
// wait runs cmd with the given args, forwarding its stdout/stderr and
// relaying an interrupt (Ctrl-C) from this process to the child, then
// blocks until the child exits.
func wait(cmd string, args ...string) error {
	interrupts := make(chan os.Signal, 1)
	signal.Notify(interrupts, os.Interrupt)

	child := exec.Command(cmd, args...)
	child.Stdout = os.Stdout
	child.Stderr = os.Stderr
	if err := child.Start(); err != nil {
		return err
	}

	// Forward a single interrupt to the child so it can clean up.
	go func() {
		sig := <-interrupts
		if err := child.Process.Signal(sig); err != nil {
			log.Fatalf("could not send %s signal %s: %v", cmd, sig, err)
		}
	}()

	return child.Wait()
}
|
||||
|
||||
// Struct that allows unit tests to override functionality.
|
||||
type tester struct {
|
||||
// os.Stat
|
||||
stat func(string) (os.FileInfo, error)
|
||||
// exec.LookPath
|
||||
lookPath func(string) (string, error)
|
||||
// build.Default.GOPATH
|
||||
goPath string
|
||||
wait func(string, ...string) error
|
||||
}
|
||||
|
||||
func newTester() tester {
|
||||
return tester{os.Stat, exec.LookPath, build.Default.GOPATH, wait}
|
||||
}
|
||||
|
||||
// Try to find kubetest, either GOPATH/bin/kubetest or PATH
|
||||
func (t tester) lookKubetest() (string, error) {
|
||||
// Check for kubetest in GOPATH/bin
|
||||
if t.goPath != "" {
|
||||
p := filepath.Join(t.goPath, "bin", "kubetest")
|
||||
_, err := t.stat(p)
|
||||
if err == nil {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Check for kubetest in PATH
|
||||
p, err := t.lookPath("kubetest")
|
||||
return p, err
|
||||
}
|
||||
|
||||
// Upgrade if kubetest does not exist or has not been updated today
|
||||
func (t tester) getKubetest(get bool, old time.Duration) (string, error) {
|
||||
// Find kubetest installation
|
||||
p, err := t.lookKubetest()
|
||||
if err == nil && !get {
|
||||
return p, nil // Installed, Skip update
|
||||
}
|
||||
if err == nil {
|
||||
// Installed recently?
|
||||
if s, err := t.stat(p); err != nil {
|
||||
return p, err // Cannot stat
|
||||
} else if time.Since(s.ModTime()) <= old {
|
||||
return p, nil // Recently updated
|
||||
} else if t.goPath == "" {
|
||||
log.Print("Skipping kubetest upgrade because $GOPATH is empty")
|
||||
return p, nil
|
||||
}
|
||||
log.Printf("The kubetest binary is older than %s.", old)
|
||||
}
|
||||
if t.goPath == "" {
|
||||
return "", fmt.Errorf("Cannot install kubetest until $GOPATH is set")
|
||||
}
|
||||
log.Print("Updating kubetest binary...")
|
||||
cmd := []string{"go", "get", "-u", "k8s.io/test-infra/kubetest"}
|
||||
if err = t.wait(cmd[0], cmd[1:]...); err != nil {
|
||||
return "", fmt.Errorf("%s: %v", strings.Join(cmd, " "), err) // Could not upgrade
|
||||
}
|
||||
if p, err = t.lookKubetest(); err != nil {
|
||||
return "", err // Cannot find kubetest
|
||||
} else if err = t.wait("touch", p); err != nil {
|
||||
return "", err // Could not touch
|
||||
} else {
|
||||
return p, nil // Updated modtime
|
||||
}
|
||||
}
|
363
vendor/k8s.io/kubernetes/hack/e2e_test.go
generated
vendored
Normal file
363
vendor/k8s.io/kubernetes/hack/e2e_test.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Unit tests for hack/e2e.go shim
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileInfo is a minimal os.FileInfo stub for tests: the only meaningful
// datum is its configured modification time; every other accessor
// returns a fixed value.
type FileInfo struct {
	when time.Time
}

// Name reports a constant placeholder file name.
func (f FileInfo) Name() string { return "fake-file" }

// Size reports a zero-byte file.
func (f FileInfo) Size() int64 { return 0 }

// Mode reports an empty file mode.
func (f FileInfo) Mode() os.FileMode { return 0 }

// ModTime reports the configured timestamp.
func (f FileInfo) ModTime() time.Time { return f.when }

// IsDir always reports a regular file.
func (f FileInfo) IsDir() bool { return false }

// Sys returns the stub itself as the underlying data source.
func (f FileInfo) Sys() interface{} { return f }
|
||||
|
||||
// TestParse table-tests the flag/args splitting of parse: defaulting,
// the `--` separator, legacy invocations without the separator, and the
// -h help path (which must surface flag.ErrHelp).
func TestParse(t *testing.T) {
	cases := []struct {
		args     []string // os.Args-shaped input (program name first)
		expected flags    // expected parsed result
		err      error    // expected error (flag.ErrHelp or nil)
	}{
		{
			[]string{"hello", "world"},
			flags{getDefault, oldDefault, []string{"world"}},
			nil,
		},
		{
			[]string{"hello", "--", "--venus", "--karaoke"},
			flags{getDefault, oldDefault, []string{"--venus", "--karaoke"}},
			nil,
		},
		{
			// Unknown flags without `--` are forwarded, not rejected.
			[]string{"hello", "--alpha", "--beta"},
			flags{getDefault, oldDefault, []string{"--alpha", "--beta"}},
			nil,
		},
		{
			[]string{"so", "--get", "--boo"},
			flags{true, oldDefault, []string{"--boo"}},
			nil,
		},
		{
			[]string{"omg", "--get=false", "--", "ugh"},
			flags{false, oldDefault, []string{"ugh"}},
			nil,
		},
		{
			[]string{"wee", "--old=5m", "--get"},
			flags{true, 5 * time.Minute, []string{}},
			nil,
		},
		{
			// A shim flag after an unknown flag is treated as kubetest's.
			[]string{"fun", "--times", "--old=666s"},
			flags{getDefault, oldDefault, []string{"--times", "--old=666s"}},
			nil,
		},
		{
			[]string{"wut", "-h"},
			flags{},
			flag.ErrHelp,
		},
		{
			// -h after the separator belongs to kubetest, not the shim.
			[]string{"wut", "--", "-h"},
			flags{getDefault, oldDefault, []string{"-h"}},
			nil,
		},
	}

	for i, c := range cases {
		a, err := parse(c.args)
		if err != c.err {
			t.Errorf("%d: a=%v != e%v", i, err, c.err)
		}
		e := c.expected
		if a.get != e.get {
			t.Errorf("%d: a=%v != e=%v", i, a.get, e.get)
		}
		if a.old != e.old {
			t.Errorf("%d: a=%v != e=%v", i, a.old, e.old)
		}
		if !reflect.DeepEqual(a.args, e.args) {
			t.Errorf("%d: a=%v != e=%v", i, a.args, e.args)
		}
	}
}
|
||||
|
||||
// TestLook verifies lookKubetest's search order: GOPATH/bin is consulted
// first (only when GOPATH is set), falling back to PATH. Sentinel errors
// double as "should not be called" tripwires for the injected fakes.
func TestLook(t *testing.T) {
	lpf := errors.New("LookPath failed")
	sf := errors.New("Stat failed")
	lpnc := errors.New("LookPath should not be called")
	snc := errors.New("Stat should not be called")
	cases := []struct {
		stat     error  // error returned by the fake stat
		lookPath error  // error returned by the fake lookPath
		goPath   string // simulated GOPATH
		expected error  // expected error from lookKubetest
	}{
		{ // GOPATH set, stat succeeds returns gopath
			stat:     nil,
			lookPath: lpnc,
			goPath:   "fake-gopath/",
			expected: nil,
		},
		{ // GOPATH set, stat fails, terms on lookpath
			stat:     sf,
			lookPath: lpf,
			goPath:   "fake-gopath/",
			expected: lpf,
		},
		{ // GOPATH unset, stat not called, terms on lookpath
			stat:     snc,
			lookPath: lpf,
			goPath:   "",
			expected: lpf,
		},
		{ // GOPATH unset, stat not called, lookpath matches
			stat:     snc,
			lookPath: nil,
			goPath:   "",
			expected: nil,
		},
	}

	for _, c := range cases {
		// Positional tester literal: stat, lookPath, goPath, wait.
		l := tester{
			func(string) (os.FileInfo, error) {
				return FileInfo{}, c.stat
			},
			func(string) (string, error) {
				if c.lookPath != nil {
					return "FAILED", c.lookPath
				}
				return "$PATH-FOUND", nil
			},
			c.goPath,
			nil, // wait
		}
		if _, err := l.lookKubetest(); err != c.expected {
			t.Errorf("err: %s != %s", err, c.expected)
		}
	}
}
|
||||
|
||||
// TestGetKubetest table-tests the install/upgrade decision logic of
// getKubetest: when an existing binary is reused, when `go get -u` runs,
// and how upgrade/touch failures propagate. The fake wait() records
// whether the upgrade ("go" command) and the post-upgrade "touch" ran.
func TestGetKubetest(t *testing.T) {
	gp := "fake-gopath"
	gpk := filepath.Join(gp, "bin", "kubetest")
	p := "PATH"
	pk := filepath.Join(p, "kubetest")
	eu := errors.New("upgrade failed")
	euVerbose := fmt.Errorf("go get -u k8s.io/test-infra/kubetest: %v", eu)
	et := errors.New("touch failed")
	cases := []struct {
		name string
		get  bool
		old  time.Duration

		stat     string        // stat succeeds on this file
		path     bool          // file exists on path
		age      time.Duration // age of mod time on file
		upgraded bool          // go get -u succeeds
		touched  bool          // touch succeeds
		goPath   string        // GOPATH var

		returnPath  string
		returnError error
	}{
		{name: "0: Pass when on GOPATH/bin",
			get: false,
			old: 0,

			stat:     gpk,
			path:     false,
			age:      100,
			upgraded: false,
			touched:  false,
			goPath:   gp,

			returnPath:  gpk,
			returnError: nil,
		},
		{name: "1: Pass when on PATH",
			get: false,
			old: 0,

			stat:     pk,
			path:     true,
			age:      100,
			upgraded: false,
			touched:  false,
			goPath:   gp,

			returnPath:  pk,
			returnError: nil,
		},
		{name: "2: Don't upgrade if on PATH and GOPATH is ''",
			get: true,
			old: 0,

			stat:     pk,
			path:     true,
			age:      100,
			upgraded: false,
			touched:  false,
			goPath:   "",

			returnPath:  pk,
			returnError: nil,
		},
		{name: "3: Don't upgrade on PATH when young.",
			get: true,
			old: time.Hour,

			stat:     pk,
			path:     true,
			age:      time.Second,
			upgraded: false,
			touched:  false,
			goPath:   gp,

			returnPath:  pk,
			returnError: nil,
		},
		{name: "4: Upgrade if old but GOPATH is set.",
			get: true,
			old: 0,

			stat:     pk,
			path:     true,
			age:      time.Second,
			upgraded: true,
			touched:  true,
			goPath:   gp,

			returnPath:  pk,
			returnError: nil,
		},
		{name: "5: Fail if upgrade fails",
			get: true,
			old: 0,

			stat:     pk,
			path:     true,
			age:      time.Second,
			upgraded: false,
			touched:  false,
			goPath:   gpk,

			returnPath:  "",
			returnError: euVerbose,
		},
		{name: "6: Fail if touch fails",
			get: true,
			old: 0,

			stat:     pk,
			path:     true,
			age:      time.Second,
			upgraded: true,
			touched:  false,
			goPath:   gpk,

			returnPath:  "",
			returnError: et,
		},
	}

	for i, c := range cases {
		didUp := false
		didTouch := false
		l := tester{
			stat: func(p string) (os.FileInfo, error) {
				// stat
				if p != c.stat {
					return nil, fmt.Errorf("Failed to find %s", p)
				}
				// Report a mod time c.age in the past.
				return FileInfo{time.Now().Add(c.age * -1)}, nil
			},
			lookPath: func(name string) (string, error) {
				if c.path {
					return filepath.Join(p, name), nil
				}
				return "", fmt.Errorf("Not on path: %s", name)
			},
			goPath: c.goPath,
			wait: func(cmd string, args ...string) error {
				// "go" == the upgrade; anything else is the touch.
				if cmd == "go" {
					if c.upgraded {
						didUp = true
						return nil
					}
					return eu
				}
				if c.touched {
					didTouch = true
					return nil
				}
				return et
			},
		}
		p, e := l.getKubetest(c.get, c.old)
		if p != c.returnPath {
			t.Errorf("%d: test=%q returnPath %q != %q", i, c.name, p, c.returnPath)
		}
		// Compare by identity when either side is nil, by message otherwise
		// (wrapped errors are distinct values with equal text).
		if e == nil || c.returnError == nil {
			if e != c.returnError {
				t.Errorf("%d: test=%q returnError %q != %q", i, c.name, e, c.returnError)
			}
		} else {
			if e.Error() != c.returnError.Error() {
				t.Errorf("%d: test=%q returnError %q != %q", i, c.name, e, c.returnError)
			}
		}
		if didUp != c.upgraded {
			t.Errorf("%d: test=%q bad upgrade state of %v", i, c.name, didUp)
		}
		if didTouch != c.touched {
			t.Errorf("%d: test=%q bad touch state of %v", i, c.name, didTouch)
		}
	}
}
|
45
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/Dockerfile
generated
vendored
Normal file
45
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Image for generating HTML API docs from Kubernetes swagger specs via
# gradle + swagger2markup + asciidoctor (driven by gen-swagger-docs.sh).
FROM java:7-jre

# asciidoctor renders the generated AsciiDoc; unzip unpacks gradle below.
RUN apt-get update && apt-get install -y \
    asciidoctor \
    unzip \
    --no-install-recommends \
    && rm -rf /var/lib/apt/lists/*

# Install gradle
RUN wget -O /tmp/gradle.zip https://services.gradle.org/distributions/gradle-2.5-bin.zip \
    && mkdir -p build/ \
    && unzip /tmp/gradle.zip -d build/ \
    && rm /tmp/gradle.zip \
    && mkdir -p gradle-cache/

# Keep gradle's dependency cache inside the image so later runs need no
# network access for build dependencies.
ENV GRADLE_USER_HOME=/gradle-cache

COPY build.gradle build/
COPY gen-swagger-docs.sh build/

# Run the script once to download the dependent java libraries into the image
RUN mkdir -p /output /swagger-source \
    && wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/swagger-spec/v1.json -O /swagger-source/v1.json \
    && wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/pkg/api/v1/register.go -O /register.go \
    && build/gen-swagger-docs.sh v1 \
    && rm -rf /output/* /swagger-source/* /register.go

# World-writable so the container can run as an arbitrary non-root user.
RUN chmod -R 777 build/ \
    && chmod -R 777 gradle-cache/

ENTRYPOINT ["build/gen-swagger-docs.sh"]
|
18
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/README.md
generated
vendored
Normal file
18
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/README.md
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
This folder contains the sources needed to build the gen-swagger-doc container.
|
||||
|
||||
To build the container image,
|
||||
|
||||
```
|
||||
$ sudo docker build -t gcr.io/google_containers/gen-swagger-docs:v1 .
|
||||
```
|
||||
|
||||
To generate the html docs,
|
||||
|
||||
```
|
||||
$ ./gen-swagger-docs.sh <API version> <absolute output path, default to PWD>
|
||||
```
|
||||
|
||||
The generated definitions.html and operations.html will be stored in output paths.
|
||||
|
||||
|
||||
[]()
|
18
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/build.gradle
generated
vendored
Normal file
18
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/build.gradle
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
buildscript {
|
||||
repositories {
|
||||
mavenLocal()
|
||||
jcenter()
|
||||
}
|
||||
|
||||
dependencies {
|
||||
classpath 'io.github.robwin:swagger2markup:0.6.0'
|
||||
}
|
||||
}
|
||||
|
||||
task gendocs << {
|
||||
io.github.robwin.swagger2markup.Swagger2MarkupConverter
|
||||
.from("./input.json")
|
||||
.build()
|
||||
.intoFolder("./");
|
||||
println '*** generating docs to ./'
|
||||
}
|
5982
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/example-output/definitions.html
generated
vendored
Normal file
5982
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/example-output/definitions.html
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
23842
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/example-output/operations.html
generated
vendored
Normal file
23842
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/example-output/operations.html
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
69
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/gen-swagger-docs.sh
generated
vendored
Executable file
69
vendor/k8s.io/kubernetes/hack/gen-swagger-doc/gen-swagger-docs.sh
generated
vendored
Executable file
@ -0,0 +1,69 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Script to generate docs from the latest swagger spec.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
cd /build
|
||||
|
||||
# gendocs takes "input.json" as the input swagger spec.
|
||||
# $1 is expected to be <group>_<version>
|
||||
cp /swagger-source/"$1".json input.json
|
||||
|
||||
./gradle-2.5/bin/gradle gendocs --info
|
||||
|
||||
#insert a TOC for top level API objects
|
||||
buf="== Top Level API Objects\n\n"
|
||||
top_level_models=$(grep '&[A-Za-z]*{},' /register.go | sed 's/.*&//;s/{},//')
|
||||
|
||||
# check if the top level models exist in the definitions.adoc. If they exist,
|
||||
# their name will be <version>.<model_name>
|
||||
VERSION="${1#*_}"
|
||||
for m in $top_level_models
|
||||
do
|
||||
if grep -xq "=== ${VERSION}.$m" ./definitions.adoc
|
||||
then
|
||||
buf+="* <<${VERSION}.$m>>\n"
|
||||
fi
|
||||
done
|
||||
sed -i "1i $buf" ./definitions.adoc
|
||||
|
||||
# fix the links in .adoc, replace <<x.y>> with link:definitions.html#_x_y[x.y], and lowercase the _x_y part
|
||||
sed -i -e 's|<<\(.*\)\.\(.*\)>>|link:#_\L\1_\2\E[\1.\2]|g' ./definitions.adoc
|
||||
sed -i -e 's|<<\(.*\)\.\(.*\)>>|link:../definitions#_\L\1_\2\E[\1.\2]|g' ./paths.adoc
|
||||
|
||||
# fix the link to <<any>>
|
||||
sed -i -e 's|<<any>>|link:#_any[any]|g' ./definitions.adoc
|
||||
sed -i -e 's|<<any>>|link:../definitions#_any[any]|g' ./paths.adoc
|
||||
|
||||
# change the title of paths.adoc from "paths" to "operations"
|
||||
sed -i 's|== Paths|== Operations|g' ./paths.adoc
|
||||
|
||||
# $$ has special meaning in asciidoc, we need to escape it
|
||||
sed -i 's|\$\$|+++$$+++|g' ./definitions.adoc
|
||||
|
||||
echo -e "=== any\nRepresents an untyped JSON map - see the description of the field for more info about the structure of this object." >> ./definitions.adoc
|
||||
|
||||
asciidoctor definitions.adoc
|
||||
asciidoctor paths.adoc
|
||||
|
||||
cp definitions.html /output/
|
||||
cp paths.html /output/operations.html
|
||||
|
||||
echo "SUCCESS"
|
87
vendor/k8s.io/kubernetes/hack/generate-bindata.sh
generated
vendored
Executable file
87
vendor/k8s.io/kubernetes/hack/generate-bindata.sh
generated
vendored
Executable file
@ -0,0 +1,87 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
if [[ -z "${KUBE_ROOT:-}" ]]; then
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
fi
|
||||
|
||||
source "${KUBE_ROOT}/cluster/lib/logging.sh"
|
||||
|
||||
if [[ ! -d "${KUBE_ROOT}/examples" ]]; then
|
||||
echo "${KUBE_ROOT}/examples not detected. This script should be run from a location where the source dirs are available."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# kube::golang::build_kube_toolchain installs the vendored go-bindata in
|
||||
# $GOPATH/bin, so make sure that's explicitly part of our $PATH.
|
||||
export PATH="${GOPATH}/bin:${PATH}"
|
||||
|
||||
if ! which go-bindata &>/dev/null ; then
|
||||
echo "Cannot find go-bindata."
|
||||
exit 5
|
||||
fi
|
||||
|
||||
# run the generation from the root directory for stable output
|
||||
pushd "${KUBE_ROOT}"
|
||||
|
||||
# These are files for e2e tests.
|
||||
BINDATA_OUTPUT="test/e2e/generated/bindata.go"
|
||||
go-bindata -nometadata -o "${BINDATA_OUTPUT}.tmp" -pkg generated \
|
||||
-ignore .jpg -ignore .png -ignore .md \
|
||||
"examples/..." \
|
||||
"test/e2e/testing-manifests/..." \
|
||||
"test/images/..." \
|
||||
"test/fixtures/..."
|
||||
|
||||
gofmt -s -w "${BINDATA_OUTPUT}.tmp"
|
||||
|
||||
# Here we compare and overwrite only if different to avoid updating the
|
||||
# timestamp and triggering a rebuild. The 'cat' redirect trick to preserve file
|
||||
# permissions of the target file.
|
||||
if ! cmp -s "${BINDATA_OUTPUT}.tmp" "${BINDATA_OUTPUT}" ; then
|
||||
cat "${BINDATA_OUTPUT}.tmp" > "${BINDATA_OUTPUT}"
|
||||
V=2 kube::log::info "Generated bindata file : ${BINDATA_OUTPUT} has $(wc -l ${BINDATA_OUTPUT}) lines of lovely automated artifacts"
|
||||
else
|
||||
V=2 kube::log::info "No changes in generated bindata file: ${BINDATA_OUTPUT}"
|
||||
fi
|
||||
|
||||
rm -f "${BINDATA_OUTPUT}.tmp"
|
||||
|
||||
# These are files for runtime code
|
||||
BINDATA_OUTPUT="pkg/generated/bindata.go"
|
||||
go-bindata -nometadata -nocompress -o "${BINDATA_OUTPUT}.tmp" -pkg generated \
|
||||
-ignore .jpg -ignore .png -ignore .md \
|
||||
"translations/..."
|
||||
|
||||
gofmt -s -w "${BINDATA_OUTPUT}.tmp"
|
||||
|
||||
# Here we compare and overwrite only if different to avoid updating the
|
||||
# timestamp and triggering a rebuild. The 'cat' redirect trick to preserve file
|
||||
# permissions of the target file.
|
||||
if ! cmp -s "${BINDATA_OUTPUT}.tmp" "${BINDATA_OUTPUT}" ; then
|
||||
cat "${BINDATA_OUTPUT}.tmp" > "${BINDATA_OUTPUT}"
|
||||
V=2 kube::log::info "Generated bindata file : ${BINDATA_OUTPUT} has $(wc -l ${BINDATA_OUTPUT}) lines of lovely automated artifacts"
|
||||
else
|
||||
V=2 kube::log::info "No changes in generated bindata file: ${BINDATA_OUTPUT}"
|
||||
fi
|
||||
|
||||
rm -f "${BINDATA_OUTPUT}.tmp"
|
||||
|
||||
popd
|
49
vendor/k8s.io/kubernetes/hack/generate-docs.sh
generated
vendored
Executable file
49
vendor/k8s.io/kubernetes/hack/generate-docs.sh
generated
vendored
Executable file
@ -0,0 +1,49 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This file is not intended to be run automatically. It is meant to be run
|
||||
# immediately before exporting docs. We do not want to check these documents in
|
||||
# by default.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
kube::golang::setup_env
|
||||
|
||||
BINS=(
|
||||
cmd/gendocs
|
||||
cmd/genkubedocs
|
||||
cmd/genman
|
||||
cmd/genyaml
|
||||
)
|
||||
make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
|
||||
|
||||
kube::util::ensure-temp-dir
|
||||
|
||||
kube::util::gen-docs "${KUBE_TEMP}"
|
||||
|
||||
# remove all of the old docs
|
||||
kube::util::remove-gen-docs
|
||||
|
||||
# copy fresh docs into the repo.
|
||||
# the shopt is so that we get docs/.generated_docs from the glob.
|
||||
shopt -s dotglob
|
||||
cp -af "${KUBE_TEMP}"/* "${KUBE_ROOT}"
|
||||
shopt -u dotglob
|
83
vendor/k8s.io/kubernetes/hack/get-build.sh
generated
vendored
Executable file
83
vendor/k8s.io/kubernetes/hack/get-build.sh
generated
vendored
Executable file
@ -0,0 +1,83 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
source "${KUBE_ROOT}/cluster/common.sh"
|
||||
|
||||
declare -r KUBE_RELEASE_BUCKET_URL="https://storage.googleapis.com/kubernetes-release"
|
||||
declare -r KUBE_DEV_RELEASE_BUCKET_URL="https://storage.googleapis.com/kubernetes-release-dev"
|
||||
declare -r KUBE_TAR_NAME="kubernetes.tar.gz"
|
||||
|
||||
usage() {
|
||||
echo "${0} [-v] <version number or publication>"
|
||||
echo " -v: Don't get tars, just print the version number"
|
||||
echo ""
|
||||
echo ' Version number or publication is either a proper version number'
|
||||
echo ' (e.g. "v1.0.6", "v1.2.0-alpha.1.881+376438b69c7612") or a version'
|
||||
echo ' publication of the form <bucket>/<version> (e.g. "release/stable",'
|
||||
echo ' "ci/latest-1"). Some common ones are:'
|
||||
echo ' - "release/stable"'
|
||||
echo ' - "release/latest"'
|
||||
echo ' - "ci/latest"'
|
||||
echo ' See the docs on getting builds for more information about version'
|
||||
echo ' publication.'
|
||||
}
|
||||
|
||||
print_version=false
|
||||
|
||||
while getopts ":vh" opt; do
|
||||
case ${opt} in
|
||||
v)
|
||||
print_version="true"
|
||||
;;
|
||||
h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND-1))
|
||||
|
||||
if [[ $# -ne 1 ]]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set_binary_version "${1}"
|
||||
|
||||
if [[ "${print_version}" == "true" ]]; then
|
||||
echo "${KUBE_VERSION}"
|
||||
else
|
||||
echo "Using version at ${1}: ${KUBE_VERSION}" >&2
|
||||
if [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
|
||||
curl --fail -o "kubernetes-${KUBE_VERSION}.tar.gz" "${KUBE_RELEASE_BUCKET_URL}/release/${KUBE_VERSION}/${KUBE_TAR_NAME}"
|
||||
elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
|
||||
curl --fail -o "kubernetes-${KUBE_VERSION}.tar.gz" "${KUBE_DEV_RELEASE_BUCKET_URL}/ci/${KUBE_VERSION}/${KUBE_TAR_NAME}"
|
||||
else
|
||||
echo "Version doesn't match regexp" >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
158
vendor/k8s.io/kubernetes/hack/ginkgo-e2e.sh
generated
vendored
Executable file
158
vendor/k8s.io/kubernetes/hack/ginkgo-e2e.sh
generated
vendored
Executable file
@ -0,0 +1,158 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/cluster/common.sh"
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
# Find the ginkgo binary build as part of the release.
|
||||
ginkgo=$(kube::util::find-binary "ginkgo")
|
||||
e2e_test=$(kube::util::find-binary "e2e.test")
|
||||
|
||||
# --- Setup some env vars.
|
||||
|
||||
GINKGO_PARALLEL=${GINKGO_PARALLEL:-n} # set to 'y' to run tests in parallel
|
||||
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
|
||||
|
||||
# If 'y', Ginkgo's reporter will not print out in color when tests are run
|
||||
# in parallel
|
||||
GINKGO_NO_COLOR=${GINKGO_NO_COLOR:-n}
|
||||
|
||||
# If 'y', will rerun failed tests once to give them a second chance.
|
||||
GINKGO_TOLERATE_FLAKES=${GINKGO_TOLERATE_FLAKES:-n}
|
||||
|
||||
: ${KUBECTL:="${KUBE_ROOT}/cluster/kubectl.sh"}
|
||||
: ${KUBE_CONFIG_FILE:="config-test.sh"}
|
||||
|
||||
export KUBECTL KUBE_CONFIG_FILE
|
||||
|
||||
source "${KUBE_ROOT}/cluster/kube-util.sh"
|
||||
|
||||
# ---- Do cloud-provider-specific setup
|
||||
if [[ -n "${KUBERNETES_CONFORMANCE_TEST:-}" ]]; then
|
||||
echo "Conformance test: not doing test setup."
|
||||
KUBERNETES_PROVIDER=${KUBERNETES_CONFORMANCE_PROVIDER:-"skeleton"}
|
||||
|
||||
detect-master-from-kubeconfig
|
||||
|
||||
auth_config=(
|
||||
"--kubeconfig=${KUBECONFIG}"
|
||||
)
|
||||
else
|
||||
echo "Setting up for KUBERNETES_PROVIDER=\"${KUBERNETES_PROVIDER}\"."
|
||||
|
||||
prepare-e2e
|
||||
|
||||
detect-master >/dev/null
|
||||
KUBE_MASTER_URL="${KUBE_MASTER_URL:-https://${KUBE_MASTER_IP:-}}"
|
||||
|
||||
auth_config=(
|
||||
"--kubeconfig=${KUBECONFIG:-$DEFAULT_KUBECONFIG}"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
|
||||
NODE_INSTANCE_GROUP="${NODE_INSTANCE_PREFIX}-group"
|
||||
fi
|
||||
|
||||
if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
|
||||
set_num_migs
|
||||
NODE_INSTANCE_GROUP=""
|
||||
for ((i=1; i<=${NUM_MIGS}; i++)); do
|
||||
if [[ $i == ${NUM_MIGS} ]]; then
|
||||
# We are assigning the same mig names as create-nodes function from cluster/gce/util.sh.
|
||||
NODE_INSTANCE_GROUP="${NODE_INSTANCE_GROUP}${NODE_INSTANCE_PREFIX}-group"
|
||||
else
|
||||
NODE_INSTANCE_GROUP="${NODE_INSTANCE_GROUP}${NODE_INSTANCE_PREFIX}-group-${i},"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# TODO(kubernetes/test-infra#3330): Allow NODE_INSTANCE_GROUP to be
|
||||
# set before we get here, which eliminates any cluster/gke use if
|
||||
# KUBERNETES_CONFORMANCE_PROVIDER is set to "gke".
|
||||
if [[ -z "${NODE_INSTANCE_GROUP:-}" ]] && [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
|
||||
detect-node-instance-groups
|
||||
NODE_INSTANCE_GROUP=$(kube::util::join , "${NODE_INSTANCE_GROUPS[@]}")
|
||||
fi
|
||||
|
||||
if [[ "${KUBERNETES_PROVIDER}" == "azure" ]]; then
|
||||
if [[ ${CLOUD_CONFIG} == "" ]]; then
|
||||
echo "Missing azure cloud config"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
ginkgo_args=()
|
||||
if [[ -n "${CONFORMANCE_TEST_SKIP_REGEX:-}" ]]; then
|
||||
ginkgo_args+=("--skip=${CONFORMANCE_TEST_SKIP_REGEX}")
|
||||
ginkgo_args+=("--seed=1436380640")
|
||||
fi
|
||||
if [[ -n "${GINKGO_PARALLEL_NODES:-}" ]]; then
|
||||
ginkgo_args+=("--nodes=${GINKGO_PARALLEL_NODES}")
|
||||
elif [[ ${GINKGO_PARALLEL} =~ ^[yY]$ ]]; then
|
||||
ginkgo_args+=("--nodes=25")
|
||||
fi
|
||||
|
||||
if [[ "${GINKGO_UNTIL_IT_FAILS:-}" == true ]]; then
|
||||
ginkgo_args+=("--untilItFails=true")
|
||||
fi
|
||||
|
||||
FLAKE_ATTEMPTS=1
|
||||
if [[ "${GINKGO_TOLERATE_FLAKES}" == "y" ]]; then
|
||||
FLAKE_ATTEMPTS=2
|
||||
fi
|
||||
|
||||
if [[ "${GINKGO_NO_COLOR}" == "y" ]]; then
|
||||
ginkgo_args+=("--noColor")
|
||||
fi
|
||||
|
||||
# The --host setting is used only when providing --auth_config
|
||||
# If --kubeconfig is used, the host to use is retrieved from the .kubeconfig
|
||||
# file and the one provided with --host is ignored.
|
||||
# Add path for things like running kubectl binary.
|
||||
export PATH=$(dirname "${e2e_test}"):"${PATH}"
|
||||
"${ginkgo}" "${ginkgo_args[@]:+${ginkgo_args[@]}}" "${e2e_test}" -- \
|
||||
"${auth_config[@]:+${auth_config[@]}}" \
|
||||
--ginkgo.flakeAttempts="${FLAKE_ATTEMPTS}" \
|
||||
--host="${KUBE_MASTER_URL}" \
|
||||
--provider="${KUBERNETES_PROVIDER}" \
|
||||
--gce-project="${PROJECT:-}" \
|
||||
--gce-zone="${ZONE:-}" \
|
||||
--gce-region="${REGION:-}" \
|
||||
--gce-multizone="${MULTIZONE:-false}" \
|
||||
--gke-cluster="${CLUSTER_NAME:-}" \
|
||||
--kube-master="${KUBE_MASTER:-}" \
|
||||
--cluster-tag="${CLUSTER_ID:-}" \
|
||||
--cloud-config-file="${CLOUD_CONFIG:-}" \
|
||||
--repo-root="${KUBE_ROOT}" \
|
||||
--node-instance-group="${NODE_INSTANCE_GROUP:-}" \
|
||||
--prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \
|
||||
--network="${KUBE_GCE_NETWORK:-${KUBE_GKE_NETWORK:-e2e}}" \
|
||||
--node-tag="${NODE_TAG:-}" \
|
||||
--master-tag="${MASTER_TAG:-}" \
|
||||
--cluster-monitoring-mode="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" \
|
||||
${KUBE_CONTAINER_RUNTIME:+"--container-runtime=${KUBE_CONTAINER_RUNTIME}"} \
|
||||
${MASTER_OS_DISTRIBUTION:+"--master-os-distro=${MASTER_OS_DISTRIBUTION}"} \
|
||||
${NODE_OS_DISTRIBUTION:+"--node-os-distro=${NODE_OS_DISTRIBUTION}"} \
|
||||
${NUM_NODES:+"--num-nodes=${NUM_NODES}"} \
|
||||
${E2E_REPORT_DIR:+"--report-dir=${E2E_REPORT_DIR}"} \
|
||||
${E2E_REPORT_PREFIX:+"--report-prefix=${E2E_REPORT_PREFIX}"} \
|
||||
"${@:-}"
|
36
vendor/k8s.io/kubernetes/hack/godep-restore.sh
generated
vendored
Executable file
36
vendor/k8s.io/kubernetes/hack/godep-restore.sh
generated
vendored
Executable file
@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
kube::log::status "Restoring kubernetes godeps"
|
||||
|
||||
if kube::util::godep_restored >/dev/null 2>&1; then
|
||||
kube::log::status "Dependencies appear to be current - skipping download"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
kube::util::ensure_godep_version
|
||||
|
||||
kube::log::status "Downloading dependencies - this might take a while"
|
||||
GOPATH="${GOPATH}:${KUBE_ROOT}/staging" godep restore "$@"
|
||||
kube::log::status "Done"
|
90
vendor/k8s.io/kubernetes/hack/godep-save.sh
generated
vendored
Executable file
90
vendor/k8s.io/kubernetes/hack/godep-save.sh
generated
vendored
Executable file
@ -0,0 +1,90 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
kube::log::status "Ensuring prereqs"
|
||||
kube::util::ensure_single_dir_gopath
|
||||
kube::util::ensure_no_staging_repos_in_gopath
|
||||
|
||||
kube::util::ensure_godep_version
|
||||
|
||||
BACKUP=_tmp/godep-save.$RANDOM
|
||||
mkdir -p "${BACKUP}"
|
||||
|
||||
function kube::godep_save::cleanup() {
|
||||
if [[ -d "${BACKUP}/vendor" ]]; then
|
||||
kube::log::error "${BACKUP}/vendor exists, restoring it"
|
||||
rm -rf vendor
|
||||
mv "${BACKUP}/vendor" vendor
|
||||
fi
|
||||
if [[ -d "${BACKUP}/Godeps" ]]; then
|
||||
kube::log::error "${BACKUP}/Godeps exists, restoring it"
|
||||
rm -rf Godeps
|
||||
mv "${BACKUP}/Godeps" Godeps
|
||||
fi
|
||||
}
|
||||
kube::util::trap_add kube::godep_save::cleanup EXIT
|
||||
|
||||
# Clear old state, but save it in case of error
|
||||
if [[ -d vendor ]]; then
|
||||
mv vendor "${BACKUP}/vendor"
|
||||
fi
|
||||
if [[ -d Godeps ]]; then
|
||||
mv Godeps "${BACKUP}/Godeps"
|
||||
fi
|
||||
|
||||
# Some things we want in godeps aren't code dependencies, so ./...
|
||||
# won't pick them up.
|
||||
REQUIRED_BINS=(
|
||||
"github.com/onsi/ginkgo/ginkgo"
|
||||
"github.com/jteeuwen/go-bindata/go-bindata"
|
||||
"github.com/tools/godep"
|
||||
"./..."
|
||||
)
|
||||
|
||||
kube::log::status "Running godep save - this might take a while"
|
||||
# This uses $(pwd) rather than ${KUBE_ROOT} because KUBE_ROOT will be
|
||||
# realpath'ed, and godep barfs ("... is not using a known version control
|
||||
# system") on our staging dirs.
|
||||
GOPATH="${GOPATH}:$(pwd)/staging" godep save "${REQUIRED_BINS[@]}"
|
||||
|
||||
# create a symlink in vendor directory pointing to the staging client. This
|
||||
# let other packages use the staging client as if it were vendored.
|
||||
for repo in $(ls staging/src/k8s.io); do
|
||||
if [ ! -e "vendor/k8s.io/${repo}" ]; then
|
||||
ln -s "../../staging/src/k8s.io/${repo}" "vendor/k8s.io/${repo}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Workaround broken symlink in docker repo because godep copies the link, but
|
||||
# not the target
|
||||
rm -rf vendor/github.com/docker/docker/project/
|
||||
|
||||
kube::log::status "Updating BUILD files"
|
||||
hack/update-bazel.sh >/dev/null
|
||||
|
||||
kube::log::status "Updating LICENSES file"
|
||||
hack/update-godep-licenses.sh >/dev/null
|
||||
|
||||
# Clean up
|
||||
rm -rf "${BACKUP}"
|
300
vendor/k8s.io/kubernetes/hack/grab-profiles.sh
generated
vendored
Executable file
300
vendor/k8s.io/kubernetes/hack/grab-profiles.sh
generated
vendored
Executable file
@ -0,0 +1,300 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
function grab_profiles_from_component {
|
||||
local requested_profiles=$1
|
||||
local mem_pprof_flags=$2
|
||||
local binary=$3
|
||||
local tunnel_port=$4
|
||||
local path=$5
|
||||
local output_prefix=$6
|
||||
local timestamp=$7
|
||||
|
||||
echo "binary: $binary"
|
||||
|
||||
for profile in ${requested_profiles}; do
|
||||
case ${profile} in
|
||||
cpu)
|
||||
go tool pprof "-pdf" "${binary}" "http://localhost:${tunnel_port}${path}/debug/pprof/profile" > "${output_prefix}-${profile}-profile-${timestamp}.pdf"
|
||||
;;
|
||||
mem)
|
||||
# There are different kinds of memory profiles that are available that
|
||||
# had to be grabbed separately: --inuse-space, --inuse-objects,
|
||||
# --alloc-space, --alloc-objects. We need to iterate over all requested
|
||||
# kinds.
|
||||
for flag in ${mem_pprof_flags}; do
|
||||
go tool pprof "-${flag}" "-pdf" "${binary}" "http://localhost:${tunnel_port}${path}/debug/pprof/heap" > "${output_prefix}-${profile}-${flag}-profile-${timestamp}.pdf"
|
||||
done
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
server_addr=""
|
||||
kubelet_addreses=""
|
||||
kubelet_binary=""
|
||||
master_binary=""
|
||||
scheduler_binary=""
|
||||
scheduler_port="10251"
|
||||
controller_manager_port="10252"
|
||||
controller_manager_binary=""
|
||||
requested_profiles=""
|
||||
mem_pprof_flags=""
|
||||
profile_components=""
|
||||
output_dir="."
|
||||
tunnel_port="${tunnel_port:-1234}"
|
||||
|
||||
args=$(getopt -o s:mho:k:c -l server:,master,heapster,output:,kubelet:,scheduler,controller-manager,help,inuse-space,inuse-objects,alloc-space,alloc-objects,cpu,kubelet-binary:,master-binary:,scheduler-binary:,controller-manager-binary:,scheduler-port:,controller-manager-port: -- "$@")
|
||||
if [[ $? -ne 0 ]]; then
|
||||
>&2 echo "Error in getopt"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
HEAPSTER_VERSION="v0.18.2"
|
||||
MASTER_PPROF_PATH=""
|
||||
HEAPSTER_PPROF_PATH="/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster"
|
||||
KUBELET_PPROF_PATH_PREFIX="/api/v1/proxy/nodes"
|
||||
SCHEDULER_PPROF_PATH_PREFIX="/api/v1/proxy/namespaces/kube-system/pods/kube-scheduler"
|
||||
CONTROLLER_MANAGER_PPROF_PATH_PREFIX="/api/v1/proxy/namespaces/kube-system/pods/kube-controller-manager"
|
||||
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case $1 in
|
||||
-s|--server)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argument to --server flag"
|
||||
exit 1
|
||||
fi
|
||||
server_addr=$1
|
||||
shift
|
||||
;;
|
||||
-m|--master)
|
||||
shift
|
||||
profile_components="master ${profile_components}"
|
||||
;;
|
||||
--master-binary)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --master-binary flag"
|
||||
exit 1
|
||||
fi
|
||||
master_binary=$1
|
||||
shift
|
||||
;;
|
||||
-h|--heapster)
|
||||
shift
|
||||
profile_components="heapster ${profile_components}"
|
||||
;;
|
||||
-k|--kubelet)
|
||||
shift
|
||||
profile_components="kubelet ${profile_components}"
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --kubelet flag"
|
||||
exit 1
|
||||
fi
|
||||
kubelet_addreses="$1 $kubelet_addreses"
|
||||
shift
|
||||
;;
|
||||
--kubelet-binary)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --kubelet-binary flag"
|
||||
exit 1
|
||||
fi
|
||||
kubelet_binary=$1
|
||||
shift
|
||||
;;
|
||||
--scheduler)
|
||||
shift
|
||||
profile_components="scheduler ${profile_components}"
|
||||
;;
|
||||
--scheduler-binary)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --scheduler-binary flag"
|
||||
exit 1
|
||||
fi
|
||||
scheduler_binary=$1
|
||||
shift
|
||||
;;
|
||||
--scheduler-port)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --scheduler-port flag"
|
||||
exit 1
|
||||
fi
|
||||
scheduler_port=$1
|
||||
shift
|
||||
;;
|
||||
-c|--controller-manager)
|
||||
shift
|
||||
profile_components="controller-manager ${profile_components}"
|
||||
;;
|
||||
--controller-manager-binary)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --controller-manager-binary flag"
|
||||
exit 1
|
||||
fi
|
||||
controller_manager_binary=$1
|
||||
shift
|
||||
;;
|
||||
--controller-manager-port)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argumet to --controller-manager-port flag"
|
||||
exit 1
|
||||
fi
|
||||
controller-managerr_port=$1
|
||||
shift
|
||||
;;
|
||||
-o|--output)
|
||||
shift
|
||||
if [ -z "$1" ]; then
|
||||
>&2 echo "empty argument to --output flag"
|
||||
exit 1
|
||||
fi
|
||||
output_dir=$1
|
||||
shift
|
||||
;;
|
||||
--inuse-space)
|
||||
shift
|
||||
requested_profiles="mem ${requested_profiles}"
|
||||
mem_pprof_flags="inuse_space ${mem_pprof_flags}"
|
||||
;;
|
||||
--inuse-objects)
|
||||
shift
|
||||
requested_profiles="mem ${requested_profiles}"
|
||||
mem_pprof_flags="inuse_objects ${mem_pprof_flags}"
|
||||
;;
|
||||
--alloc-space)
|
||||
shift
|
||||
requested_profiles="mem ${requested_profiles}"
|
||||
mem_pprof_flags="alloc_space ${mem_pprof_flags}"
|
||||
;;
|
||||
--alloc-objects)
|
||||
shift
|
||||
requested_profiles="mem ${requested_profiles}"
|
||||
mem_pprof_flags="alloc_objects ${mem_pprof_flags}"
|
||||
;;
|
||||
--cpu)
|
||||
shift
|
||||
requested_profiles="cpu ${requested_profiles}"
|
||||
;;
|
||||
--help)
|
||||
shift
|
||||
echo "Recognized options:
|
||||
-o/--output,
|
||||
-s/--server,
|
||||
-m/--master,
|
||||
-h/--heapster,
|
||||
--inuse-space,
|
||||
--inuse-objects,
|
||||
--alloc-space,
|
||||
--alloc-objects,
|
||||
--cpu,
|
||||
--help"
|
||||
exit 0
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break;
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z "${server_addr}" ]]; then
|
||||
>&2 echo "Server flag is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${profile_components}" ]]; then
|
||||
>&2 echo "Choose at least one component to profile"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${requested_profiles}" ]]; then
|
||||
>&2 echo "Choose at least one profiling option"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gcloud compute ssh "${server_addr}" --ssh-flag=-nN --ssh-flag=-L${tunnel_port}:localhost:8080 &
|
||||
|
||||
echo "Waiting for tunnel to be created..."
|
||||
kube::util::wait_for_url http://localhost:${tunnel_port}/healthz
|
||||
|
||||
SSH_PID=$(pgrep -f "/usr/bin/ssh.*${tunnel_port}:localhost:8080")
|
||||
kube::util::trap_add 'kill $SSH_PID' EXIT
|
||||
kube::util::trap_add 'kill $SSH_PID' SIGTERM
|
||||
|
||||
requested_profiles=$(echo ${requested_profiles} | xargs -n1 | LC_ALL=C sort -u | xargs)
|
||||
profile_components=$(echo ${profile_components} | xargs -n1 | LC_ALL=C sort -u | xargs)
|
||||
kubelet_addreses=$(echo ${kubelet_addreses} | xargs -n1 | LC_ALL=C sort -u | xargs)
|
||||
echo "requested profiles: ${requested_profiles}"
|
||||
echo "flags for heap profile: ${mem_pprof_flags}"
|
||||
|
||||
timestamp=$(date +%Y%m%d%H%M%S)
|
||||
binary=""
|
||||
|
||||
for component in ${profile_components}; do
|
||||
case ${component} in
|
||||
master)
|
||||
path=${MASTER_PPROF_PATH}
|
||||
binary=${master_binary}
|
||||
;;
|
||||
controller-manager)
|
||||
path="${CONTROLLER_MANAGER_PPROF_PATH_PREFIX}-${server_addr}:${controller_manager_port}"
|
||||
binary=${controller_manager_binary}
|
||||
;;
|
||||
scheduler)
|
||||
path="${SCHEDULER_PPROF_PATH_PREFIX}-${server_addr}:${scheduler_port}"
|
||||
binary=${scheduler_binary}
|
||||
;;
|
||||
heapster)
|
||||
rm heapster
|
||||
wget https://github.com/kubernetes/heapster/releases/download/${HEAPSTER_VERSION}/heapster
|
||||
kube::util::trap_add 'rm -f heapster' EXIT
|
||||
kube::util::trap_add 'rm -f heapster' SIGTERM
|
||||
binary=heapster
|
||||
path=${HEAPSTER_PPROF_PATH}
|
||||
;;
|
||||
kubelet)
|
||||
path="${KUBELET_PPROF_PATH_PREFIX}"
|
||||
if [[ -z "${kubelet_binary}" ]]; then
|
||||
binary="${KUBE_ROOT}/_output/local/bin/linux/amd64/kubelet"
|
||||
else
|
||||
binary=${kubelet_binary}
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ "${component}" == "kubelet" ]]; then
|
||||
for node in $(echo ${kubelet_addreses} | sed 's/[,;]/\n/g'); do
|
||||
grab_profiles_from_component "${requested_profiles}" "${mem_pprof_flags}" "${binary}" "${tunnel_port}" "${path}/${node}" "${output_dir}/${component}" "${timestamp}"
|
||||
done
|
||||
else
|
||||
grab_profiles_from_component "${requested_profiles}" "${mem_pprof_flags}" "${binary}" "${tunnel_port}" "${path}" "${output_dir}/${component}" "${timestamp}"
|
||||
fi
|
||||
done
|
87
vendor/k8s.io/kubernetes/hack/import-restrictions.yaml
generated
vendored
Normal file
87
vendor/k8s.io/kubernetes/hack/import-restrictions.yaml
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
- baseImportPath: "./pkg/apis/core/"
|
||||
allowedImports:
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/apiserver/pkg/util/feature
|
||||
- k8s.io/kubernetes/pkg/apis/core
|
||||
- k8s.io/kubernetes/pkg/features
|
||||
- k8s.io/kubernetes/pkg/fieldpath
|
||||
- k8s.io/kubernetes/pkg/util
|
||||
- k8s.io/api/core/v1
|
||||
|
||||
# the following are temporary and should go away. Think twice (or more) before adding anything here.
|
||||
# Main goal: pkg/apis should be as self-contained as possible.
|
||||
- k8s.io/kubernetes/pkg/apis/extensions
|
||||
- k8s.io/kubernetes/pkg/api/legacyscheme
|
||||
- k8s.io/kubernetes/pkg/api/testapi
|
||||
- k8s.io/api/extensions/v1beta1
|
||||
ignoredSubTrees:
|
||||
- "./pkg/apis/core/validation"
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/apimachinery/"
|
||||
allowedImports:
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/kube-openapi
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/api/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apimachinery
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/code-generator/"
|
||||
ignoredSubTrees:
|
||||
- "./vendor/k8s.io/code-generator/_test"
|
||||
allowedImports:
|
||||
- k8s.io/gengo
|
||||
- k8s.io/code-generator
|
||||
- k8s.io/kube-openapi
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/client-go/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/client-go
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/apiserver/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/apiserver
|
||||
- k8s.io/client-go
|
||||
- k8s.io/kube-openapi
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/metrics/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/client-go
|
||||
- k8s.io/metrics
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/kube-aggregator/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/apiserver
|
||||
- k8s.io/client-go
|
||||
- k8s.io/kube-aggregator
|
||||
- k8s.io/kube-openapi
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/sample-apiserver/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/apiserver
|
||||
- k8s.io/client-go
|
||||
- k8s.io/sample-apiserver
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/apiextensions-apiserver/"
|
||||
allowedImports:
|
||||
- k8s.io/api
|
||||
- k8s.io/apiextensions-apiserver
|
||||
- k8s.io/apimachinery
|
||||
- k8s.io/apiserver
|
||||
- k8s.io/client-go
|
||||
|
||||
- baseImportPath: "./vendor/k8s.io/kube-openapi/"
|
||||
allowedImports:
|
||||
- k8s.io/kube-openapi
|
||||
- k8s.io/gengo
|
27
vendor/k8s.io/kubernetes/hack/install-etcd.sh
generated
vendored
Executable file
27
vendor/k8s.io/kubernetes/hack/install-etcd.sh
generated
vendored
Executable file
@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Convenience script to download and install etcd in third_party.
|
||||
# Mostly just used by CI.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
kube::etcd::install
|
4
vendor/k8s.io/kubernetes/hack/jenkins/OWNERS
generated
vendored
Normal file
4
vendor/k8s.io/kubernetes/hack/jenkins/OWNERS
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
approvers:
|
||||
- sig-testing-approvers
|
||||
reviewers:
|
||||
- sig-testing-reviewers
|
100
vendor/k8s.io/kubernetes/hack/jenkins/README.md
generated
vendored
Normal file
100
vendor/k8s.io/kubernetes/hack/jenkins/README.md
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
# Jenkins
|
||||
|
||||
[Jenkins](http://jenkins-ci.org/) is a pluggable continuous
|
||||
integration system. The Google team is running two Jenkins servers in GCE for
|
||||
the Kubernetes project. The post-commit instance runs continuous builds, unit
|
||||
tests, integration tests, code verification tests, and end-to-end tests on
|
||||
multiple providers using the latest commits to the Kubernetes repo from the
|
||||
master and release branches. The PR Jenkins instance runs these tests on each
|
||||
PR by a trusted contributor, it but only runs a subset of the end-to-end tests
|
||||
and only on GCE.
|
||||
|
||||
## General flow
|
||||
The flow of the post-commit Jenkins instance:
|
||||
* Under the `kubernetes-build` job: Every 2 minutes, Jenkins polls for a batch
|
||||
of new commits, after which it runs the `build.sh` script (in this directory)
|
||||
on the latest tip. This results in build assets getting pushed to GCS and the
|
||||
`latest.txt` file in the `ci` bucket being updated.
|
||||
* On trigger, and every half hour (which effectively means all the time, unless
|
||||
we're failing cluster creation), e2e variants run, on the latest build assets
|
||||
in GCS:
|
||||
* `kubernetes-e2e-gce`: Standard GCE e2e.
|
||||
* `kubernetes-e2e-gke`: GKE provider e2e, with head k8s client and GKE
|
||||
creating clusters at its default version.
|
||||
* `kubernetes-e2e-aws`: AWS provider e2e. This only runs once a day.
|
||||
* Each job will not run concurrently with itself, so, for instance,
|
||||
Jenkins executor will only ever run one `kubernetes-build`
|
||||
job. However, it may run the jobs in parallel,
|
||||
i.e. `kubernetes-build` may be run at the same time as
|
||||
`kubernetes-e2e-gce`. For this reason, you may see your changes
|
||||
pushed to our GCS bucket rapidly, but they may take some time to
|
||||
fully work through Jenkins. Or you may get lucky and catch the
|
||||
train in 5 minutes.
|
||||
* There are many jobs not listed here, including upgrade tests, soak tests, and
|
||||
tests for previous releases.
|
||||
|
||||
## Scripts
|
||||
|
||||
The scripts in this directory are directly used by Jenkins, either by
|
||||
curl from githubusercontent (if we don't have a git checkout handy) or
|
||||
by executing it from the git checkout. Since Jenkins is an entity
|
||||
outside this repository, it's tricky to keep documentation for it up
|
||||
to date quickly. However, the scripts themselves attempt to provide
|
||||
color for the configuration(s) that each script runs in.
|
||||
|
||||
## GCS Log Format
|
||||
|
||||
Our `upload-to-gcs.sh` script runs at the start and end of every job. Logs on
|
||||
post-commit Jenkins go under `gs://kubernetes-jenkins/logs/`. Logs on PR
|
||||
Jenkins go under `gs://kubernetes-jenkins-pull/pr-logs/pull/PULL_NUMBER/`.
|
||||
Individual run logs go into the `JOB_NAME/BUILD_NUMBER` folder.
|
||||
|
||||
At the start of the job, it uploads `started.json` containing the version of
|
||||
Kubernetes under test and the timestamp.
|
||||
|
||||
At the end, it uploads `finished.json` containing the result and timestamp, as
|
||||
well as the build log into `build-log.txt`. Under `artifacts/` we put our
|
||||
test results in `junit_XY.xml`, along with gcp resource lists and cluster logs.
|
||||
|
||||
It also updates `latest-build.txt` at the end to point to this build number.
|
||||
In the end, the directory structure looks like this:
|
||||
|
||||
```
|
||||
gs://kubernetes-jenkins/logs/kubernetes-e2e-gce/
|
||||
latest-build.txt
|
||||
12345/
|
||||
build-log.txt
|
||||
started.json
|
||||
finished.json
|
||||
artifacts/
|
||||
gcp-resources-{before, after}.txt
|
||||
junit_{00, 01, ...}.xml
|
||||
jenkins-e2e-master/{kube-apiserver.log, ...}
|
||||
jenkins-e2e-node-abcd/{kubelet.log, ...}
|
||||
12344/
|
||||
...
|
||||
```
|
||||
|
||||
The munger uses `latest-build.txt` and the JUnit reports to figure out whether
|
||||
or not the job is healthy.
|
||||
|
||||
## Job Builder
|
||||
|
||||
New jobs should be specified as YAML files to be processed by [Jenkins Job
|
||||
Builder](http://docs.openstack.org/infra/jenkins-job-builder/). The YAML files
|
||||
live in `jenkins/job-configs` and its subfolders **in the
|
||||
[kubernetes/test-infra repository](https://github.com/kubernetes/test-infra)**.
|
||||
Jenkins runs Jenkins Job Builder in a Docker container defined in
|
||||
`job-builder-image`, and triggers it using `update-jobs.sh`. Jenkins Job Builder
|
||||
uses a config file called
|
||||
[jenkins_jobs.ini](http://docs.openstack.org/infra/jenkins-job-builder/execution.html)
|
||||
which contains the location and credentials of the Jenkins server.
|
||||
|
||||
E2E Job definitions are templated to avoid code duplication. To add a new job,
|
||||
add a new entry to the appropriate `project`.
|
||||
[This](https://github.com/kubernetes/kubernetes/commit/eb273e5a4bdd3905f881563ada4e6543c7eb96b5)
|
||||
is an example of a commit which does this. If necessary, create a new project, as in
|
||||
[this](https://github.com/kubernetes/kubernetes/commit/09c27cdabc300e0420a2914100bedb565c23ed73)
|
||||
commit.
|
||||
|
||||
[]()
|
74
vendor/k8s.io/kubernetes/hack/jenkins/build.sh
generated
vendored
Executable file
74
vendor/k8s.io/kubernetes/hack/jenkins/build.sh
generated
vendored
Executable file
@ -0,0 +1,74 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# kubernetes-build job: Triggered by github checkins on a 5 minute
|
||||
# poll. We abort this job if it takes longer than 10m. (Typically this
|
||||
# job takes about ~5m as of 0.8.0, but it's actually not completely
|
||||
# hermetic right now due to things like the golang image. It can take
|
||||
# ~8m if you force it to be totally hermetic.)
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
# !!! ALERT !!! Jenkins default $HOME is /var/lib/jenkins, which is
|
||||
# global across jobs. We change $HOME instead to ${WORKSPACE}, which
|
||||
# is an incoming variable Jenkins provides us for this job's scratch
|
||||
# space.
|
||||
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
|
||||
# Skip gcloud update checking
|
||||
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true
|
||||
|
||||
: ${KUBE_RELEASE_RUN_TESTS:="n"}
|
||||
export KUBE_RELEASE_RUN_TESTS
|
||||
|
||||
# Clean stuff out. Assume the last build left the tree in an odd
|
||||
# state.
|
||||
rm -rf ~/.kube*
|
||||
make clean
|
||||
|
||||
# Uncomment if you want to purge the Docker cache completely each
|
||||
# build. It costs about 150s each build to pull the golang image and
|
||||
# rebuild the kube-build:cross image, but these rarely change.
|
||||
# docker ps -aq | xargs -r docker rm
|
||||
# docker images -q | xargs -r docker rmi
|
||||
|
||||
# Build
|
||||
# Jobs explicitly set KUBE_FASTBUILD to desired settings.
|
||||
make release
|
||||
|
||||
# Push to GCS?
|
||||
if [[ ${KUBE_SKIP_PUSH_GCS:-} =~ ^[yY]$ ]]; then
|
||||
echo "Not pushed to GCS..."
|
||||
else
|
||||
readonly release_infra_clone="${WORKSPACE}/_tmp/release.git"
|
||||
mkdir -p ${WORKSPACE}/_tmp
|
||||
git clone https://github.com/kubernetes/release ${release_infra_clone}
|
||||
|
||||
push_build=${release_infra_clone}/push-build.sh
|
||||
|
||||
[[ -n "${KUBE_GCS_RELEASE_BUCKET-}" ]] \
|
||||
&& bucket_flag="--bucket=${KUBE_GCS_RELEASE_BUCKET-}"
|
||||
[[ -n "${KUBE_GCS_RELEASE_SUFFIX-}" ]] \
|
||||
&& gcs_suffix_flag="--gcs-suffix=${KUBE_GCS_RELEASE_SUFFIX-}"
|
||||
${push_build} ${bucket_flag-} ${gcs_suffix_flag-} \
|
||||
--nomock --verbose --ci
|
||||
fi
|
||||
|
||||
sha256sum _output/release-tars/kubernetes*.tar.gz
|
45
vendor/k8s.io/kubernetes/hack/jenkins/gotest.sh
generated
vendored
Executable file
45
vendor/k8s.io/kubernetes/hack/jenkins/gotest.sh
generated
vendored
Executable file
@ -0,0 +1,45 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Runs the unit and integration tests, production JUnit-style XML test reports
|
||||
# in ${WORKSPACE}/_artifacts.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
# !!! ALERT !!! Jenkins default $HOME is /var/lib/jenkins, which is
|
||||
# global across jobs. We change $HOME instead to ${WORKSPACE}, which
|
||||
# is an incoming variable Jenkins provides us for this job's scratch
|
||||
# space.
|
||||
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
|
||||
export GOPATH=${HOME}/_gopath
|
||||
export PATH=${GOPATH}/bin:${HOME}/third_party/etcd:/usr/local/go/bin:$PATH
|
||||
|
||||
# Install a few things needed by unit and /integration tests.
|
||||
command -v etcd &>/dev/null || ./hack/install-etcd.sh
|
||||
go get -u github.com/jstemmer/go-junit-report
|
||||
|
||||
# Enable the Go race detector.
|
||||
export KUBE_RACE=-race
|
||||
# Produce a JUnit-style XML test report for Jenkins.
|
||||
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/_artifacts
|
||||
# Save the verbose stdout as well.
|
||||
export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
|
||||
|
||||
make test
|
||||
make test-integration
|
60
vendor/k8s.io/kubernetes/hack/jenkins/test-dockerized.sh
generated
vendored
Executable file
60
vendor/k8s.io/kubernetes/hack/jenkins/test-dockerized.sh
generated
vendored
Executable file
@ -0,0 +1,60 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
retry() {
|
||||
for i in {1..5}; do
|
||||
"$@" && return 0 || sleep $i
|
||||
done
|
||||
"$@"
|
||||
}
|
||||
|
||||
# Runs the unit and integration tests, producing JUnit-style XML test
|
||||
# reports in ${WORKSPACE}/artifacts. This script is intended to be run from
|
||||
# kubekins-test container with a kubernetes repo mapped in. See
|
||||
# k8s.io/test-infra/scenarios/kubernetes_verify.py
|
||||
|
||||
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
||||
|
||||
retry go get github.com/tools/godep && godep version
|
||||
retry go get github.com/jstemmer/go-junit-report
|
||||
|
||||
# Enable the Go race detector.
|
||||
export KUBE_RACE=-race
|
||||
# Disable coverage report
|
||||
export KUBE_COVER="n"
|
||||
# Produce a JUnit-style XML test report for Jenkins.
|
||||
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts
|
||||
export ARTIFACTS_DIR=${WORKSPACE}/artifacts
|
||||
# Save the verbose stdout as well.
|
||||
export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
|
||||
export KUBE_TIMEOUT='-timeout 300s'
|
||||
export KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=4
|
||||
export LOG_LEVEL=4
|
||||
|
||||
cd /go/src/k8s.io/kubernetes
|
||||
|
||||
make generated_files
|
||||
go install ./cmd/...
|
||||
./hack/install-etcd.sh
|
||||
|
||||
make test-cmd
|
||||
make test-integration
|
||||
./hack/test-update-storage-objects.sh
|
295
vendor/k8s.io/kubernetes/hack/jenkins/upload-to-gcs.sh
generated
vendored
Executable file
295
vendor/k8s.io/kubernetes/hack/jenkins/upload-to-gcs.sh
generated
vendored
Executable file
@ -0,0 +1,295 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script uploads metadata and test results to Google Cloud Storage, in the
|
||||
# location indicated by JENKINS_GCS_LOGS_PATH. By default, we use the Google
|
||||
# kubernetes-jenkins bucket.
|
||||
#
|
||||
# The script looks for one of two environment variables to be set:
|
||||
# JENKINS_BUILD_STARTED: set to a nonempty string to upload version
|
||||
# information to 'started.json'. The value of the variable is not
|
||||
# currently used.
|
||||
# JENKINS_BUILD_FINISHED: set to the Jenkins build result to upload the build
|
||||
# result to 'finished.json', any test artifacts, and update the
|
||||
# 'latest-build.txt' file pointer. Since this script uses gsutil directly,
|
||||
# it's a bit faster at uploading large numbers of files than the GCS Jenkins
|
||||
# plugin. It also makes use of gsutil's gzip functionality.
|
||||
#
|
||||
# Note: for magicfile support to work correctly, the "file" utility must be
|
||||
# installed.
|
||||
|
||||
# TODO(rmmh): rewrite this script in Python so we can actually test it!
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
if [[ -n "${JENKINS_BUILD_STARTED:-}" && -n "${JENKINS_BUILD_FINISHED:-}" ]]; then
|
||||
echo "Error: JENKINS_BUILD_STARTED and JENKINS_BUILD_FINISHED should not both be set!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! ${JENKINS_UPLOAD_TO_GCS:-y} =~ ^[yY]$ ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Attempt to determine if we're running against a repo other than
|
||||
# kubernetes/kubernetes to determine whether to place PR logs in a different
|
||||
# location.
|
||||
#
|
||||
# In the current CI system, the tracked repo is named remote. This is not true
|
||||
# in general for most devs, where origin and upstream are more common.
|
||||
GCS_SUBDIR=""
|
||||
readonly remote_git_repo=$(git config --get remote.remote.url | sed 's:.*github.com/::' || true)
|
||||
if [[ -n "${remote_git_repo}" ]]; then
|
||||
case "${remote_git_repo}" in
|
||||
# main repo: nothing extra
|
||||
kubernetes/kubernetes) GCS_SUBDIR="" ;;
|
||||
# a different repo on the k8s org: just the repo name (strip kubernetes/)
|
||||
kubernetes/*) GCS_SUBDIR="${remote_git_repo#kubernetes/}/" ;;
|
||||
# any other repo: ${org}_${repo} (replace / with _)
|
||||
*) GCS_SUBDIR="${remote_git_repo/\//_}/" ;;
|
||||
esac
|
||||
if [[ "${remote_git_repo}" != "kubernetes/kubernetes" ]]; then
|
||||
# also store the repo in started.json, so Gubernator can link it properly.
|
||||
export BUILD_METADATA_REPO="${remote_git_repo}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ ${JOB_NAME} =~ -pull- ]]; then
|
||||
: ${JENKINS_GCS_LOGS_PATH:="gs://kubernetes-jenkins/pr-logs/pull/${GCS_SUBDIR}${ghprbPullId:-unknown}"}
|
||||
: ${JENKINS_GCS_LATEST_PATH:="gs://kubernetes-jenkins/pr-logs/directory"}
|
||||
: ${JENKINS_GCS_LOGS_INDIRECT:="gs://kubernetes-jenkins/pr-logs/directory/${JOB_NAME}"}
|
||||
else
|
||||
: ${JENKINS_GCS_LOGS_PATH:="gs://kubernetes-jenkins/logs"}
|
||||
: ${JENKINS_GCS_LATEST_PATH:="gs://kubernetes-jenkins/logs"}
|
||||
: ${JENKINS_GCS_LOGS_INDIRECT:=""}
|
||||
fi
|
||||
|
||||
readonly artifacts_path="${WORKSPACE}/_artifacts"
|
||||
readonly gcs_job_path="${JENKINS_GCS_LOGS_PATH}/${JOB_NAME}"
|
||||
readonly gcs_build_path="${gcs_job_path}/${BUILD_NUMBER}"
|
||||
readonly gcs_latest_path="${JENKINS_GCS_LATEST_PATH}/${JOB_NAME}"
|
||||
readonly gcs_indirect_path="${JENKINS_GCS_LOGS_INDIRECT}"
|
||||
readonly gcs_acl="public-read"
|
||||
readonly results_url=${gcs_build_path//"gs:/"/"https://console.cloud.google.com/storage/browser"}
|
||||
readonly timestamp=$(date +%s)
|
||||
|
||||
#########################################################################
|
||||
# $0 is called from different contexts so figure out where kubernetes is.
|
||||
# Sets non-exported global kubernetes_base_path and defaults to "."
|
||||
function set_kubernetes_base_path () {
|
||||
for kubernetes_base_path in kubernetes go/src/k8s.io/kubernetes .; do
|
||||
# Pick a canonical item to find in a kubernetes tree which could be a
|
||||
# raw source tree or an expanded tarball.
|
||||
|
||||
[[ -f ${kubernetes_base_path}/cluster/common.sh ]] && break
|
||||
done
|
||||
}
|
||||
|
||||
#########################################################################
|
||||
# Try to discover the kubernetes version.
|
||||
# prints version
|
||||
function find_version() {
|
||||
(
|
||||
# Where are we?
|
||||
# This could be set in the global scope at some point if we need to
|
||||
# discover the kubernetes path elsewhere.
|
||||
set_kubernetes_base_path
|
||||
|
||||
cd ${kubernetes_base_path}
|
||||
|
||||
if [[ -e "version" ]]; then
|
||||
cat version
|
||||
elif [[ -e "hack/lib/version.sh" ]]; then
|
||||
export KUBE_ROOT="."
|
||||
source "hack/lib/version.sh"
|
||||
kube::version::get_version_vars
|
||||
echo "${KUBE_GIT_VERSION-}"
|
||||
else
|
||||
# Last resort from the started.json
|
||||
gsutil cat ${gcs_build_path}/started.json 2>/dev/null |\
|
||||
sed -n 's/ *"version": *"\([^"]*\)",*/\1/p'
|
||||
fi
|
||||
)
|
||||
}
|
||||
|
||||
# Output started.json. Use test function below!
|
||||
function print_started() {
|
||||
local metadata_keys=$(compgen -e | grep ^BUILD_METADATA_)
|
||||
echo "{"
|
||||
echo " \"version\": \"${version}\"," # TODO(fejta): retire
|
||||
echo " \"job-version\": \"${version}\","
|
||||
echo " \"timestamp\": ${timestamp},"
|
||||
if [[ -n "${metadata_keys}" ]]; then
|
||||
# Any exported variables of the form BUILD_METADATA_KEY=VALUE
|
||||
# will be available as started["metadata"][KEY.lower()].
|
||||
echo " \"metadata\": {"
|
||||
local sep="" # leading commas are easy to track
|
||||
for env_var in $metadata_keys; do
|
||||
local var_upper="${env_var#BUILD_METADATA_}"
|
||||
echo " $sep\"${var_upper,,}\": \"${!env_var}\""
|
||||
sep=","
|
||||
done
|
||||
echo " },"
|
||||
fi
|
||||
echo " \"jenkins-node\": \"${NODE_NAME:-}\""
|
||||
echo "}"
|
||||
}
|
||||
|
||||
# Use this to test changes to print_started.
|
||||
if [[ -n "${TEST_STARTED_JSON:-}" ]]; then
|
||||
version=$(find_version)
|
||||
cat <(print_started) | jq .
|
||||
exit
|
||||
fi
|
||||
|
||||
function upload_version() {
|
||||
local -r version=$(find_version)
|
||||
local upload_attempt
|
||||
|
||||
echo -n 'Run starting at '; date -d "@${timestamp}"
|
||||
|
||||
if [[ -n "${version}" ]]; then
|
||||
echo "Found Kubernetes version: ${version}"
|
||||
else
|
||||
echo "Could not find Kubernetes version"
|
||||
fi
|
||||
|
||||
local -r json_file="${gcs_build_path}/started.json"
|
||||
for upload_attempt in {1..3}; do
|
||||
echo "Uploading version to: ${json_file} (attempt ${upload_attempt})"
|
||||
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" <(print_started) "${json_file}" || continue
|
||||
break
|
||||
done
|
||||
}
|
||||
|
||||
#########################################################################
|
||||
# Maintain a single file storing the full build version, Jenkins' job number
|
||||
# build state. Limit its size so it does not grow unbounded.
|
||||
# This is primarily used for and by the
|
||||
# github.com/kubernetes/release/find_green_build tool.
|
||||
# @param build_result - the state of the build
|
||||
#
|
||||
function update_job_result_cache() {
|
||||
local -r build_result=$1
|
||||
local -r version=$(find_version)
|
||||
local -r job_results=${gcs_job_path}/jobResultsCache.json
|
||||
local -r tmp_results="${WORKSPACE}/_tmp/jobResultsCache.tmp"
|
||||
# TODO: This constraint is insufficient. The boundary for secondary
|
||||
# job cache should be date based on the last primary build.
|
||||
# The issue is we are trying to find a matched green set of results
|
||||
# at a given hash, but all of the jobs run at wildly different lengths.
|
||||
local -r cache_size=300
|
||||
local upload_attempt
|
||||
|
||||
if [[ -n "${version}" ]]; then
|
||||
echo "Found Kubernetes version: ${version}"
|
||||
else
|
||||
echo "Could not find Kubernetes version"
|
||||
fi
|
||||
|
||||
mkdir -p ${tmp_results%/*}
|
||||
|
||||
# Construct a valid json file
|
||||
echo "[" > ${tmp_results}
|
||||
|
||||
for upload_attempt in $(seq 3); do
|
||||
echo "Copying ${job_results} to ${tmp_results} (attempt ${upload_attempt})"
|
||||
# The sed construct below is stripping out only the "version" lines
|
||||
# and then ensuring there's a single comma at the end of the line.
|
||||
gsutil -q cat ${job_results} 2>&- |\
|
||||
sed -n 's/^\({"version".*}\),*/\1,/p' |\
|
||||
tail -${cache_size} >> ${tmp_results} || continue
|
||||
break
|
||||
done
|
||||
|
||||
echo "{\"version\": \"${version}\", \"buildnumber\": \"${BUILD_NUMBER}\"," \
|
||||
"\"result\": \"${build_result}\"}" >> ${tmp_results}
|
||||
|
||||
echo "]" >> ${tmp_results}
|
||||
|
||||
for upload_attempt in $(seq 3); do
|
||||
echo "Copying ${tmp_results} to ${job_results} (attempt ${upload_attempt})"
|
||||
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" \
|
||||
${tmp_results} ${job_results} || continue
|
||||
break
|
||||
done
|
||||
|
||||
rm -f ${tmp_results}
|
||||
}
|
||||
|
||||
function upload_artifacts_and_build_result() {
|
||||
local -r build_result=$1
|
||||
local upload_attempt
|
||||
|
||||
echo -n 'Run finished at '; date -d "@${timestamp}"
|
||||
|
||||
for upload_attempt in {1..3}; do
|
||||
echo "Uploading to ${gcs_build_path} (attempt ${upload_attempt})"
|
||||
echo "Uploading build result: ${build_result}"
|
||||
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" <(
|
||||
echo "{"
|
||||
echo " \"result\": \"${build_result}\","
|
||||
echo " \"timestamp\": ${timestamp}"
|
||||
echo "}"
|
||||
) "${gcs_build_path}/finished.json" || continue
|
||||
if [[ -d "${artifacts_path}" && -n $(ls -A "${artifacts_path}") ]]; then
|
||||
echo "Uploading artifacts"
|
||||
gsutil -m -q -o "GSUtil:use_magicfile=True" cp -a "${gcs_acl}" -r -c \
|
||||
-z log,txt,xml "${artifacts_path}" "${gcs_build_path}/artifacts" || continue
|
||||
fi
|
||||
if [[ -e "${WORKSPACE}/build-log.txt" ]]; then
|
||||
echo "Uploading build log"
|
||||
gsutil -q cp -Z -a "${gcs_acl}" "${WORKSPACE}/build-log.txt" "${gcs_build_path}"
|
||||
fi
|
||||
|
||||
# For pull jobs, keep a canonical ordering for tools that want to examine
|
||||
# the output.
|
||||
if [[ "${gcs_indirect_path}" != "" ]]; then
|
||||
echo "Writing ${gcs_build_path} to ${gcs_indirect_path}/${BUILD_NUMBER}.txt"
|
||||
echo "${gcs_build_path}" | \
|
||||
gsutil -q -h "Content-Type:text/plain" \
|
||||
cp -a "${gcs_acl}" - "${gcs_indirect_path}/${BUILD_NUMBER}.txt" || continue
|
||||
echo "Marking build ${BUILD_NUMBER} as the latest completed build for this PR"
|
||||
echo "${BUILD_NUMBER}" | \
|
||||
gsutil -q -h "Content-Type:text/plain" -h "Cache-Control:private, max-age=0, no-transform" \
|
||||
cp -a "${gcs_acl}" - "${gcs_job_path}/latest-build.txt" || continue
|
||||
fi
|
||||
|
||||
# Mark this build as the latest completed.
|
||||
echo "Marking build ${BUILD_NUMBER} as the latest completed build"
|
||||
echo "${BUILD_NUMBER}" | \
|
||||
gsutil -q -h "Content-Type:text/plain" -h "Cache-Control:private, max-age=0, no-transform" \
|
||||
cp -a "${gcs_acl}" - "${gcs_latest_path}/latest-build.txt" || continue
|
||||
break # all uploads succeeded if we hit this point
|
||||
done
|
||||
|
||||
echo -e "\n\n\n*** View logs and artifacts at ${results_url} ***\n\n"
|
||||
}
|
||||
|
||||
if [[ -z "${BOOTSTRAP_MIGRATION:-}" ]]; then
|
||||
if [[ -n "${JENKINS_BUILD_STARTED:-}" ]]; then
|
||||
upload_version
|
||||
elif [[ -n "${JENKINS_BUILD_FINISHED:-}" ]]; then
|
||||
upload_artifacts_and_build_result ${JENKINS_BUILD_FINISHED}
|
||||
update_job_result_cache ${JENKINS_BUILD_FINISHED}
|
||||
else
|
||||
echo "ERROR: Called without JENKINS_BUILD_STARTED or JENKINS_BUILD_FINISHED set."
|
||||
echo "ERROR: this should not happen"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
48
vendor/k8s.io/kubernetes/hack/jenkins/verify-dockerized.sh
generated
vendored
Executable file
48
vendor/k8s.io/kubernetes/hack/jenkins/verify-dockerized.sh
generated
vendored
Executable file
@ -0,0 +1,48 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
retry() {
|
||||
for i in {1..5}; do
|
||||
"$@" && return 0 || sleep $i
|
||||
done
|
||||
"$@"
|
||||
}
|
||||
|
||||
# This script is intended to be run from kubekins-test container with a
|
||||
# kubernetes repo mapped in. See k8s.io/test-infra/scenarios/kubernetes_verify.py
|
||||
|
||||
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
|
||||
|
||||
# Set artifacts directory
|
||||
export ARTIFACTS_DIR=${WORKSPACE}/artifacts
|
||||
|
||||
retry go get github.com/tools/godep && godep version
|
||||
|
||||
export LOG_LEVEL=4
|
||||
|
||||
cd /go/src/k8s.io/kubernetes
|
||||
|
||||
# hack/verify-client-go.sh requires all dependencies exist in the GOPATH.
|
||||
# the retry helps avoid flakes while keeping total time bounded.
|
||||
./hack/godep-restore.sh || ./hack/godep-restore.sh
|
||||
|
||||
./hack/install-etcd.sh
|
||||
make verify
|
37
vendor/k8s.io/kubernetes/hack/jenkins/verify.sh
generated
vendored
Executable file
37
vendor/k8s.io/kubernetes/hack/jenkins/verify.sh
generated
vendored
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Sets up the environment (e.g. installing godep and etcd if necessary)
# and then runs all of the verification checks.

# Fail fast on errors and unset variables, propagate pipeline failures, and
# echo every command for Jenkins console logs.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace

# !!! ALERT !!! Jenkins default $HOME is /var/lib/jenkins, which is
# global across jobs. We change $HOME instead to ${WORKSPACE}, which
# is an incoming variable Jenkins provides us for this job's scratch
# space.
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
export GOPATH=${HOME}/_gopath
export PATH=${GOPATH}/bin:${HOME}/third_party/etcd:/usr/local/go/bin:$PATH

# Install a few things needed by the verification tests.
command -v etcd &>/dev/null || ./hack/install-etcd.sh
go get -u github.com/tools/godep

make verify
|
1
vendor/k8s.io/kubernetes/hack/lib/.gitattributes
generated
vendored
Normal file
1
vendor/k8s.io/kubernetes/hack/lib/.gitattributes
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
version.sh export-subst
|
30
vendor/k8s.io/kubernetes/hack/lib/BUILD
generated
vendored
Normal file
30
vendor/k8s.io/kubernetes/hack/lib/BUILD
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
# Bazel package definition for the shell helper libraries under hack/lib.
package(default_visibility = ["//visibility:public"])

# Shell library bundling all hack/lib helpers; depends on the cluster-level
# logging helpers that init.sh sources.
sh_library(
    name = "lib",
    srcs = [
        "etcd.sh",
        "golang.sh",
        "init.sh",
        "swagger.sh",
        "test.sh",
        "util.sh",
        "version.sh",
    ],
    deps = [
        "//cluster/lib",
    ],
)

# Every file in this package (auto-managed source aggregation).
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source aggregation entry point referenced by the parent package.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
|
110
vendor/k8s.io/kubernetes/hack/lib/etcd.sh
generated
vendored
Executable file
110
vendor/k8s.io/kubernetes/hack/lib/etcd.sh
generated
vendored
Executable file
@ -0,0 +1,110 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A set of helpers for starting/running etcd for tests

# Minimum etcd version and client endpoint; each default can be overridden
# via the environment before sourcing this file.
ETCD_VERSION=${ETCD_VERSION:-3.1.10}
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
||||
|
||||
# kube::etcd::validate ensures a usable etcd is available: it must be on PATH,
# not already running, and at least ETCD_VERSION (prepending third_party/etcd
# to PATH and re-checking before giving up). Exits non-zero on any failure.
kube::etcd::validate() {
  # validate if in path
  which etcd >/dev/null || {
    kube::log::usage "etcd must be in your PATH"
    exit 1
  }

  # validate it is not running
  if pgrep -x etcd >/dev/null 2>&1; then
    kube::log::usage "etcd appears to already be running on this machine (`pgrep -xl etcd`) (or its a zombie and you need to kill its parent)."
    kube::log::usage "retry after you resolve this etcd error."
    exit 1
  fi

  # validate installed version is at least equal to minimum
  version=$(etcd --version | tail -n +1 | head -n 1 | cut -d " " -f 3)
  if [[ $(kube::etcd::version $ETCD_VERSION) -gt $(kube::etcd::version $version) ]]; then
    # Too old: prefer the copy installed under third_party/etcd and re-check.
    export PATH=$KUBE_ROOT/third_party/etcd:$PATH
    hash etcd
    echo $PATH
    version=$(etcd --version | head -n 1 | cut -d " " -f 3)
    if [[ $(kube::etcd::version $ETCD_VERSION) -gt $(kube::etcd::version $version) ]]; then
      kube::log::usage "etcd version ${ETCD_VERSION} or greater required."
      kube::log::info "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
      exit 1
    fi
  fi
}
|
||||
|
||||
# kube::etcd::version converts each dotted version argument (e.g. "3.1.10")
# into a zero-padded integer (e.g. "3001010") suitable for -gt/-lt comparison.
kube::etcd::version() {
  local ver
  for ver in "${@}"; do
    echo "${ver}" | awk -F . '{ printf("%d%03d%03d\n", $1, $2, $3) }'
  done
}
|
||||
|
||||
# kube::etcd::start validates etcd, launches it in the background bound to
# ETCD_HOST:ETCD_PORT with a fresh (or caller-supplied) ETCD_DIR, records its
# pid in ETCD_PID, and blocks until the v2 API answers and accepts a write.
kube::etcd::start() {
  # validate before running
  kube::etcd::validate

  # Start etcd
  ETCD_DIR=${ETCD_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-etcd.XXXXXX)}
  # Debug output goes to a per-run artifact file when ARTIFACTS_DIR exists.
  if [[ -d "${ARTIFACTS_DIR:-}" ]]; then
    ETCD_LOGFILE="${ARTIFACTS_DIR}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$"
  else
    ETCD_LOGFILE=/dev/null
  fi
  kube::log::info "etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug > \"${ETCD_LOGFILE}\" 2>/dev/null"
  etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug 2> "${ETCD_LOGFILE}" >/dev/null &
  ETCD_PID=$!

  echo "Waiting for etcd to come up."
  # Poll the v2 machines endpoint (0.25s interval, 80 tries), then verify
  # writes work with a test PUT.
  kube::util::wait_for_url "http://${ETCD_HOST}:${ETCD_PORT}/v2/machines" "etcd: " 0.25 80
  curl -fs -X PUT "http://${ETCD_HOST}:${ETCD_PORT}/v2/keys/_test"
}
|
||||
|
||||
# kube::etcd::stop kills the etcd process recorded in ETCD_PID (if any) and
# reaps it; all errors (already dead, never started) are deliberately ignored.
kube::etcd::stop() {
  kill "${ETCD_PID-}" &>/dev/null || true
  wait "${ETCD_PID-}" &>/dev/null || true
}
|
||||
|
||||
# kube::etcd::clean_etcd_dir removes the etcd data directory created by
# kube::etcd::start (a no-op when ETCD_DIR is unset or empty).
kube::etcd::clean_etcd_dir() {
  rm -rf "${ETCD_DIR-}"
}
|
||||
|
||||
# kube::etcd::cleanup stops the etcd instance started by kube::etcd::start
# and removes its data directory.
kube::etcd::cleanup() {
  kube::etcd::stop
  kube::etcd::clean_etcd_dir
}
|
||||
|
||||
# kube::etcd::install downloads and unpacks the pinned etcd release into
# ${KUBE_ROOT}/third_party (zip on Darwin, tarball on Linux) and points the
# third_party/etcd symlink at it. Runs in a subshell so the cwd is preserved.
kube::etcd::install() {
  (
    cd "${KUBE_ROOT}/third_party"
    if [[ $(uname) == "Darwin" ]]; then
      download_file="etcd-v${ETCD_VERSION}-darwin-amd64.zip"
      url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${download_file}"
      kube::util::download_file "${url}" "${download_file}"
      unzip -o "${download_file}"
      ln -fns "etcd-v${ETCD_VERSION}-darwin-amd64" etcd
      rm "${download_file}"
    else
      url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
      download_file="etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
      kube::util::download_file "${url}" "${download_file}"
      tar xzf "${download_file}"
      ln -fns "etcd-v${ETCD_VERSION}-linux-amd64" etcd
    fi
    kube::log::info "etcd v${ETCD_VERSION} installed. To use:"
    kube::log::info "export PATH=$(pwd)/etcd:\${PATH}"
  )
}
|
712
vendor/k8s.io/kubernetes/hack/lib/golang.sh
generated
vendored
Executable file
712
vendor/k8s.io/kubernetes/hack/lib/golang.sh
generated
vendored
Executable file
@ -0,0 +1,712 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The golang package that we are building.
readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
# Fake GOPATH rooted under the build output tree (see kube::golang::setup_env).
readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"
|
||||
|
||||
# The set of server targets that we are only building for Linux.
# Prints the targets as a single space-separated line.
# If you update this list, please also update build/BUILD.
kube::golang::server_targets() {
  local -a targets=(
    cmd/kube-proxy
    cmd/kube-apiserver
    cmd/kube-controller-manager
    cmd/cloud-controller-manager
    cmd/kubelet
    cmd/kubeadm
    cmd/hyperkube
    vendor/k8s.io/kube-aggregator
    vendor/k8s.io/apiextensions-apiserver
    plugin/cmd/kube-scheduler
    cluster/gce/gci/mounter
  )
  printf '%s\n' "${targets[*]}"
}
|
||||
|
||||
# Materialize the server target list once; binaries are the path basenames.
readonly KUBE_SERVER_TARGETS=($(kube::golang::server_targets))
readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}")

# The set of server targets that we are only building for Kubernetes nodes
# If you update this list, please also update build/BUILD.
kube::golang::node_targets() {
  local targets=(
    cmd/kube-proxy
    cmd/kubeadm
    cmd/kubelet
  )
  echo "${targets[@]}"
}

# Node target list, binary basenames, and Windows (.exe) binary names.
readonly KUBE_NODE_TARGETS=($(kube::golang::node_targets))
readonly KUBE_NODE_BINARIES=("${KUBE_NODE_TARGETS[@]##*/}")
readonly KUBE_NODE_BINARIES_WIN=("${KUBE_NODE_BINARIES[@]/%/.exe}")
|
||||
|
||||
# Platform matrix selection: an explicit KUBE_BUILD_PLATFORMS wins for all
# four lists; otherwise KUBE_FASTBUILD=true restricts everything to
# linux/amd64 (plus darwin test/client targets when building on a Mac);
# otherwise the full default matrix below is used.
if [[ -n "${KUBE_BUILD_PLATFORMS:-}" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
  readonly KUBE_NODE_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
  readonly KUBE_TEST_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
  readonly KUBE_CLIENT_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
elif [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(linux/amd64)
  readonly KUBE_NODE_PLATFORMS=(linux/amd64)
  if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then
    readonly KUBE_TEST_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
    readonly KUBE_CLIENT_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
  else
    readonly KUBE_TEST_PLATFORMS=(linux/amd64)
    readonly KUBE_CLIENT_PLATFORMS=(linux/amd64)
  fi
else

  # The server platform we are building on.
  readonly KUBE_SERVER_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
  )

  # The node platforms we build for
  readonly KUBE_NODE_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    windows/amd64
  )

  # If we update this we should also update the set of platforms whose standard library is precompiled for in build/build-image/cross/Dockerfile
  readonly KUBE_CLIENT_PLATFORMS=(
    linux/amd64
    linux/386
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    darwin/amd64
    darwin/386
    windows/amd64
    windows/386
  )

  # Which platforms we should compile test targets for. Not all client platforms need these tests
  readonly KUBE_TEST_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    darwin/amd64
    windows/amd64
  )
fi
|
||||
|
||||
# The set of client targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
readonly KUBE_CLIENT_TARGETS=(
  cmd/kubectl
)
# Binary basenames and their Windows (.exe) counterparts.
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}")
|
||||
|
||||
# The set of test targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
kube::golang::test_targets() {
  local targets=(
    cmd/gendocs
    cmd/genkubedocs
    cmd/genman
    cmd/genyaml
    cmd/genswaggertypedocs
    cmd/linkcheck
    vendor/github.com/onsi/ginkgo/ginkgo
    test/e2e/e2e.test
  )
  echo "${targets[@]}"
}
# Materialized test target list, binary basenames, and Windows (.exe) names.
readonly KUBE_TEST_TARGETS=($(kube::golang::test_targets))
readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}")
readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}")
# Non-binary artifacts (scripts, manifests) shipped in the test tarball.
# If you update this list, please also update build/BUILD.
readonly KUBE_TEST_PORTABLE=(
  test/e2e/testing-manifests
  test/kubemark
  hack/e2e.go
  hack/e2e-internal
  hack/get-build.sh
  hack/ginkgo-e2e.sh
  hack/lib
)
|
||||
|
||||
# Test targets which run on the Kubernetes clusters directly, so we only
# need to target server platforms.
# These binaries will be distributed in the kubernetes-test tarball.
# If you update this list, please also update build/BUILD.
kube::golang::server_test_targets() {
  local targets=(
    cmd/kubemark
    vendor/github.com/onsi/ginkgo/ginkgo
  )

  # The node e2e test binary is only added when building on a Linux host.
  if [[ "${OSTYPE:-}" == "linux"* ]]; then
    targets+=( test/e2e_node/e2e_node.test )
  fi

  echo "${targets[@]}"
}

# Materialized server-test target list; built for every server platform.
readonly KUBE_TEST_SERVER_TARGETS=($(kube::golang::server_test_targets))
readonly KUBE_TEST_SERVER_BINARIES=("${KUBE_TEST_SERVER_TARGETS[@]##*/}")
readonly KUBE_TEST_SERVER_PLATFORMS=("${KUBE_SERVER_PLATFORMS[@]}")
|
||||
|
||||
# Gigabytes desired for parallel platform builds. 11 is fairly
# arbitrary, but is a reasonable splitting point for 2015
# laptops-versus-not.
readonly KUBE_PARALLEL_BUILD_MEMORY=11

# TODO(pipejakob) gke-certificates-controller is included here to exercise its
# compilation, but it doesn't need to be distributed in any of our tars. Its
# code is only living in this repo temporarily until it finds a new home.
readonly KUBE_ALL_TARGETS=(
  "${KUBE_SERVER_TARGETS[@]}"
  "${KUBE_CLIENT_TARGETS[@]}"
  "${KUBE_TEST_TARGETS[@]}"
  "${KUBE_TEST_SERVER_TARGETS[@]}"
  cmd/gke-certificates-controller
)
readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}")

# Binaries whose basename appears here are built with CGO disabled
# (see kube::golang::is_statically_linked_library).
readonly KUBE_STATIC_LIBRARIES=(
  cloud-controller-manager
  kube-apiserver
  kube-controller-manager
  kube-scheduler
  kube-proxy
  kube-aggregator
  kubeadm
  kubectl
)

# Add any files with those //generate annotations in the array below.
readonly KUBE_BINDATAS=(
  test/e2e/generated/gobindata_util.go
)
|
||||
|
||||
# kube::golang::is_statically_linked_library returns 0 when the target ($1)
# should be built as a static (CGO-disabled) binary, i.e. when its basename
# matches an entry in KUBE_STATIC_LIBRARIES or KUBE_STATIC_OVERRIDES.
kube::golang::is_statically_linked_library() {
  local e
  for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  # Allow individual overrides--e.g., so that you can get a static build of
  # kubectl for inclusion in a container.
  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
    for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  fi
  return 1;
}
|
||||
|
||||
# kube::binaries_from_targets take a list of build targets and return the
# full go package to be built, one per line.
kube::golang::binaries_from_targets() {
  local target
  for target; do
    # If the target starts with what looks like a domain name, assume it has a
    # fully-qualified package name rather than one that needs the Kubernetes
    # package prepended.
    if [[ "${target}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then
      echo "${target}"
    else
      echo "${KUBE_GO_PACKAGE}/${target}"
    fi
  done
}
|
||||
|
||||
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host
# platform. Output format is "os/arch", e.g. "linux/amd64".
kube::golang::host_platform() {
  local host_os host_arch
  host_os=$(go env GOHOSTOS)
  host_arch=$(go env GOHOSTARCH)
  echo "${host_os}/${host_arch}"
}
|
||||
|
||||
# Takes the platform name ($1) and sets the appropriate golang env variables
# for that platform: always GOOS/GOARCH, plus CGO_ENABLED/CC for the
# cross-compiled server architectures when building from linux/amd64.
kube::golang::set_platform_envs() {
  [[ -n ${1-} ]] || {
    kube::log::error_exit "!!! Internal error. No platform set in kube::golang::set_platform_envs"
  }

  # Bind the validated argument locally. The original body read a variable
  # named "platform" from the caller's scope instead of using $1, which only
  # worked because every caller happened to use that variable name.
  local platform="${1}"

  export GOOS=${platform%/*}
  export GOARCH=${platform##*/}

  # Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64
  if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then
    # Dynamic CGO linking for other server architectures than linux/amd64 goes here
    # If you want to include support for more server platforms than these, add arch-specific gcc names here
    case "${platform}" in
      "linux/arm")
        export CGO_ENABLED=1
        export CC=arm-linux-gnueabihf-gcc
        ;;
      "linux/arm64")
        export CGO_ENABLED=1
        export CC=aarch64-linux-gnu-gcc
        ;;
      "linux/ppc64le")
        export CGO_ENABLED=1
        export CC=powerpc64le-linux-gnu-gcc
        ;;
      "linux/s390x")
        export CGO_ENABLED=1
        export CC=s390x-linux-gnu-gcc
        ;;
    esac
  fi
}
|
||||
|
||||
# Clears every cross-compilation-related Go environment variable set by
# kube::golang::set_platform_envs so later builds use toolchain defaults.
kube::golang::unset_platform_envs() {
  unset GOOS GOARCH GOROOT CGO_ENABLED CC
}
|
||||
|
||||
# Create the GOPATH tree under $KUBE_OUTPUT
kube::golang::create_gopath_tree() {
  local go_pkg_dir="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}"
  local go_pkg_basedir=$(dirname "${go_pkg_dir}")

  mkdir -p "${go_pkg_basedir}"

  # TODO: This symlink should be relative.
  # (Re)point the package dir at the real source tree when missing or stale.
  if [[ ! -e "${go_pkg_dir}" || "$(readlink ${go_pkg_dir})" != "${KUBE_ROOT}" ]]; then
    ln -snf "${KUBE_ROOT}" "${go_pkg_dir}"
  fi

  cat >"${KUBE_GOPATH}/BUILD" <<EOF
# This dummy BUILD file prevents Bazel from trying to descend through the
# infinite loop created by the symlink at
# ${go_pkg_dir}
EOF
}
|
||||
|
||||
# Ensure the go tool exists and is a viable version.
# Returns 2 (without exiting) when go is missing or too old.
kube::golang::verify_go_version() {
  if [[ -z "$(which go)" ]]; then
    kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    return 2
  fi

  local go_version
  go_version=($(go version))
  local minimum_go_version
  minimum_go_version=go1.9.1
  # NOTE: this is a lexical comparison of "goX.Y.Z" strings, not a semantic
  # version compare; "devel" toolchains are always accepted.
  if [[ "${go_version[2]}" < "${minimum_go_version}" && "${go_version[2]}" != "devel" ]]; then
    kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires ${minimum_go_version} or greater.
Please install ${minimum_go_version} or later.
EOF
    return 2
  fi
}
|
||||
|
||||
# kube::golang::setup_env will check that the `go` commands is available in
# ${PATH}. It will also check that the Go version is good enough for the
# Kubernetes build.
#
# Inputs:
#   KUBE_EXTRA_GOPATH - If set, this is included in created GOPATH
#
# Outputs:
#   env-var GOPATH points to our local output dir
#   env-var GOBIN is unset (we want binaries in a predictable place)
#   env-var GO15VENDOREXPERIMENT=1
#   current directory is within GOPATH
kube::golang::setup_env() {
  kube::golang::verify_go_version

  kube::golang::create_gopath_tree

  export GOPATH="${KUBE_GOPATH}"

  # Append KUBE_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${KUBE_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${KUBE_EXTRA_GOPATH}"
  fi

  # Make sure our own Go binaries are in PATH.
  export PATH="${KUBE_GOPATH}/bin:${PATH}"

  # Change directories so that we are within the GOPATH. Some tools get really
  # upset if this is not true. We use a whole fake GOPATH here to collect the
  # resultant binaries. Go will not let us use GOBIN with `go install` and
  # cross-compiling, and `go install -o <file>` only works for a single pkg.
  # Preserve the caller's position relative to KUBE_ROOT inside the fake tree.
  local subdir
  subdir=$(kube::realpath . | sed "s|$KUBE_ROOT||")
  cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}"

  # Set GOROOT so binaries that parse code can work properly.
  export GOROOT=$(go env GOROOT)

  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN

  # This seems to matter to some tools (godep, ginkgo...)
  export GO15VENDOREXPERIMENT=1
}
|
||||
|
||||
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${KUBE_OUTPUT_BINDIR}
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
kube::golang::place_bins() {
  local host_platform
  host_platform=$(kube::golang::host_platform)

  V=2 kube::log::status "Placing binaries"

  local platform
  for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
    # The substitution on platform_src below will replace all slashes with
    # underscores. It'll transform darwin/amd64 -> darwin_amd64.
    local platform_src="/${platform//\//_}"
    if [[ $platform == $host_platform ]]; then
      # Host-platform binaries live directly in bin/; also refresh the
      # "this platform" convenience symlink.
      platform_src=""
      rm -f "${THIS_PLATFORM_BIN}"
      ln -s "${KUBE_OUTPUT_BINPATH}/${platform}" "${THIS_PLATFORM_BIN}"
    fi

    local full_binpath_src="${KUBE_GOPATH}/bin${platform_src}"
    if [[ -d "${full_binpath_src}" ]]; then
      mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}"
      # rsync -pc preserves permissions and copies only on content change.
      find "${full_binpath_src}" -maxdepth 1 -type f -exec \
        rsync -pc {} "${KUBE_OUTPUT_BINPATH}/${platform}" \;
    fi
  done
}
|
||||
|
||||
# kube::golang::fallback_if_stdlib_not_installable sets use_go_build=true in
# the caller's scope when the cgo-flavored Go standard library is missing AND
# GOROOT/pkg is not writable, so static builds fall back to `go build`.
kube::golang::fallback_if_stdlib_not_installable() {
  local go_root_dir=$(go env GOROOT);
  local go_host_os=$(go env GOHOSTOS);
  local go_host_arch=$(go env GOHOSTARCH);
  local cgo_pkg_dir=${go_root_dir}/pkg/${go_host_os}_${go_host_arch}_cgo;

  # Precompiled cgo stdlib already present: nothing to do.
  if [ -e ${cgo_pkg_dir} ]; then
    return 0;
  fi

  # GOROOT/pkg is writable, so `go install` can rebuild it: nothing to do.
  if [ -w ${go_root_dir}/pkg ]; then
    return 0;
  fi

  kube::log::status "+++ Warning: stdlib pkg with cgo flag not found.";
  kube::log::status "+++ Warning: stdlib pkg cannot be rebuilt since ${go_root_dir}/pkg is not writable by `whoami`";
  kube::log::status "+++ Warning: Make ${go_root_dir}/pkg writable for `whoami` for a one-time stdlib install, Or"
  kube::log::status "+++ Warning: Rebuild stdlib using the command 'CGO_ENABLED=0 go install -a -installsuffix cgo std'";
  kube::log::status "+++ Falling back to go build, which is slower";

  use_go_build=true
}
|
||||
|
||||
# Builds the toolchain necessary for building kube. This needs to be
# built only on the host platform.
# TODO: Find this a proper home.
# Ideally, not a shell script because testing shell scripts is painful.
# NOTE: reads goflags/gogcflags/goldflags from the caller's scope (they are
# set up inside kube::golang::build_binaries).
kube::golang::build_kube_toolchain() {
  local targets=(
    hack/cmd/teststale
    vendor/github.com/jteeuwen/go-bindata/go-bindata
  )

  local binaries
  binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))

  kube::log::status "Building the toolchain targets:" "${binaries[@]}"
  go install "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      "${binaries[@]:+${binaries[@]}}"
}
|
||||
|
||||
# Try and replicate the native binary placement of go install without
# calling go install.
# NOTE: reads host_platform and GOOS from the caller's scope.
kube::golang::output_filename_for_binary() {
  local binary=$1
  local platform=$2
  local output_path="${KUBE_GOPATH}/bin"
  # Cross-compiled binaries go into a per-platform subdir named os_arch.
  if [[ $platform != $host_platform ]]; then
    output_path="${output_path}/${platform//\//_}"
  fi
  local bin=$(basename "${binary}")
  if [[ ${GOOS} == "windows" ]]; then
    bin="${bin}.exe"
  fi
  echo "${output_path}/${bin}"
}
|
||||
|
||||
# kube::golang::build_binaries_for_platform builds the packages in the
# caller's `binaries` array for a single platform ($1), bucketing them into
# statically linked, dynamically linked, and test binaries. When $2 is
# non-empty, `go build` is used instead of `go install`.
# NOTE: also reads goflags/gogcflags/goldflags from the caller's scope.
kube::golang::build_binaries_for_platform() {
  local platform=$1
  local use_go_build=${2-}

  local -a statics=()
  local -a nonstatics=()
  local -a tests=()

  V=2 kube::log::info "Env for ${platform}: GOOS=${GOOS-} GOARCH=${GOARCH-} GOROOT=${GOROOT-} CGO_ENABLED=${CGO_ENABLED-} CC=${CC-}"

  # Bucket each requested binary by how it must be built.
  for binary in "${binaries[@]}"; do
    if [[ "${binary}" =~ ".test"$ ]]; then
      tests+=($binary)
    elif kube::golang::is_statically_linked_library "${binary}"; then
      statics+=($binary)
    else
      nonstatics+=($binary)
    fi
  done

  if [[ "${#statics[@]}" != 0 ]]; then
    kube::golang::fallback_if_stdlib_not_installable;
  fi

  if [[ -n ${use_go_build:-} ]]; then
    # Build each binary individually to an explicit output path.
    kube::log::progress "    "
    for binary in "${statics[@]:+${statics[@]}}"; do
      local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
      CGO_ENABLED=0 go build -o "${outfile}" \
        "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${binary}"
      kube::log::progress "*"
    done
    for binary in "${nonstatics[@]:+${nonstatics[@]}}"; do
      local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
      go build -o "${outfile}" \
        "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${binary}"
      kube::log::progress "*"
    done
    kube::log::progress "\n"
  else
    # Use go install.
    if [[ "${#nonstatics[@]}" != 0 ]]; then
      go install "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${nonstatics[@]:+${nonstatics[@]}}"
    fi
    if [[ "${#statics[@]}" != 0 ]]; then
      CGO_ENABLED=0 go install -installsuffix cgo "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${statics[@]:+${statics[@]}}"
    fi
  fi

  for test in "${tests[@]:+${tests[@]}}"; do
    local outfile=$(kube::golang::output_filename_for_binary "${test}" \
      "${platform}")

    local testpkg="$(dirname ${test})"

    # Staleness check always happens on the host machine, so we don't
    # have to locate the `teststale` binaries for the other platforms.
    # Since we place the host binaries in `$KUBE_GOPATH/bin`, we can
    # assume that the binary exists there, if it exists at all.
    # Otherwise, something has gone wrong with building the `teststale`
    # binary and we should safely proceed building the test binaries
    # assuming that they are stale. There is no good reason to error
    # out.
    if test -x "${KUBE_GOPATH}/bin/teststale" && ! "${KUBE_GOPATH}/bin/teststale" -binary "${outfile}" -package "${testpkg}"
    then
      continue
    fi

    # `go test -c` below directly builds the binary. It builds the packages,
    # but it never installs them. `go test -i` only installs the dependencies
    # of the test, but not the test package itself. So neither `go test -c`
    # nor `go test -i` installs, for example, test/e2e.a. And without that,
    # doing a staleness check on k8s.io/kubernetes/test/e2e package always
    # returns true (always stale). And that's why we need to install the
    # test package.
    go install "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      "${testpkg}"

    mkdir -p "$(dirname ${outfile})"
    go test -i -c \
      "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      -o "${outfile}" \
      "${testpkg}"
  done
}
|
||||
|
||||
# Return approximate physical memory available in gigabytes.
# Tries, in order: Linux MemAvailable, Linux MemTotal, macOS sysctl;
# prints 1 if none of these can be read.
kube::golang::get_physmem() {
  local mem

  # Linux kernel version >=3.14, in kb
  if mem=$(grep MemAvailable /proc/meminfo | awk '{ print $2 }'); then
    echo $(( ${mem} / 1048576 ))
    return
  fi

  # Linux, in kb
  if mem=$(grep MemTotal /proc/meminfo | awk '{ print $2 }'); then
    echo $(( ${mem} / 1048576 ))
    return
  fi

  # OS X, in bytes. Note that get_physmem, as used, should only ever
  # run in a Linux container (because it's only used in the multiple
  # platform case, which is a Dockerized build), but this is provided
  # for completeness.
  if mem=$(sysctl -n hw.memsize 2>/dev/null); then
    echo $(( ${mem} / 1073741824 ))
    return
  fi

  # If we can't infer it, just give up and assume a low memory system
  echo 1
}
|
||||
|
||||
# Build binaries targets specified
#
# Input:
#   $@ - targets and go flags. If no targets are set then all binaries targets
#     are built.
#   KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
#     then just the host architecture is built.
kube::golang::build_binaries() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    # Check for `go` binary and set ${GOPATH}.
    kube::golang::setup_env
    V=2 kube::log::info "Go version: $(go version)"

    local host_platform
    host_platform=$(kube::golang::host_platform)

    # Use eval to preserve embedded quoted strings.
    local goflags goldflags gogcflags
    eval "goflags=(${GOFLAGS:-})"
    goldflags="${GOLDFLAGS:-} $(kube::version::ldflags)"
    gogcflags="${GOGCFLAGS:-}"

    local use_go_build
    local -a targets=()
    local arg

    # Split args into the --use_go_build toggle, go flags, and build targets.
    for arg; do
      if [[ "${arg}" == "--use_go_build" ]]; then
        use_go_build=true
      elif [[ "${arg}" == -* ]]; then
        # Assume arguments starting with a dash are flags to pass to go.
        goflags+=("${arg}")
      else
        targets+=("${arg}")
      fi
    done

    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("${KUBE_ALL_TARGETS[@]}")
    fi

    local -a platforms=(${KUBE_BUILD_PLATFORMS:-})
    if [[ ${#platforms[@]} -eq 0 ]]; then
      platforms=("${host_platform}")
    fi

    local binaries
    binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))

    # Build platforms in parallel only when the machine has enough memory.
    local parallel=false
    if [[ ${#platforms[@]} -gt 1 ]]; then
      local gigs
      gigs=$(kube::golang::get_physmem)

      if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then
        kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel"
        parallel=true
      else
        kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial"
        parallel=false
      fi
    fi

    # First build the toolchain before building any other targets
    kube::golang::build_kube_toolchain

    kube::log::status "Generating bindata:" "${KUBE_BINDATAS[@]}"
    for bindata in ${KUBE_BINDATAS[@]}; do
      # Only try to generate bindata if the file exists, since in some cases
      # one-off builds of individual directories may exclude some files.
      if [[ -f "${KUBE_ROOT}/${bindata}" ]]; then
        go generate "${goflags[@]:+${goflags[@]}}" "${KUBE_ROOT}/${bindata}"
      fi
    done

    if [[ "${parallel}" == "true" ]]; then
      kube::log::status "Building go targets for {${platforms[*]}} in parallel (output will appear in a burst when complete):" "${targets[@]}"
      local platform
      for platform in "${platforms[@]}"; do (
          # Each platform builds in a background subshell; its output is
          # captured to a per-platform file and replayed after all finish.
          kube::golang::set_platform_envs "${platform}"
          kube::log::status "${platform}: go build started"
          kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
          kube::log::status "${platform}: go build finished"
        ) &> "/tmp//${platform//\//_}.build" &
      done

      local fails=0
      for job in $(jobs -p); do
        wait ${job} || let "fails+=1"
      done

      for platform in "${platforms[@]}"; do
        cat "/tmp//${platform//\//_}.build"
      done

      # Exit the outer subshell with the number of failed platform builds.
      exit ${fails}
    else
      for platform in "${platforms[@]}"; do
        kube::log::status "Building go targets for ${platform}:" "${targets[@]}"
        (
          kube::golang::set_platform_envs "${platform}"
          kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
        )
      done
    fi
  )
}
|
176
vendor/k8s.io/kubernetes/hack/lib/init.sh
generated
vendored
Executable file
176
vendor/k8s.io/kubernetes/hack/lib/init.sh
generated
vendored
Executable file
@ -0,0 +1,176 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
set -o nounset
set -o pipefail

# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"

# Output locations; KUBE_OUTPUT_SUBPATH may be overridden by the caller.
KUBE_OUTPUT_SUBPATH="${KUBE_OUTPUT_SUBPATH:-_output/local}"
KUBE_OUTPUT="${KUBE_ROOT}/${KUBE_OUTPUT_SUBPATH}"
KUBE_OUTPUT_BINPATH="${KUBE_OUTPUT}/bin"

# This controls rsync compression. Set to a value > 0 to enable rsync
# compression for build container
KUBE_RSYNC_COMPRESS="${KUBE_RSYNC_COMPRESS:-0}"

# Set no_proxy for localhost if behind a proxy, otherwise,
# the connections to localhost in scripts will time out
export no_proxy=127.0.0.1,localhost

# This is a symlink to binaries for "this platform", e.g. build tools.
THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin"

# Pull in the utility and logging libraries before anything else; the
# remaining libraries below depend on them.
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/lib/logging.sh"

kube::log::install_errexit

source "${KUBE_ROOT}/hack/lib/version.sh"
source "${KUBE_ROOT}/hack/lib/golang.sh"
source "${KUBE_ROOT}/hack/lib/etcd.sh"

# Host-specific bin dir, e.g. .../bin/linux/amd64.
KUBE_OUTPUT_HOSTBIN="${KUBE_OUTPUT_BINPATH}/$(kube::util::host_platform)"

# list of all available group versions. This should be used when generated code
# or when starting an API server that you want to have everything.
# most preferred version for a group should appear first
KUBE_AVAILABLE_GROUP_VERSIONS="${KUBE_AVAILABLE_GROUP_VERSIONS:-\
v1 \
admissionregistration.k8s.io/v1alpha1 \
admissionregistration.k8s.io/v1beta1 \
admission.k8s.io/v1beta1 \
apps/v1beta1 \
apps/v1beta2 \
apps/v1 \
authentication.k8s.io/v1 \
authentication.k8s.io/v1beta1 \
authorization.k8s.io/v1 \
authorization.k8s.io/v1beta1 \
autoscaling/v1 \
autoscaling/v2beta1 \
batch/v1 \
batch/v1beta1 \
batch/v2alpha1 \
certificates.k8s.io/v1beta1 \
extensions/v1beta1 \
events.k8s.io/v1beta1 \
imagepolicy.k8s.io/v1alpha1 \
networking.k8s.io/v1 \
policy/v1beta1 \
rbac.authorization.k8s.io/v1 \
rbac.authorization.k8s.io/v1beta1 \
rbac.authorization.k8s.io/v1alpha1 \
scheduling.k8s.io/v1alpha1 \
settings.k8s.io/v1alpha1 \
storage.k8s.io/v1beta1 \
storage.k8s.io/v1 \
storage.k8s.io/v1alpha1 \
}"

# not all group versions are exposed by the server. This list contains those
# which are not available so we don't generate clients or swagger for them
KUBE_NONSERVER_GROUP_VERSIONS="
abac.authorization.kubernetes.io/v0 \
abac.authorization.kubernetes.io/v1beta1 \
componentconfig/v1alpha1 \
imagepolicy.k8s.io/v1alpha1\
admission.k8s.io/v1beta1\
"
|
||||
|
||||
# This emulates "readlink -f" which is not available on MacOS X.
|
||||
# Test:
|
||||
# T=/tmp/$$.$RANDOM
|
||||
# mkdir $T
|
||||
# touch $T/file
|
||||
# mkdir $T/dir
|
||||
# ln -s $T/file $T/linkfile
|
||||
# ln -s $T/dir $T/linkdir
|
||||
# function testone() {
|
||||
# X=$(readlink -f $1 2>&1)
|
||||
# Y=$(kube::readlinkdashf $1 2>&1)
|
||||
# if [ "$X" != "$Y" ]; then
|
||||
# echo readlinkdashf $1: expected "$X", got "$Y"
|
||||
# fi
|
||||
# }
|
||||
# testone /
|
||||
# testone /tmp
|
||||
# testone $T
|
||||
# testone $T/file
|
||||
# testone $T/dir
|
||||
# testone $T/linkfile
|
||||
# testone $T/linkdir
|
||||
# testone $T/nonexistant
|
||||
# testone $T/linkdir/file
|
||||
# testone $T/linkdir/dir
|
||||
# testone $T/linkdir/linkfile
|
||||
# testone $T/linkdir/linkdir
|
||||
# Prints the canonical absolute path of $1, emulating `readlink -f`
# (which is not available on MacOS X).
function kube::readlinkdashf {
  # run in a subshell for simpler 'cd'
  (
    if [[ -d "$1" ]]; then # This also catch symlinks to dirs.
      cd "$1"
      pwd -P
    else
      # Quote the dirname expansion so paths containing spaces or glob
      # characters resolve correctly (the unquoted form word-splits).
      cd "$(dirname "$1")"
      local f
      f=$(basename "$1")
      if [[ -L "$f" ]]; then
        readlink "$f"
      else
        echo "$(pwd -P)/${f}"
      fi
    fi
  )
}
|
||||
|
||||
# This emulates "realpath" which is not available on MacOS X
|
||||
# Test:
|
||||
# T=/tmp/$$.$RANDOM
|
||||
# mkdir $T
|
||||
# touch $T/file
|
||||
# mkdir $T/dir
|
||||
# ln -s $T/file $T/linkfile
|
||||
# ln -s $T/dir $T/linkdir
|
||||
# function testone() {
|
||||
# X=$(realpath $1 2>&1)
|
||||
# Y=$(kube::realpath $1 2>&1)
|
||||
# if [ "$X" != "$Y" ]; then
|
||||
# echo realpath $1: expected "$X", got "$Y"
|
||||
# fi
|
||||
# }
|
||||
# testone /
|
||||
# testone /tmp
|
||||
# testone $T
|
||||
# testone $T/file
|
||||
# testone $T/dir
|
||||
# testone $T/linkfile
|
||||
# testone $T/linkdir
|
||||
# testone $T/nonexistant
|
||||
# testone $T/linkdir/file
|
||||
# testone $T/linkdir/dir
|
||||
# testone $T/linkdir/linkfile
|
||||
# testone $T/linkdir/linkdir
|
||||
# Resolves $1 to a canonical absolute path, emulating `realpath`
# (which is not available on MacOS X). Fails with a message on stderr
# when the path does not exist.
kube::realpath() {
  local target=$1
  if [[ -e "${target}" ]]; then
    kube::readlinkdashf "${target}"
  else
    echo "${target}: No such file or directory" >&2
    return 1
  fi
}
|
||||
|
91
vendor/k8s.io/kubernetes/hack/lib/protoc.sh
generated
vendored
Normal file
91
vendor/k8s.io/kubernetes/hack/lib/protoc.sh
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# The root of the build/dist directory
|
||||
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
# Generates $1/api.pb.go from the protobuf file $1/api.proto
|
||||
# and formats it correctly
|
||||
# $1: Full path to the directory where the api.proto file is
|
||||
# Generates $1/api.pb.go from the protobuf file $1/api.proto
# and formats it correctly
# $1: Full path to the directory where the api.proto file is
function kube::protoc::generate_proto() {
  kube::golang::setup_env
  # Build the gogo protoc plugin first; kube::protoc::protoc needs it on PATH.
  local bins=(
    vendor/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo
  )
  make -C "${KUBE_ROOT}" WHAT="${bins[*]}"

  # Aborts (exit 1) if a suitable protoc is not installed.
  kube::protoc::check_protoc

  local package=${1}
  kube::protoc::protoc ${package}
  kube::protoc::format ${package}
}
|
||||
|
||||
# Checks that the current protoc version is at least version 3.0.0-beta1
|
||||
# exit 1 if it's not the case
|
||||
# Checks that the current protoc version is at least version 3.0.0-beta1
# exit 1 if it's not the case
function kube::protoc::check_protoc() {
  if [[ -z "$(which protoc)" || "$(protoc --version)" != "libprotoc 3."* ]]; then
    # Diagnostics go to stderr so they do not pollute stdout when this
    # function is used inside a command substitution or pipeline.
    {
      echo "Generating protobuf requires protoc 3.0.0-beta1 or newer. Please download and"
      echo "install the platform appropriate Protobuf package for your OS: "
      echo
      echo "  https://github.com/google/protobuf/releases"
      echo
      echo "WARNING: Protobuf changes are not being validated"
    } >&2
    exit 1
  fi
}
|
||||
|
||||
# Generates $1/api.pb.go from the protobuf file $1/api.proto
|
||||
# $1: Full path to the directory where the api.proto file is
|
||||
# Generates $1/api.pb.go from the protobuf file $1/api.proto
# $1: Full path to the directory where the api.proto file is
function kube::protoc::protoc() {
  local package=${1}
  # Directory containing the protoc-gen-gogo plugin built by generate_proto.
  gogopath=$(dirname "$(kube::util::find-binary "protoc-gen-gogo")")

  # Quote ${package} so paths with spaces do not word-split; prepend the
  # plugin dir to PATH only for this one protoc invocation.
  PATH="${gogopath}:${PATH}" protoc \
    --proto_path="${package}" \
    --proto_path="${KUBE_ROOT}/vendor" \
    --gogo_out=plugins=grpc:"${package}" "${package}/api.proto"
}
|
||||
|
||||
# Formats $1/api.pb.go, adds the boilerplate comments and run gofmt on it
|
||||
# $1: Full path to the directory where the api.proto file is
|
||||
# Formats $1/api.pb.go, adds the boilerplate comments and run gofmt on it
# $1: Full path to the directory where the api.proto file is
function kube::protoc::format() {
  local package=${1}

  # Update boilerplate for the generated file.
  echo "$(cat hack/boilerplate/boilerplate.go.txt "${package}/api.pb.go")" > "${package}/api.pb.go"
  sed -i".bak" "s/Copyright YEAR/Copyright $(date '+%Y')/g" "${package}/api.pb.go"
  # Remove sed's backup file so it is not left behind in the tree.
  rm -f "${package}/api.pb.go.bak"

  # Run gofmt to clean up the generated code.
  kube::golang::verify_go_version
  gofmt -l -s -w "${package}/api.pb.go"
}
|
||||
|
||||
# Compares the contents of $1 and $2
|
||||
# Echo's $3 in case of error and exits 1
|
||||
# Compares the contents of $1 and $2, ignoring lines that only differ in
# generated descriptor noise. Echo's $3 in case of error and exits 1.
function kube::protoc::diff() {
  if ! diff -I "gzipped FileDescriptorProto" -I "0x" -Naupr ${1} ${2}; then
    echo ${3}
    exit 1
  fi
}
|
155
vendor/k8s.io/kubernetes/hack/lib/swagger.sh
generated
vendored
Normal file
155
vendor/k8s.io/kubernetes/hack/lib/swagger.sh
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Contains swagger related util functions.
|
||||
#
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# The root of the build/dist directory
|
||||
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
|
||||
|
||||
# Generates types_swagger_doc_generated file for the given group version.
|
||||
# $1: Name of the group version
|
||||
# $2: Path to the directory where types.go for that group version exists. This
|
||||
# is the directory where the file will be generated.
|
||||
# Generates types_swagger_doc_generated file for the given group version.
# $1: Name of the group version
# $2: Path to the directory where types.go for that group version exists. This
# is the directory where the file will be generated.
kube::swagger::gen_types_swagger_doc() {
  local group_version=$1
  local gv_dir=$2
  local TMPFILE="${TMPDIR:-/tmp}/types_swagger_doc_generated.$(date +%s).go"

  echo "Generating swagger type docs for ${group_version} at ${gv_dir}"

  # Start the file with the license boilerplate, the package clause, and a
  # fixed explanatory header.
  sed 's/YEAR/2016/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE"
  echo "package ${group_version##*/}" >> "$TMPFILE"
  cat >> "$TMPFILE" <<EOF

// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh

// AUTO-GENERATED FUNCTIONS START HERE
EOF

  go run cmd/genswaggertypedocs/swagger_type_docs.go -s \
    "${gv_dir}/types.go" \
    -f - \
    >> "$TMPFILE"

  echo "// AUTO-GENERATED FUNCTIONS END HERE" >> "$TMPFILE"

  gofmt -w -s "$TMPFILE"
  # Fixed: the original used malformed quoting (""${gv_dir}"/...") which left
  # ${gv_dir} subject to word-splitting and globbing.
  mv "$TMPFILE" "${gv_dir}/types_swagger_doc_generated.go"
}
|
||||
|
||||
# Generates API reference docs for the given API group versions.
|
||||
# Required env vars:
|
||||
# GROUP_VERSIONS: Array of group versions to be included in the reference
|
||||
# docs.
|
||||
# GV_DIRS: Array of root directories for those group versions.
|
||||
# Input vars:
|
||||
# $1: Root directory path for swagger spec
|
||||
# $2: Root directory path where the reference docs should be generated.
|
||||
# Generates API reference docs for the given API group versions.
# Required env vars:
#   GROUP_VERSIONS: Array of group versions to be included in the reference
#   docs.
#   GV_DIRS: Array of root directories for those group versions.
# Input vars:
#   $1: Root directory path for swagger spec
#   $2: Root directory path where the reference docs should be generated.
kube::swagger::gen_api_ref_docs() {
  : "${GROUP_VERSIONS?Must set GROUP_VERSIONS env var}"
  : "${GV_DIRS?Must set GV_DIRS env var}"

  echo "Generating API reference docs for group versions: ${GROUP_VERSIONS[@]}, at dirs: ${GV_DIRS[@]}"
  # Re-split on whitespace so string-valued env vars also work as arrays.
  GROUP_VERSIONS=(${GROUP_VERSIONS[@]})
  GV_DIRS=(${GV_DIRS[@]})
  local swagger_spec_path=${1}
  local output_dir=${2}
  echo "Reading swagger spec from: ${swagger_spec_path}"
  echo "Generating the docs at: ${output_dir}"

  # Use REPO_DIR if provided so we can set it to the host-resolvable path
  # to the repo root if we are running this script from a container with
  # docker mounted in as a volume.
  # We pass the host output dir as the source dir to `docker run -v`, but use
  # the regular one to compute diff (they will be the same if running this
  # test on the host, potentially different if running in a container).
  local repo_dir=${REPO_DIR:-"${KUBE_ROOT}"}
  local tmp_subpath="_output/generated_html"
  local output_tmp_in_host="${repo_dir}/${tmp_subpath}"
  local output_tmp="${KUBE_ROOT}/${tmp_subpath}"

  echo "Generating api reference docs at ${output_tmp}"

  for ver in "${GROUP_VERSIONS[@]}"; do
    mkdir -p "${output_tmp}/${ver}"
  done

  user_flags="-u $(id -u)"
  if [[ $(uname) == "Darwin" ]]; then
    # mapping in a uid from OS X doesn't make any sense
    user_flags=""
  fi

  # One docker invocation of the gen-swagger-docs image per group version;
  # output lands in ${output_tmp_in_host}/<ver>.
  for i in "${!GROUP_VERSIONS[@]}"; do
    local ver=${GROUP_VERSIONS[i]}
    local dir=${GV_DIRS[i]}
    local tmp_in_host="${output_tmp_in_host}/${ver}"
    local register_file="${dir}/register.go"
    local swagger_json_name="$(kube::util::gv-to-swagger-name "${ver}")"

    docker run ${user_flags} \
      --rm -v "${tmp_in_host}":/output:z \
      -v "${swagger_spec_path}":/swagger-source:z \
      -v "${register_file}":/register.go:z \
      --net=host -e "https_proxy=${KUBERNETES_HTTPS_PROXY:-}" \
      gcr.io/google_containers/gen-swagger-docs:v8 \
      "${swagger_json_name}"
  done

  # Check if we actually changed anything
  # Build a sorted manifest of generated files (relative paths) in
  # .generated_html for the comparison loop below.
  pushd "${output_tmp}" > /dev/null
  touch .generated_html
  find . -type f | cut -sd / -f 2- | LC_ALL=C sort > .generated_html
  popd > /dev/null

  kube::util::ensure-gnu-sed

  # NOTE(review): `read` without -r mangles backslashes in file names;
  # presumably no generated file name contains one — verify if that changes.
  while read file; do
    if [[ -e "${output_dir}/${file}" && -e "${output_tmp}/${file}" ]]; then
      echo "comparing ${output_dir}/${file} with ${output_tmp}/${file}"

      # Remove the timestamp to reduce conflicts in PR(s)
      ${SED} -i 's/^Last updated.*$//' "${output_tmp}/${file}"

      # By now, the contents should be normalized and stripped of any
      # auto-managed content.
      if diff -NauprB "${output_dir}/${file}" "${output_tmp}/${file}" >/dev/null; then
        # actual contents same, overwrite generated with original.
        cp "${output_dir}/${file}" "${output_tmp}/${file}"
      fi
    fi
  done <"${output_tmp}/.generated_html"

  echo "Moving api reference docs from ${output_tmp} to ${output_dir}"

  # Create output_dir if doesn't exist. Prevents error on copy.
  mkdir -p "${output_dir}"

  cp -af "${output_tmp}"/* "${output_dir}"
  rm -r "${output_tmp}"
}
|
425
vendor/k8s.io/kubernetes/hack/lib/test.sh
generated
vendored
Normal file
425
vendor/k8s.io/kubernetes/hack/lib/test.sh
generated
vendored
Normal file
@ -0,0 +1,425 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A set of helpers for tests
|
||||
|
||||
# Terminal attribute/color escape sequences used by the assert helpers below
# to highlight success (green) and failure (bold red) output.
readonly reset=$(tput sgr0)
readonly bold=$(tput bold)
readonly black=$(tput setaf 0)
readonly red=$(tput setaf 1)
readonly green=$(tput setaf 2)
|
||||
|
||||
# Force-deletes all replication controllers and pods (when the apiserver
# supports those resources) so each test run starts from a clean state.
# Expects kube_flags to be set by the caller.
kube::test::clear_all() {
  if kube::test::if_supports_resource "rc" ; then
    kubectl delete "${kube_flags[@]}" rc --all --grace-period=0 --force
  fi
  if kube::test::if_supports_resource "pods" ; then
    kubectl delete "${kube_flags[@]}" pods --all --grace-period=0 --force
  fi
}
|
||||
|
||||
# Prints the calling file and line number $1 levels deep
|
||||
# Defaults to 2 levels so you can call this to find your own caller
|
||||
# Prints the calling file and line number $1 levels deep.
# Defaults to 2 levels so you can call this to find your own caller.
kube::test::get_caller() {
  local depth=${1:-2}
  local src="${BASH_SOURCE[$depth]}"
  local line="${BASH_LINENO[$depth-1]}"
  echo "$(basename "${src}"):${line}"
}
|
||||
|
||||
# Force exact match of a returned result for a object query. Wrap this with || to support multiple
# valid return types.
# This runs `kubectl get` once and asserts that the result is as expected.
# $1: Object on which get should be run
# $2: The go-template to run on the result
# $3: The expected output
# $4: Additional args to be passed to kubectl
kube::test::get_object_assert() {
  # Single-try variant of kube::test::object_assert.
  kube::test::object_assert 1 "$@"
}
|
||||
|
||||
# Asserts that the output of a given get query is as expected.
|
||||
# Runs the query multiple times before failing it.
|
||||
# $1: Object on which get should be run
|
||||
# $2: The go-template to run on the result
|
||||
# $3: The expected output
|
||||
# $4: Additional args to be passed to kubectl
|
||||
kube::test::wait_object_assert() {
  # Retrying variant of kube::test::object_assert: up to 10 attempts.
  kube::test::object_assert 10 "$@"
}
|
||||
|
||||
# Asserts that the output of a given get query is as expected.
|
||||
# Can run the query multiple times before failing it.
|
||||
# $1: Number of times the query should be run before failing it.
|
||||
# $2: Object on which get should be run
|
||||
# $3: The go-template to run on the result
|
||||
# $4: The expected output
|
||||
# $5: Additional args to be passed to kubectl
|
||||
# Asserts that the output of a given get query is as expected.
# Can run the query multiple times before failing it.
# $1: Number of times the query should be run before failing it.
# $2: Object on which get should be run
# $3: The go-template to run on the result
# $4: The expected output (used as an anchored regex: ^$expected$)
# $5: Additional args to be passed to kubectl
kube::test::object_assert() {
  local tries=$1
  local object=$2
  local request=$3
  local expected=$4
  local args=${5:-}

  for j in $(seq 1 ${tries}); do
    # eval is needed so that ${args} and $object undergo word splitting and
    # the escaped quotes around the go-template survive as one argument.
    res=$(eval kubectl get -a "${kube_flags[@]}" ${args} $object -o go-template=\"$request\")
    if [[ "$res" =~ ^$expected$ ]]; then
      echo -n ${green}
      echo "$(kube::test::get_caller 3): Successful get $object $request: $res"
      echo -n ${reset}
      return 0
    fi
    echo "Waiting for Get $object $request $args: expected: $expected, got: $res"
    # Back off linearly: sleep 0, 1, 2, ... seconds between attempts.
    sleep $((${j}-1))
  done

  echo ${bold}${red}
  echo "$(kube::test::get_caller 3): FAIL!"
  echo "Get $object $request"
  echo "  Expected: $expected"
  echo "  Got:      $res"
  echo ${reset}${red}
  caller
  echo ${reset}
  return 1
}
|
||||
|
||||
# Asserts that a single `kubectl get -o jsonpath` query returns the expected
# value (matched as an anchored regex).
# $1: Object on which get should be run
# $2: The jsonpath expression to evaluate on the result
# $3: The expected output
kube::test::get_object_jsonpath_assert() {
  local object=$1
  local request=$2
  local expected=$3

  # eval lets $object word-split and keeps the escaped quotes around the
  # jsonpath expression as one argument.
  res=$(eval kubectl get -a "${kube_flags[@]}" $object -o jsonpath=\"$request\")

  if [[ "$res" =~ ^$expected$ ]]; then
    echo -n ${green}
    echo "$(kube::test::get_caller): Successful get $object $request: $res"
    echo -n ${reset}
    return 0
  else
    echo ${bold}${red}
    echo "$(kube::test::get_caller): FAIL!"
    echo "Get $object $request"
    echo "  Expected: $expected"
    echo "  Got:      $res"
    echo ${reset}${red}
    caller
    echo ${reset}
    return 1
  fi
}
|
||||
|
||||
# Asserts that `kubectl describe <resource> <object>` output contains every
# one of the given grep patterns.
# $1: resource type
# $2: object name
# $3...: grep patterns that must all appear in the describe output
kube::test::describe_object_assert() {
  local resource=$1
  local object=$2
  local matches=${@:3}

  result=$(eval kubectl describe "${kube_flags[@]}" $resource $object)

  for match in ${matches}; do
    if [[ ! $(echo "$result" | grep ${match}) ]]; then
      echo ${bold}${red}
      echo "$(kube::test::get_caller): FAIL!"
      echo "Describe $resource $object"
      echo "  Expected Match: $match"
      echo "  Not found in:"
      echo "$result"
      echo ${reset}${red}
      caller
      echo ${reset}
      return 1
    fi
  done

  echo -n ${green}
  echo "$(kube::test::get_caller): Successful describe $resource $object:"
  echo "$result"
  echo -n ${reset}
  return 0
}
|
||||
|
||||
# Asserts that `kubectl describe` output for one object does (or does not)
# include event information, depending on $3.
# $1: resource type
# $2: object name
# $3: "true" (default) if events must be present, "false" if they must not be;
#     when omitted, --show-events is not passed to kubectl at all.
kube::test::describe_object_events_assert() {
  local resource=$1
  local object=$2
  local showevents=${3:-"true"}

  if [[ -z "${3:-}" ]]; then
    result=$(eval kubectl describe "${kube_flags[@]}" $resource $object)
  else
    result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=$showevents" $resource $object)
  fi

  # Events are present when the output contains "No events." or "Events:".
  if [[ -n $(echo "$result" | grep "No events.\|Events:") ]]; then
    local has_events="true"
  else
    local has_events="false"
  fi
  if [[ $showevents == $has_events ]]; then
    echo -n ${green}
    echo "$(kube::test::get_caller): Successful describe"
    echo "$result"
    echo ${reset}
    return 0
  else
    echo ${bold}${red}
    echo "$(kube::test::get_caller): FAIL"
    if [[ $showevents == "false" ]]; then
      echo "  Events information should not be described in:"
    else
      echo "  Events information not found in:"
    fi
    echo $result
    echo ${reset}${red}
    caller
    echo ${reset}
    return 1
  fi
}
|
||||
|
||||
# Asserts that `kubectl describe <resource>` (all objects of the type) output
# contains every one of the given grep patterns.
# $1: resource type
# $2...: grep patterns that must all appear in the describe output
kube::test::describe_resource_assert() {
  local resource=$1
  local matches=${@:2}

  result=$(eval kubectl describe "${kube_flags[@]}" $resource)

  for match in ${matches}; do
    if [[ ! $(echo "$result" | grep ${match}) ]]; then
      echo ${bold}${red}
      echo "FAIL!"
      echo "Describe $resource"
      echo "  Expected Match: $match"
      echo "  Not found in:"
      echo "$result"
      echo ${reset}${red}
      caller
      echo ${reset}
      return 1
    fi
  done

  echo -n ${green}
  echo "Successful describe $resource:"
  echo "$result"
  echo -n ${reset}
  return 0
}
|
||||
|
||||
# Asserts that `kubectl describe <resource>` output does (or does not)
# include event information, depending on $2.
# $1: resource type
# $2: "true" (default) if events must be present, "false" if they must not be
kube::test::describe_resource_events_assert() {
  local resource=$1
  local showevents=${2:-"true"}

  result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=$showevents" $resource)

  # Events are present when the output contains "No events." or "Events:".
  if [[ $(echo "$result" | grep "No events.\|Events:") ]]; then
    local has_events="true"
  else
    local has_events="false"
  fi
  if [[ $showevents == $has_events ]]; then
    echo -n ${green}
    echo "Successful describe"
    echo "$result"
    echo -n ${reset}
    return 0
  else
    echo ${bold}${red}
    echo "FAIL"
    if [[ $showevents == "false" ]]; then
      echo "  Events information should not be described in:"
    else
      echo "  Events information not found in:"
    fi
    echo $result
    caller
    echo ${reset}
    return 1
  fi
}
|
||||
|
||||
# Compare sort-by resource name output with expected order specify in the last parameter
|
||||
kube::test::if_sort_by_has_correct_order() {
  # Joins the first column of every non-header line of $1 with ':' and checks
  # that the joined string contains the expected order given as the LAST
  # argument (delegates the match to kube::test::if_has_string).
  local array=($(echo "$1" |awk '{if(NR!=1) print $1}'))
  local var
  for i in "${array[@]}"; do
    var+="$i:"
  done

  kube::test::if_has_string "$var" "${@:$#}"
}
|
||||
|
||||
# Asserts that the text in $1 contains the grep pattern in $2.
kube::test::if_has_string() {
  local haystack=$1
  local needle=$2

  if ! echo "$haystack" | grep -q "$needle"; then
    echo "FAIL!"
    echo "message:$haystack"
    echo "has not:$needle"
    caller
    return 1
  fi
  echo "Successful"
  echo "message:$haystack"
  echo "has:$needle"
  return 0
}
|
||||
|
||||
# Asserts that the text in $1 does NOT contain the grep pattern in $2.
kube::test::if_has_not_string() {
  local haystack=$1
  local needle=$2

  if ! echo "$haystack" | grep -q "$needle"; then
    echo "Successful"
    echo "message:$haystack"
    echo "has not:$needle"
    return 0
  fi
  echo "FAIL!"
  echo "message:$haystack"
  echo "has:$needle"
  caller
  return 1
}
|
||||
|
||||
# Asserts that $1 is the empty string.
kube::test::if_empty_string() {
  local value=$1
  if [[ -z "$value" ]]; then
    echo "Successful"
    return 0
  fi
  echo "$value is not empty"
  caller
  return 1
}
|
||||
|
||||
# Returns true if the required resource is part of supported resources.
|
||||
# Expects env vars:
|
||||
# SUPPORTED_RESOURCES: Array of all resources supported by the apiserver. "*"
|
||||
# means it supports all resources. For ex: ("*") or ("rc" "*") both mean that
|
||||
# all resources are supported.
|
||||
# $1: Name of the resource to be tested.
|
||||
# Returns true if the required resource is part of supported resources.
# Expects env vars:
#   SUPPORTED_RESOURCES: Array of all resources supported by the apiserver.
#   "*" means it supports all resources. For ex: ("*") or ("rc" "*") both
#   mean that all resources are supported.
# $1: Name of the resource to be tested.
kube::test::if_supports_resource() {
  SUPPORTED_RESOURCES=${SUPPORTED_RESOURCES:-""}
  REQUIRED_RESOURCE=${1:-""}

  for r in "${SUPPORTED_RESOURCES[@]}"; do
    case "${r}" in
      "*"|"${REQUIRED_RESOURCE}")
        return 0
        ;;
    esac
  done
  return 1
}
|
||||
|
||||
|
||||
# Extracts the "$1 Version:" line of `kubectl version` into key=value lines
# written to file $3.
# $1: component name as printed by kubectl ("Client" / "Server")
# $2: extra flags passed to kubectl version (optional)
# $3: output file path
# NOTE(review): the sed pipeline rewrites kubectl's human-readable
# version.Info{...} output — fragile against kubectl format changes.
kube::test::version::object_to_file() {
  name=$1
  flags=${2:-""}
  file=$3
  kubectl version $flags | grep "$name Version:" | sed -e s/"$name Version: version.Info{"/'/' -e s/'}'/'/' -e s/', '/','/g -e s/':'/'=/g' -e s/'"'/""/g | tr , '\n' > "${file}"
}
|
||||
|
||||
# Flattens `kubectl version --output json` into one token per line, written
# to file $2, by stripping JSON punctuation and splitting on commas.
# $1: flags passed to kubectl version
# $2: output file path
kube::test::version::json_object_to_file() {
  flags=$1
  file=$2
  kubectl version $flags --output json | sed -e s/' '/''/g -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e s/'clientVersion:'/'clientVersion:,'/ -e s/'serverVersion:'/'serverVersion:,'/ | tr , '\n' > "${file}"
}
|
||||
|
||||
# Extracts field $2 (e.g. clientVersion) from `kubectl version --output json`
# via jq and writes it as key=value lines to file $3.
# $1: flags passed to kubectl version
# $2: top-level JSON field name to extract
# $3: output file path
kube::test::version::json_client_server_object_to_file() {
  flags=$1
  name=$2
  file=$3
  kubectl version $flags --output json | jq -r ".${name}" | sed -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e /^$/d -e s/','/''/g -e s/':'/'='/g > "${file}"
}
|
||||
|
||||
# Normalizes `kubectl version --output yaml` (strips spaces, quotes and blank
# lines) and writes the result to file $2.
# $1: flags passed to kubectl version
# $2: output file path
kube::test::version::yaml_object_to_file() {
  flags=$1
  file=$2
  kubectl version $flags --output yaml | sed -e s/' '/''/g -e s/'\"'/''/g -e /^$/d > "${file}"
}
|
||||
|
||||
# Asserts a relationship between two files after sorting them, ignoring
# case, whitespace and blank-line differences (diff -iwB).
# $1: path to the "original" file
# $2: comparator: "eq" means the files must match; any other value means
#     they must differ (default "eq")
# $3: path to the "latest" file
# $4: message to print with the result
kube::test::version::diff_assert() {
  local original=$1
  local comparator=${2:-"eq"}
  local latest=$3
  local diff_msg=${4:-""}
  local res=""

  # Both input files must exist. (Fixed message typo: "exit" -> "exist".)
  local f
  for f in "${original}" "${latest}"; do
    if [ ! -f "${f}" ]; then
      echo ${bold}${red}
      echo "FAIL! ${diff_msg}"
      echo "the file '${f}' does not exist"
      echo ${reset}${red}
      caller
      echo ${reset}
      return 1
    fi
  done

  sort ${original} > "${original}.sorted"
  sort ${latest} > "${latest}.sorted"

  # diff exits non-zero when the files differ; '|| true' keeps errexit happy.
  local delta
  delta="$(diff -iwB "${original}.sorted" "${latest}.sorted" || true)"

  # "eq" succeeds on an empty diff; any other comparator on a non-empty one.
  local matched="false"
  if [ "$comparator" == "eq" ]; then
    if [ -z "${delta}" ]; then matched="true"; fi
  else
    if [ -n "${delta}" ]; then matched="true"; fi
  fi

  if [ "${matched}" == "true" ]; then
    echo -n ${green}
    echo "Successful: ${diff_msg}"
    echo -n ${reset}
    return 0
  fi

  echo ${bold}${red}
  echo "FAIL! ${diff_msg}"
  echo "  Expected: "
  echo "$(cat ${original})"
  echo "  Got: "
  echo "$(cat ${latest})"
  echo ${reset}${red}
  caller
  echo ${reset}
  return 1
}
|
||||
|
804
vendor/k8s.io/kubernetes/hack/lib/util.sh
generated
vendored
Executable file
804
vendor/k8s.io/kubernetes/hack/lib/util.sh
generated
vendored
Executable file
@ -0,0 +1,804 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Print the current time as a lexically sortable timestamp, e.g. 20240131-235959.
kube::util::sortable_date() {
  local -r fmt="+%Y%m%d-%H%M%S"
  date "${fmt}"
}
|
||||
|
||||
# Poll a URL until it answers or the retry budget is exhausted.
# Args:
#   $1 url to poll
#   $2 log prefix for status messages (optional)
#   $3 seconds to sleep between tries (default 1)
#   $4 number of tries (default 30)
# Returns 0 as soon as the URL answers, 1 after the last failed try.
kube::util::wait_for_url() {
  local url=$1
  local prefix=${2:-}
  local wait=${3:-1}
  local times=${4:-30}

  # curl is required; `command -v` is the portable replacement for `which`.
  command -v curl >/dev/null || {
    kube::log::usage "curl must be installed"
    exit 1
  }

  local i
  for i in $(seq 1 "${times}"); do
    local out
    # -g: no URL globbing, -k: accept self-signed certs, -f: fail on HTTP
    # errors, -s: silent; 1s cap so a hung server doesn't stall the loop.
    if out=$(curl --max-time 1 -gkfs "${url}" 2>/dev/null); then
      kube::log::status "On try ${i}, ${prefix}: ${out}"
      return 0
    fi
    sleep "${wait}"
  done
  kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
  return 1
}
|
||||
|
||||
# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
|
||||
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
|
||||
# Append a command to an existing trap without clobbering it.
# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
# Fixes over the previous version: `$()` instead of backticks, and the loop
# variable is declared local so it no longer leaks into the caller's scope.
kube::util::trap_add() {
  local trap_add_cmd
  trap_add_cmd=$1
  shift

  local trap_add_name
  for trap_add_name in "$@"; do
    local existing_cmd
    local new_cmd

    # Grab the currently defined trap commands for this trap.
    existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')

    if [[ -z "${existing_cmd}" ]]; then
      new_cmd="${trap_add_cmd}"
    else
      # Run the new command first, then whatever was already installed.
      new_cmd="${trap_add_cmd};${existing_cmd}"
    fi

    # Assign the trap.
    trap "${new_cmd}" "${trap_add_name}"
  done
}
|
||||
|
||||
# Opposite of kube::util::ensure-temp-dir()
|
||||
# Opposite of kube::util::ensure-temp-dir(): removes the session temp dir.
kube::util::cleanup-temp-dir() {
  rm -rf "${KUBE_TEMP}"
}
|
||||
|
||||
# Create a temp dir that'll be deleted at the end of this bash session.
|
||||
#
|
||||
# Vars set:
|
||||
# KUBE_TEMP
|
||||
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
kube::util::ensure-temp-dir() {
  # Idempotent: only create the directory once per session.
  if [[ -n ${KUBE_TEMP-} ]]; then
    return
  fi
  KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
  kube::util::trap_add kube::util::cleanup-temp-dir EXIT
}
|
||||
|
||||
# This figures out the host platform without relying on golang. We need this as
|
||||
# we don't want a golang install to be a prerequisite to building yet we need
|
||||
# this info to figure out where the final binaries are placed.
|
||||
# This figures out the host platform without relying on golang. We need this as
# we don't want a golang install to be a prerequisite to building yet we need
# this info to figure out where the final binaries are placed.
# Prints "<os>/<arch>", e.g. "linux/amd64".
kube::util::host_platform() {
  local host_os
  local host_arch

  case "$(uname -s)" in
    Darwin) host_os=darwin ;;
    Linux)  host_os=linux ;;
    *)
      kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
      exit 1
      ;;
  esac

  # Equivalent patterns are merged into single arms.
  case "$(uname -m)" in
    x86_64* | i?86_64* | amd64*) host_arch=amd64 ;;
    aarch64* | arm64*)           host_arch=arm64 ;;
    arm*)                        host_arch=arm ;;
    i?86*)                       host_arch=x86 ;;
    s390x*)                      host_arch=s390x ;;
    ppc64le*)                    host_arch=ppc64le ;;
    *)
      kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
      exit 1
      ;;
  esac
  echo "${host_os}/${host_arch}"
}
|
||||
|
||||
# Print the path of the most recently built copy of a binary for a platform,
# searching the usual output trees and the bazel build tree.
# Args: $1 binary name, $2 platform ("os/arch").
kube::util::find-binary-for-platform() {
  local -r lookfor="$1"
  local -r platform="$2"
  local candidates=(
    "${KUBE_ROOT}/_output/bin/${lookfor}"
    "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
  )
  # Also search for binary in bazel build tree.
  # In some cases we have to name the binary $BINARY_bin, since there was a
  # directory named $BINARY next to it.
  candidates+=($(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
    \( -name "${lookfor}" -o -name "${lookfor}_bin" \) 2>/dev/null || true) )

  # List most recently-updated location.
  local -r newest=$( (ls -t "${candidates[@]}" 2>/dev/null || true) | head -1 )
  echo -n "${newest}"
}
|
||||
|
||||
# Convenience wrapper: find a binary for the host's own platform.
kube::util::find-binary() {
  local -r what="$1"
  kube::util::find-binary-for-platform "${what}" "$(kube::util::host_platform)"
}
|
||||
|
||||
# Run all known doc generators (today gendocs and genman for kubectl)
|
||||
# $1 is the directory to put those generated documents
|
||||
# Run all known doc generators (today gendocs and genman for kubectl)
# $1 is the directory to put those generated documents
kube::util::gen-docs() {
  local dest="$1"

  # Find binary
  gendocs=$(kube::util::find-binary "gendocs")
  genkubedocs=$(kube::util::find-binary "genkubedocs")
  genman=$(kube::util::find-binary "genman")
  genyaml=$(kube::util::find-binary "genyaml")
  genfeddocs=$(kube::util::find-binary "genfeddocs")

  mkdir -p "${dest}/docs/user-guide/kubectl/"
  "${gendocs}" "${dest}/docs/user-guide/kubectl/"

  # Component reference docs, in the same order as before.
  local component
  mkdir -p "${dest}/docs/admin/"
  for component in kube-apiserver kube-controller-manager cloud-controller-manager kube-proxy kube-scheduler kubelet kubeadm; do
    "${genkubedocs}" "${dest}/docs/admin/" "${component}"
  done

  # Man pages (kubectl additionally gets a man page).
  mkdir -p "${dest}/docs/man/man1/"
  for component in kube-apiserver kube-controller-manager cloud-controller-manager kube-proxy kube-scheduler kubelet kubectl kubeadm; do
    "${genman}" "${dest}/docs/man/man1/" "${component}"
  done

  mkdir -p "${dest}/docs/yaml/kubectl/"
  "${genyaml}" "${dest}/docs/yaml/kubectl/"

  # create the list of generated files
  pushd "${dest}" > /dev/null
  touch docs/.generated_docs
  find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
  popd > /dev/null
}
|
||||
|
||||
# Puts a placeholder for every generated doc. This makes the link checker work.
|
||||
# Puts a placeholder for every generated doc. This makes the link checker work.
# Reads the list of generated files from docs/.generated_docs; fixes over the
# previous version: `read -r` (no backslash mangling) and a quoted -e test.
kube::util::set-placeholder-gen-docs() {
  local list_file="${KUBE_ROOT}/docs/.generated_docs"
  if [ -e "${list_file}" ]; then
    # remove all of the old docs; we don't want to check them in.
    while read -r file; do
      if [[ "${list_file}" != "${KUBE_ROOT}/${file}" ]]; then
        cp "${KUBE_ROOT}/hack/autogenerated_placeholder.txt" "${KUBE_ROOT}/${file}"
      fi
    done <"${list_file}"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}
|
||||
|
||||
# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
|
||||
# must be set.
|
||||
# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
# must be set. Fix over the previous version: `read -r` so backslashes in
# listed paths are not mangled.
kube::util::remove-gen-docs() {
  if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
    # remove all of the old docs; we don't want to check them in.
    while read -r file; do
      rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
    done <"${KUBE_ROOT}/docs/.generated_docs"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}
|
||||
|
||||
# Takes a group/version and returns the path to its location on disk, sans
|
||||
# "pkg". E.g.:
|
||||
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
|
||||
# * default behavior for only a group: experimental -> apis/experimental
|
||||
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
|
||||
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
|
||||
# * Very special handling for when both group and version are "": / -> api
|
||||
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
# Fix over the previous version: the duplicated `meta/v1)` and `meta/v1alpha1)`
# case arms (whose second copies were unreachable dead code) are removed.
kube::util::group-version-to-pkg-path() {
  # Enumerate the staging API dirs so vendored copies are preferred.
  staging_apis=(
    $(
      pushd "${KUBE_ROOT}/staging/src/k8s.io/api" > /dev/null
      find . -name types.go | xargs -n1 dirname | sed "s|\./||g" | sort
      popd > /dev/null
    )
  )

  local group_version="$1"

  if [[ " ${staging_apis[@]} " =~ " ${group_version/.*k8s.io/} " ]]; then
    echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
    return
  fi

  # "v1" is the API GroupVersion
  if [[ "${group_version}" == "v1" ]]; then
    echo "vendor/k8s.io/api/core/v1"
    return
  fi

  # Special cases first.
  # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
  # moving the results to pkg/apis/api.
  case "${group_version}" in
    # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
    __internal)
      echo "pkg/apis/core"
      ;;
    meta/v1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
      ;;
    meta/v1alpha1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
      ;;
    unversioned)
      echo "pkg/api/unversioned"
      ;;
    *.k8s.io)
      echo "pkg/apis/${group_version%.*k8s.io}"
      ;;
    *.k8s.io/*)
      echo "pkg/apis/${group_version/.*k8s.io/}"
      ;;
    *)
      echo "pkg/apis/${group_version%__internal}"
      ;;
  esac
}
|
||||
|
||||
# Takes a group/version and returns the swagger-spec file name.
|
||||
# default behavior: extensions/v1beta1 -> extensions_v1beta1
|
||||
# special case for v1: v1 -> v1
|
||||
# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kube::util::gv-to-swagger-name() {
  local group_version="$1"
  # The legacy core group keeps its bare name.
  if [[ "${group_version}" == "v1" ]]; then
    echo "v1"
    return
  fi
  # Replace the single "/" separator with "_".
  echo "${group_version%/*}_${group_version#*/}"
}
|
||||
|
||||
|
||||
# Fetches swagger spec from apiserver.
|
||||
# Assumed vars:
|
||||
# SWAGGER_API_PATH: Base path for swaggerapi on apiserver. Ex:
|
||||
# http://localhost:8080/swaggerapi.
|
||||
# SWAGGER_ROOT_DIR: Root dir where we want to to save the fetched spec.
|
||||
# VERSIONS: Array of group versions to include in swagger spec.
|
||||
# Fetches swagger spec from apiserver.
# Assumed vars:
#   SWAGGER_API_PATH: Base path for swaggerapi on apiserver. Ex:
#       http://localhost:8080/swaggerapi.
#   SWAGGER_ROOT_DIR: Root dir where we want to to save the fetched spec.
#   VERSIONS: Array of group versions to include in swagger spec.
kube::util::fetch-swagger-spec() {
  for ver in ${VERSIONS}; do
    # Skip versions that are not served by the apiserver.
    if [[ " ${KUBE_NONSERVER_GROUP_VERSIONS} " == *" ${ver} "* ]]; then
      continue
    fi
    # fetch the swagger spec for each group version.
    if [[ ${ver} == "v1" ]]; then SUBPATH="api"; else SUBPATH="apis"; fi
    SUBPATH="${SUBPATH}/${ver}"
    SWAGGER_JSON_NAME="$(kube::util::gv-to-swagger-name ${ver}).json"
    curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"

    # fetch the swagger spec for the discovery mechanism at group level.
    # (v1 has no group-level discovery document.)
    if [[ ${ver} == "v1" ]]; then
      continue
    fi
    SUBPATH="apis/"${ver%/*}
    SWAGGER_JSON_NAME="${ver%/*}.json"
    curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
  done

  # fetch swagger specs for other discovery mechanism.
  curl -w "\n" -fs "${SWAGGER_API_PATH}" > "${SWAGGER_ROOT_DIR}/resourceListing.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}version" > "${SWAGGER_ROOT_DIR}/version.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}api" > "${SWAGGER_ROOT_DIR}/api.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}apis" > "${SWAGGER_ROOT_DIR}/apis.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}logs" > "${SWAGGER_ROOT_DIR}/logs.json"
}
|
||||
|
||||
# Returns the name of the upstream remote repository name for the local git
|
||||
# repo, e.g. "upstream" or "origin".
|
||||
# Returns the name of the upstream remote repository name for the local git
# repo, e.g. "upstream" or "origin".
kube::util::git_upstream_remote_name() {
  # Take the first fetch remote that points at the kubernetes repo.
  git remote -v |
    grep fetch |
    grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |
    head -n 1 |
    awk '{print $1}'
}
|
||||
|
||||
# Ensures the current directory is a git tree for doing things like restoring or
|
||||
# validating godeps
|
||||
# Ensures the current directory is a git tree for doing things like restoring or
# validating godeps
kube::util::create-fake-git-tree() {
  local -r target_dir=${1:-$(pwd)}

  pushd "${target_dir}" >/dev/null
    # Snapshot the directory contents as a single throwaway commit.
    git init >/dev/null
    git config --local user.email "nobody@k8s.io"
    git config --local user.name "$0"
    git add . >/dev/null
    git commit -q -m "Snapshot" >/dev/null
    if (( ${KUBE_VERBOSE:-5} >= 6 )); then
      kube::log::status "${target_dir} is now a git tree."
    fi
  popd >/dev/null
}
|
||||
|
||||
# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
|
||||
# and are checked out to the referenced rev.
|
||||
# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
# and are checked out to the referenced rev.
# Args: $1 Godeps.json path (default Godeps/Godeps.json), $2 GOPATH dir.
# Fixes over the previous version: `read -r` (no backslash mangling) and
# `[[ ... && ... ]]` instead of the obsolescent `[ ... -a ... ]`.
kube::util::godep_restored() {
  local -r godeps_json=${1:-Godeps/Godeps.json}
  local -r gopath=${2:-${GOPATH%:*}}
  # jq is required to parse Godeps.json.
  if ! which jq &>/dev/null; then
    echo "jq not found. Please install." 1>&2
    return 1
  fi
  local root
  local old_rev=""
  # Each input line is "<import-path> <rev>".
  while read -r path rev; do
    rev=$(echo "${rev}" | sed "s/['\"]//g") # remove quotes which are around revs sometimes

    if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
      # avoid checking the same git/hg root again
      continue
    fi

    # Walk up from the import path to the enclosing git/hg checkout root.
    root="${path}"
    while [[ "${root}" != "." && ! -d "${gopath}/src/${root}/.git" && ! -d "${gopath}/src/${root}/.hg" ]]; do
      root=$(dirname "${root}")
    done
    if [ "${root}" == "." ]; then
      echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
      return 1
    fi
    local head
    if [ -d "${gopath}/src/${root}/.git" ]; then
      head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
    else
      head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
    fi
    if [ "${head}" != "${rev}" ]; then
      echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
      return 1
    fi
    old_rev="${rev}"
  done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
  return 0
}
|
||||
|
||||
# Exits script if working directory is dirty. If it's run interactively in the terminal
|
||||
# the user can commit changes in a second terminal. This script will wait.
|
||||
# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
# Fix over the previous version: `read -r` so a stray backslash on the line
# does not get interpreted as a continuation.
kube::util::ensure_clean_working_dir() {
  while ! git diff HEAD --exit-code &>/dev/null; do
    echo -e "\nUnexpected dirty working directory:\n"
    if tty -s; then
      git status -s
    else
      git diff -a # be more verbose in log files without tty
      exit 1
    fi | sed 's/^/ /'
    echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
    read -r
  done 1>&2
}
|
||||
|
||||
# Ensure that the given godep version is installed and in the path. Almost
|
||||
# nobody should use any version but the default.
|
||||
# Ensure that the given godep version is installed and in the path. Almost
# nobody should use any version but the default.
kube::util::ensure_godep_version() {
  GODEP_VERSION=${1:-"v79"} # this version is known to work
  local -r want="godep ${GODEP_VERSION}"

  # Already at the desired version? Nothing to do.
  case "$(godep version 2>/dev/null)" in
    *"${want}"*) return ;;
  esac

  kube::log::status "Installing godep version ${GODEP_VERSION}"
  go install ./vendor/github.com/tools/godep/

  # Verify the install actually produced the expected version.
  case "$(godep version 2>/dev/null)" in
    *"${want}"*) ;;
    *)
      kube::log::error "Expected godep ${GODEP_VERSION}, got $(godep version)"
      return 1
      ;;
  esac
}
|
||||
|
||||
# Ensure that none of the staging repos is checked out in the GOPATH because this
|
||||
# easily confused godep.
|
||||
# Ensure that none of the staging repos is checked out in the GOPATH because this
# easily confuses godep.
# Fix over the previous version: iterate a glob instead of parsing `ls` output
# (word-splitting of `ls` breaks on unusual file names and is a shell anti-pattern).
kube::util::ensure_no_staging_repos_in_gopath() {
  kube::util::ensure_single_dir_gopath
  local error=0
  local repo_dir
  local repo
  for repo_dir in "${KUBE_ROOT}/staging/src/k8s.io"/*; do
    repo=$(basename "${repo_dir}")
    if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
      echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
      error=1
    fi
  done
  if [ "${error}" = "1" ]; then
    exit 1
  fi
}
|
||||
|
||||
# Installs the specified go package at a particular commit.
|
||||
# Installs the specified go package at a particular commit into a throwaway
# GOPATH under KUBE_TEMP, then prepends its bin dir to PATH.
# Args: $1 go import path, $2 commit sha.
kube::util::go_install_from_commit() {
  local -r pkg=$1
  local -r commit=$2

  kube::util::ensure-temp-dir
  mkdir -p "${KUBE_TEMP}/go/src"
  # Fetch source only (-d), then pin the requested commit and build it.
  GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}"
  (
    cd "${KUBE_TEMP}/go/src/${pkg}"
    git checkout -q "${commit}"
    GOPATH="${KUBE_TEMP}/go" go install "${pkg}"
  )
  PATH="${KUBE_TEMP}/go/bin:${PATH}"
  hash -r # force bash to clear PATH cache
}
|
||||
|
||||
# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
|
||||
# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
kube::util::ensure_single_dir_gopath() {
  case "${GOPATH}" in
    *:*)
      echo "GOPATH must consist of a single directory." 1>&2
      exit 1
      ;;
  esac
}
|
||||
|
||||
# Checks whether there are any files matching pattern $2 changed between the
|
||||
# current branch and upstream branch named by $1.
|
||||
# Returns 1 (false) if there are no changes, 0 (true) if there are changes
|
||||
# detected.
|
||||
# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes, 0 (true) if there are changes
# detected.
kube::util::has_changes_against_upstream_branch() {
  local -r git_branch=$1
  local -r pattern=$2
  local -r not_pattern=${3:-totallyimpossiblepattern}
  local full_branch

  full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
  echo "Checking for '${pattern}' changes against '${full_branch}'"
  # make sure the branch is valid, otherwise the check will pass erroneously.
  git describe "${full_branch}" >/dev/null || exit 1 # abort!
  # notice this uses ... to find the first shared ancestor
  if git diff --name-only "${full_branch}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    return 0
  fi
  # also check for pending changes
  if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    echo "Detected '${pattern}' uncommitted changes."
    return 0
  fi
  echo "No '${pattern}' changes detected."
  return 1
}
|
||||
|
||||
# Download a URL to a destination file, retrying up to 5 times.
# Args: $1 source url, $2 destination file.
# Returns 0 on success, 1 after all retries fail.
# Fix over the previous version: `rm ${f} 2&> /dev/null` passed a literal
# argument "2" to rm instead of redirecting stderr; variables are now quoted.
kube::util::download_file() {
  local -r url=$1
  local -r destination_file=$2

  # Remove any stale previous download; ignore a missing file.
  rm "${destination_file}" &> /dev/null || true

  local i
  for i in $(seq 5)
  do
    if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
      echo "Downloading ${url} failed. $((5-i)) retries left."
      sleep 1
    else
      echo "Downloading ${url} succeed"
      return 0
    fi
  done
  return 1
}
|
||||
|
||||
# Test whether openssl is installed.
|
||||
# Sets:
|
||||
# OPENSSL_BIN: The path to the openssl binary to use
|
||||
# Test whether openssl is installed.
# Sets:
#  OPENSSL_BIN: The path to the openssl binary to use
# Fix over the previous version: test the command directly with `if !` instead
# of inspecting `$?` afterwards (fragile if a statement is ever inserted between).
function kube::util::test_openssl_installed {
    if ! openssl version >& /dev/null; then
      echo "Failed to run openssl. Please ensure openssl is installed"
      exit 1
    fi

    OPENSSL_BIN=$(command -v openssl)
}
|
||||
|
||||
# creates a client CA, args are sudo, dest-dir, ca-id, purpose
|
||||
# purpose is dropped in after "key encipherment", you usually want
|
||||
# '"client auth"'
|
||||
# '"server auth"'
|
||||
# '"client auth","server auth"'
|
||||
# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
#   '"client auth"'
#   '"server auth"'
#   '"client auth","server auth"'
function kube::util::create_signing_certkey {
    local maybe_sudo=$1
    local dest_dir=$2
    local id=$3
    local purpose=$4
    # Create client ca: a self-signed cert/key pair plus a cfssl signing
    # config whose usages embed ${purpose}.
    ${maybe_sudo} /bin/bash -e <<EOF
    rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
    ${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}
|
||||
|
||||
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
|
||||
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
    local maybe_sudo=$1
    local dest_dir=$2
    local ca=$3
    local id=$4
    local cn=${5:-$4}
    local groups=""
    local sep=""
    shift 5
    # Remaining args become JSON "O" (organization/group) entries.
    while [ -n "${1:-}" ]; do
        groups+="${sep}{\"O\":\"$1\"}"
        sep=","
        shift 1
    done
    ${maybe_sudo} /bin/bash -e <<EOF
    cd ${dest_dir}
    echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
    mv "client-${id}-key.pem" "client-${id}.key"
    mv "client-${id}.pem" "client-${id}.crt"
    rm -f "client-${id}.csr"
EOF
}
|
||||
|
||||
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
|
||||
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
    local maybe_sudo=$1
    local dest_dir=$2
    local ca=$3
    local id=$4
    local cn=${5:-$4}
    local hosts=""
    local sep=""
    shift 5
    # Remaining args become the SAN host list.
    while [ -n "${1:-}" ]; do
        hosts+="${sep}\"$1\""
        sep=","
        shift 1
    done
    ${maybe_sudo} /bin/bash -e <<EOF
    cd ${dest_dir}
    echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
    mv "serving-${id}-key.pem" "serving-${id}.key"
    mv "serving-${id}.pem" "serving-${id}.crt"
    rm -f "serving-${id}.csr"
EOF
}
|
||||
|
||||
# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
|
||||
# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
    local maybe_sudo=$1
    local dest_dir=$2
    local ca_file=$3
    local api_host=$4
    local api_port=$5
    local client_id=$6
    local token=${7:-}
    # Emit the kubeconfig referencing the client cert/key pair on disk.
    cat <<EOF | ${maybe_sudo} tee "${dest_dir}"/${client_id}.kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ca_file}
      server: https://${api_host}:${api_port}/
    name: local-up-cluster
users:
  - user:
      token: ${token}
      client-certificate: ${dest_dir}/client-${client_id}.crt
      client-key: ${dest_dir}/client-${client_id}.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF

    # flatten the kubeconfig files to make them self contained
    username=$(whoami)
    ${maybe_sudo} /bin/bash -e <<EOF
    $(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
    mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
    chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}
|
||||
|
||||
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
|
||||
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
function kube::util::ensure_docker_daemon_connectivity {
  DOCKER=(docker ${DOCKER_OPTS})
  # Happy path first: daemon reachable, nothing to report.
  if "${DOCKER[@]}" info > /dev/null 2>&1 ; then
    return 0
  fi
  cat <<'EOF' >&2
Can't connect to 'docker' daemon.  please fix and retry.

Possible causes:
  - Docker Daemon not started
    - Linux: confirm via your init system
    - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
    - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
  - DOCKER_HOST hasn't been set or is set incorrectly
    - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
    - macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
    - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
  - Other things to check:
    - Linux: User isn't in 'docker' group.  Add and relogin.
      - Something like 'sudo usermod -a -G docker ${USER}'
      - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
  return 1
}
|
||||
|
||||
# Wait for background jobs to finish. Return with
|
||||
# an error status if any of the jobs failed.
|
||||
# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
  local failures=0
  local pid
  for pid in $(jobs -p); do
    if ! wait "${pid}"; then
      failures=$((failures + 1))
    fi
  done
  return ${failures}
}
|
||||
|
||||
# kube::util::join <delim> <list...>
|
||||
# Concatenates the list elements with the delimiter passed as first parameter
|
||||
#
|
||||
# Ex: kube::util::join , a b c
|
||||
# -> a,b,c
|
||||
# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
#  -> a,b,c
function kube::util::join {
  local sep="$1"
  shift
  # "$*" joins positional params with the first character of IFS.
  local IFS="${sep}"
  echo "$*"
}
|
||||
|
||||
# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
|
||||
#
|
||||
# Assumed vars:
|
||||
# $1 (cfssl directory) (optional)
|
||||
#
|
||||
# Sets:
|
||||
# CFSSL_BIN: The path of the installed cfssl binary
|
||||
# CFSSLJSON_BIN: The path of the installed cfssljson binary
|
||||
#
|
||||
# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
#   $1 (cfssl directory) (optional)
#
# Sets:
#  CFSSL_BIN: The path of the installed cfssl binary
#  CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
  # Prefer binaries already on PATH.
  if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
    CFSSL_BIN=$(command -v cfssl)
    CFSSLJSON_BIN=$(command -v cfssljson)
    return 0
  fi

  # Create a temp dir for cfssl if no directory was given
  local cfssldir=${1:-}
  if [[ -z "${cfssldir}" ]]; then
    kube::util::ensure-temp-dir
    cfssldir="${KUBE_TEMP}/cfssl"
  fi

  mkdir -p "${cfssldir}"
  pushd "${cfssldir}" > /dev/null

  echo "Unable to successfully run 'cfssl' from $PATH; downloading instead..."
  kernel=$(uname -s)
  case "${kernel}" in
    Linux)
      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
      ;;
    Darwin)
      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
      ;;
    *)
      echo "Unknown, unsupported platform: ${kernel}." >&2
      echo "Supported platforms: Linux, Darwin." >&2
      exit 2
  esac

  # chmod may fail if a download above failed; the -x check below catches that.
  chmod +x cfssl || true
  chmod +x cfssljson || true

  CFSSL_BIN="${cfssldir}/cfssl"
  CFSSLJSON_BIN="${cfssldir}/cfssljson"
  if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
    echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi
  popd > /dev/null
}
|
||||
|
||||
# kube::util::ensure_dockerized
|
||||
# Confirms that the script is being run inside a kube-build image
|
||||
#
|
||||
# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image
# (detected via the /kube-build-image sentinel file).
#
function kube::util::ensure_dockerized {
  if [[ ! -f /kube-build-image ]]; then
    echo "ERROR: This script is designed to be run inside a kube-build container"
    exit 1
  fi
  return 0
}
|
||||
|
||||
# kube::util::ensure-gnu-sed
|
||||
# Determines which sed binary is gnu-sed on linux/darwin
|
||||
#
|
||||
# Sets:
|
||||
# SED: The name of the gnu-sed binary
|
||||
#
|
||||
# kube::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
#  SED: The name of the gnu-sed binary
#
function kube::util::ensure-gnu-sed {
  # GNU sed advertises itself in --help output.
  if LANG=C sed --help 2>&1 | grep -q GNU; then
    SED="sed"
    return 0
  fi
  # Homebrew installs GNU sed as "gsed" on macOS.
  if which gsed &>/dev/null; then
    SED="gsed"
    return 0
  fi
  kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
  return 1
}
|
||||
|
||||
# Some useful colors.
|
||||
# Some useful colors. Guarded so re-sourcing this file does not attempt to
# redeclare the read-only variables.
if [[ -z "${color_start-}" ]]; then
  declare -r color_start="\033["        # ANSI escape prefix
  declare -r color_red="${color_start}0;31m"
  declare -r color_yellow="${color_start}0;33m"
  declare -r color_green="${color_start}0;32m"
  declare -r color_norm="${color_start}0m"  # reset to default
fi
|
||||
|
||||
# ex: ts=2 sw=2 et filetype=sh
|
168
vendor/k8s.io/kubernetes/hack/lib/version.sh
generated
vendored
Normal file
168
vendor/k8s.io/kubernetes/hack/lib/version.sh
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Version management helpers. These functions help to set, save and load the
|
||||
# following variables:
|
||||
#
|
||||
# KUBE_GIT_COMMIT - The git commit id corresponding to this
|
||||
# source code.
|
||||
# KUBE_GIT_TREE_STATE - "clean" indicates no changes since the git commit id
|
||||
# "dirty" indicates source code changes after the git commit id
|
||||
# "archive" indicates the tree was produced by 'git archive'
|
||||
# KUBE_GIT_VERSION - "vX.Y" used to indicate the last release version.
|
||||
# KUBE_GIT_MAJOR - The major part of the version
|
||||
# KUBE_GIT_MINOR - The minor component of the version
|
||||
|
||||
# Grovels through git to set a set of env variables.
|
||||
#
|
||||
# If KUBE_GIT_VERSION_FILE, this function will load from that file instead of
|
||||
# querying git.
|
||||
# Grovels through git (or KUBE_GIT_VERSION_FILE) to populate
# KUBE_GIT_COMMIT, KUBE_GIT_TREE_STATE, KUBE_GIT_VERSION, KUBE_GIT_MAJOR
# and KUBE_GIT_MINOR. Pre-set variables are respected and not overwritten.
kube::version::get_version_vars() {
  # A saved version file (written by save_version_vars) short-circuits
  # all git queries entirely.
  if [[ -n ${KUBE_GIT_VERSION_FILE-} ]]; then
    kube::version::load_version_vars "${KUBE_GIT_VERSION_FILE}"
    return
  fi

  # If the kubernetes source was exported through git archive, then
  # we likely don't have a git tree, but these magic values may be filled in.
  # ('$Format:...$' placeholders are substituted by 'git archive' via
  # export-subst; in a normal checkout they remain literal and the test fails.)
  if [[ '$Format:%%$' == "%" ]]; then
    KUBE_GIT_COMMIT='$Format:%H$'
    KUBE_GIT_TREE_STATE="archive"
    # When a 'git archive' is exported, the '$Format:%D$' below will look
    # something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: '
    # can be extracted from it.
    if [[ '$Format:%D$' =~ tag:\ (v[^ ]+) ]]; then
      KUBE_GIT_VERSION="${BASH_REMATCH[1]}"
    fi
  fi

  local git=(git --work-tree "${KUBE_ROOT}")

  if [[ -n ${KUBE_GIT_COMMIT-} ]] || KUBE_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then
    if [[ -z ${KUBE_GIT_TREE_STATE-} ]]; then
      # Check if the tree is dirty. default to dirty
      if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
        KUBE_GIT_TREE_STATE="clean"
      else
        KUBE_GIT_TREE_STATE="dirty"
      fi
    fi

    # Use git describe to find the version based on annotated tags.
    if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then
      # This translates the "git describe" to an actual semver.org
      # compatible semantic version that looks something like this:
      #   v1.1.0-alpha.0.6+84c76d1142ea4d
      #
      # TODO: We continue calling this "git version" because so many
      # downstream consumers are expecting it there.
      #
      # Count the dashes in the describe output to tell which of the two
      # describe shapes we got (with or without a pre-release suffix).
      DASHES_IN_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/[^-]//g")
      if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
        # We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
        KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/")
      elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
        # We have distance to base tag (v1.1.0-1-gCommitHash)
        KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/")
      fi
      if [[ "${KUBE_GIT_TREE_STATE}" == "dirty" ]]; then
        # git describe --dirty only considers changes to existing files, but
        # that is problematic since new untracked .go files affect the build,
        # so use our idea of "dirty" from git status instead.
        KUBE_GIT_VERSION+="-dirty"
      fi


      # Try to match the "git describe" output to a regex to try to extract
      # the "major" and "minor" versions and whether this is the exact tagged
      # version or whether the tree is between two tagged versions.
      if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?$ ]]; then
        KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
        KUBE_GIT_MINOR=${BASH_REMATCH[2]}
        # A "-..." suffix means we are past the tag; mark minor with "+".
        if [[ -n "${BASH_REMATCH[4]}" ]]; then
          KUBE_GIT_MINOR+="+"
        fi
      fi
    fi
  fi
}
|
||||
|
||||
# Saves the KUBE_GIT_* version variables to the file named by $1 in a
# shell-sourceable KEY='value' format (see load_version_vars).
kube::version::save_version_vars() {
  local version_file=${1-}
  if [[ -z ${version_file} ]]; then
    echo "!!! Internal error. No file specified in kube::version::save_version_vars"
    return 1
  fi

  # Unset variables are written as empty strings via the ${VAR-} default.
  cat <<EOF >"${version_file}"
KUBE_GIT_COMMIT='${KUBE_GIT_COMMIT-}'
KUBE_GIT_TREE_STATE='${KUBE_GIT_TREE_STATE-}'
KUBE_GIT_VERSION='${KUBE_GIT_VERSION-}'
KUBE_GIT_MAJOR='${KUBE_GIT_MAJOR-}'
KUBE_GIT_MINOR='${KUBE_GIT_MINOR-}'
EOF
}
|
||||
|
||||
# Loads the KUBE_GIT_* version variables from the file named by $1
# (the counterpart of save_version_vars).
kube::version::load_version_vars() {
  local version_file=${1-}
  if [[ -z ${version_file} ]]; then
    echo "!!! Internal error. No file specified in kube::version::load_version_vars"
    return 1
  fi

  source "${version_file}"
}
|
||||
|
||||
# Emits the -X linker flags that set pkg/version.<key>=<val> for both the
# main package tree and the vendored client-go copy.
kube::version::ldflag() {
  local key=${1}
  local val=${2}

  # If you update these, also update the list pkg/version/def.bzl.
  local pkg
  for pkg in \
    "${KUBE_GO_PACKAGE}/pkg/version" \
    "${KUBE_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version"; do
    echo "-X ${pkg}.${key}=${val}"
  done
}
|
||||
|
||||
# Prints the value that needs to be passed to the -ldflags parameter of go build
# in order to set the Kubernetes based on the git tree status.
# IMPORTANT: if you update any of these, also update the lists in
# pkg/version/def.bzl and hack/print-workspace-status.sh.
kube::version::ldflags() {
  kube::version::get_version_vars

  # SOURCE_DATE_EPOCH (reproducible-builds convention) pins the build date
  # when set; otherwise `date -u` uses the current time.
  local buildDate=
  [[ -z ${SOURCE_DATE_EPOCH-} ]] || buildDate="--date=@${SOURCE_DATE_EPOCH}"
  # NOTE: the unquoted $(...) expansions below rely on word splitting to
  # turn each ldflag line into separate array elements.
  local -a ldflags=($(kube::version::ldflag "buildDate" "$(date ${buildDate} -u +'%Y-%m-%dT%H:%M:%SZ')"))
  if [[ -n ${KUBE_GIT_COMMIT-} ]]; then
    ldflags+=($(kube::version::ldflag "gitCommit" "${KUBE_GIT_COMMIT}"))
    ldflags+=($(kube::version::ldflag "gitTreeState" "${KUBE_GIT_TREE_STATE}"))
  fi

  if [[ -n ${KUBE_GIT_VERSION-} ]]; then
    ldflags+=($(kube::version::ldflag "gitVersion" "${KUBE_GIT_VERSION}"))
  fi

  if [[ -n ${KUBE_GIT_MAJOR-} && -n ${KUBE_GIT_MINOR-} ]]; then
    ldflags+=(
      $(kube::version::ldflag "gitMajor" "${KUBE_GIT_MAJOR}")
      $(kube::version::ldflag "gitMinor" "${KUBE_GIT_MINOR}")
    )
  fi

  # The -ldflags parameter takes a single string, so join the output.
  echo "${ldflags[*]-}"
}
|
23
vendor/k8s.io/kubernetes/hack/list-feature-tests.sh
generated
vendored
Executable file
23
vendor/k8s.io/kubernetes/hack/list-feature-tests.sh
generated
vendored
Executable file
@ -0,0 +1,23 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A single script that lists all of the [Feature:.+] tests in our e2e suite.
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# FIX: the previous glob "test/e2e/**/*.go" only recurses when bash's
# 'globstar' shopt is enabled; with it off (the default), '**' behaves like
# a single '*' and deeper e2e test directories are silently skipped.
# Let grep do the recursion itself so every .go file under test/e2e is scanned.
grep -Eoh --include="*.go" -r "\[Feature:\w+\]" "${KUBE_ROOT}/test/e2e" | LC_ALL=C sort -u
|
943
vendor/k8s.io/kubernetes/hack/local-up-cluster.sh
generated
vendored
Executable file
943
vendor/k8s.io/kubernetes/hack/local-up-cluster.sh
generated
vendored
Executable file
@ -0,0 +1,943 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
# This command builds and runs a local kubernetes cluster.
|
||||
# You may need to run this as root to allow kubelet to open docker's socket,
|
||||
# and to write the test CA in /var/run/kubernetes.
|
||||
DOCKER_OPTS=${DOCKER_OPTS:-""}
|
||||
DOCKER=(docker ${DOCKER_OPTS})
|
||||
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
|
||||
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
|
||||
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
|
||||
PSP_ADMISSION=${PSP_ADMISSION:-""}
|
||||
NODE_ADMISSION=${NODE_ADMISSION:-""}
|
||||
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
|
||||
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
|
||||
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
|
||||
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
|
||||
KUBELET_FLAGS=${KUBELET_FLAGS:-""}
|
||||
# many dev environments run with swap on, so we don't fail in this env
|
||||
FAIL_SWAP_ON=${FAIL_SWAP_ON:-"false"}
|
||||
# Name of the network plugin, eg: "kubenet"
|
||||
NET_PLUGIN=${NET_PLUGIN:-""}
|
||||
# Place the config files and binaries required by NET_PLUGIN in these directory,
|
||||
# eg: "/etc/cni/net.d" for config files, and "/opt/cni/bin" for binaries.
|
||||
CNI_CONF_DIR=${CNI_CONF_DIR:-""}
|
||||
CNI_BIN_DIR=${CNI_BIN_DIR:-""}
|
||||
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
|
||||
FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1}
|
||||
# if enabled, must set CGROUP_ROOT
|
||||
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true}
|
||||
# name of the cgroup driver, i.e. cgroupfs or systemd
|
||||
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
|
||||
# owner of client certs, default to current user if not specified
|
||||
USER=${USER:-$(whoami)}
|
||||
|
||||
# enables testing eviction scenarios locally.
|
||||
EVICTION_HARD=${EVICTION_HARD:-"memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"}
|
||||
EVICTION_SOFT=${EVICTION_SOFT:-""}
|
||||
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}
|
||||
|
||||
# This script uses docker0 (or whatever container bridge docker is currently using)
|
||||
# and we don't know the IP of the DNS pod to pass in as --cluster-dns.
|
||||
# To set this up by hand, set this flag and change DNS_SERVER_IP.
|
||||
# Note also that you need API_HOST (defined above) for correct DNS.
|
||||
KUBEPROXY_MODE=${KUBEPROXY_MODE:-""}
|
||||
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
|
||||
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
|
||||
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
|
||||
KUBECTL=${KUBECTL:-cluster/kubectl.sh}
|
||||
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-20}
|
||||
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
|
||||
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
|
||||
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
|
||||
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
|
||||
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"}
|
||||
STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"}
|
||||
# enable swagger ui
|
||||
ENABLE_SWAGGER_UI=${ENABLE_SWAGGER_UI:-false}
|
||||
|
||||
# enable kubernetes dashboard
|
||||
ENABLE_CLUSTER_DASHBOARD=${KUBE_ENABLE_CLUSTER_DASHBOARD:-false}
|
||||
|
||||
# enable audit log
|
||||
ENABLE_APISERVER_BASIC_AUDIT=${ENABLE_APISERVER_BASIC_AUDIT:-false}
|
||||
|
||||
# RBAC Mode options
|
||||
AUTHORIZATION_MODE=${AUTHORIZATION_MODE:-"Node,RBAC"}
|
||||
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
|
||||
AUTH_ARGS=${AUTH_ARGS:-""}
|
||||
|
||||
# Install a default storage class (enabled by default)
|
||||
DEFAULT_STORAGE_CLASS=${KUBE_DEFAULT_STORAGE_CLASS:-true}
|
||||
|
||||
# start the cache mutation detector by default so that cache mutators will be found
|
||||
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
|
||||
export KUBE_CACHE_MUTATION_DETECTOR
|
||||
|
||||
# panic the server on watch decode errors since they are considered coder mistakes
|
||||
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
|
||||
export KUBE_PANIC_WATCH_DECODE_ERROR
|
||||
|
||||
ADMISSION_CONTROL=${ADMISSION_CONTROL:-""}
|
||||
ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""}
|
||||
|
||||
# START_MODE can be 'all', 'kubeletonly', or 'nokubelet'
|
||||
START_MODE=${START_MODE:-"all"}
|
||||
|
||||
# A list of controllers to enable
|
||||
KUBE_CONTROLLERS="${KUBE_CONTROLLERS:-"*"}"
|
||||
|
||||
# sanity check for OpenStack provider: it cannot work without a readable
# cloud config file.
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
    if [ "${CLOUD_CONFIG}" == "" ]; then
        echo "Missing CLOUD_CONFIG env for OpenStack provider!"
        exit 1
    fi
    if [ ! -f "${CLOUD_CONFIG}" ]; then
        echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
        exit 1
    fi
fi

#set feature gates if using ipvs mode
if [ "${KUBEPROXY_MODE}" == "ipvs" ]; then
    FEATURE_GATES="$FEATURE_GATES,SupportIPVSProxyMode=true"
fi

# warn if users are running with swap allowed
if [ "${FAIL_SWAP_ON}" == "false" ]; then
    echo "WARNING : The kubelet is configured to not fail if swap is enabled; production deployments should disable swap."
fi

# Non-root is allowed to proceed; we only warn.
# NOTE(review): '2>&1' here redirects stderr to stdout on a plain echo,
# which is a no-op — presumably '>&2' (warn on stderr) was intended; confirm upstream.
if [ "$(id -u)" != "0" ]; then
    echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
fi

# Stop right away if the build fails
set -e

# Pull in the shared hack/lib helpers (kube::util::*, kube::etcd::*, logging...).
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
# Prints the command-line usage examples for this script.
function usage {
            # Quoted heredoc delimiter: the text is emitted verbatim,
            # exactly as the previous per-line echos did.
            cat <<'USAGE_EOF'
This script starts a local kube cluster. 
Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)
Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)
Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)
Example 3: hack/local-up-cluster.sh (build a local copy of the source)
USAGE_EOF
}
|
||||
|
||||
# This function guesses where the existing cached binary build is for the `-O`
# flag. Prints the directory containing the hyperkube binary, or nothing
# when no build can be found.
function guess_built_binary_path {
  # Keep 'local' and the assignment on one line: 'local' masks the helper's
  # exit status, so a missing binary does not trip 'set -e'.
  local hyperkube_path=$(kube::util::find-binary "hyperkube")
  if [[ -n "${hyperkube_path}" ]]; then
    echo -n "$(dirname "${hyperkube_path}")"
  fi
}
|
||||
|
||||
### Allow user to supply the source directory.
GO_OUT=${GO_OUT:-}
# -o DIR : use prebuilt binaries from DIR (skip the build)
# -O     : auto-guess the binary directory for this platform
# -h / ? : print usage and exit
while getopts "ho:O" OPTION
do
    case $OPTION in
        o)
            echo "skipping build"
            GO_OUT="$OPTARG"
            echo "using source $GO_OUT"
            ;;
        O)
            GO_OUT=$(guess_built_binary_path)
            if [ "$GO_OUT" == "" ]; then
                echo "Could not guess the correct output directory to use."
                exit 1
            fi
            ;;
        h)
            usage
            exit
            ;;
        ?)
            usage
            exit
            ;;
    esac
done

# No output dir chosen above: build kubectl and hyperkube from source.
if [ "x$GO_OUT" == "x" ]; then
    make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/hyperkube"
else
    echo "skipped the build."
fi
|
||||
|
||||
# Verifies that the rkt container runtime is usable by probing 'rkt list',
# either via the configured RKT_PATH or whatever binary is on $PATH.
# Exits the script when the probe fails.
function test_rkt {
    if [[ -n "${RKT_PATH}" ]]; then
      if ! ${RKT_PATH} list 2> /dev/null 1> /dev/null; then
        echo "Failed to successfully run 'rkt list', please verify that ${RKT_PATH} is the path of rkt binary."
        exit 1
      fi
    else
      if ! rkt list 2> /dev/null 1> /dev/null; then
        echo "Failed to successfully run 'rkt list', please verify that rkt is in \$PATH."
        exit 1
      fi
    fi
}
|
||||
|
||||
|
||||
# Shut down anyway if there's an error.
|
||||
set +e
|
||||
|
||||
API_PORT=${API_PORT:-8080}
|
||||
API_SECURE_PORT=${API_SECURE_PORT:-6443}
|
||||
|
||||
# WARNING: For DNS to work on most setups you should export API_HOST as the docker0 ip address,
|
||||
API_HOST=${API_HOST:-localhost}
|
||||
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
|
||||
ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""}
|
||||
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
|
||||
EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost}
|
||||
|
||||
KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
|
||||
# By default only allow CORS for requests on localhost
|
||||
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
|
||||
KUBELET_PORT=${KUBELET_PORT:-10250}
|
||||
LOG_LEVEL=${LOG_LEVEL:-3}
|
||||
# Use to increase verbosity on particular files, e.g. LOG_SPEC=token_controller*=5,other_controller*=4
|
||||
LOG_SPEC=${LOG_SPEC:-""}
|
||||
LOG_DIR=${LOG_DIR:-"/tmp"}
|
||||
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
|
||||
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
|
||||
IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
|
||||
RKT_PATH=${RKT_PATH:-""}
|
||||
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
|
||||
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
|
||||
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
|
||||
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
|
||||
CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
|
||||
ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default
|
||||
KEEP_TERMINATED_POD_VOLUMES=${KEEP_TERMINATED_POD_VOLUMES:-"true"}
|
||||
# This is the default dir and filename where the apiserver will generate a self-signed cert
|
||||
# which should be able to be used as the CA to verify itself
|
||||
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
|
||||
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
|
||||
ROOT_CA_KEY=${CERT_DIR}/server-ca.key
|
||||
CLUSTER_SIGNING_CERT_FILE=${CLUSTER_SIGNING_CERT_FILE:-"${ROOT_CA_FILE}"}
|
||||
CLUSTER_SIGNING_KEY_FILE=${CLUSTER_SIGNING_KEY_FILE:-"${ROOT_CA_KEY}"}
|
||||
|
||||
# name of the cgroup driver, i.e. cgroupfs or systemd
if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
  # default cgroup driver to match what is reported by docker to simplify local development
  if [[ -z ${CGROUP_DRIVER} ]]; then
    # match driver with docker runtime reported value (they must match)
    # cut -f3- keeps everything after "Cgroup Driver:" in the info line.
    CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | cut -f3- -d' ')
    echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
  fi
fi




# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
# (fall back to sudo when the unprivileged mkdir fails).
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
# Empty when CERT_DIR is writable by us; "sudo -E" otherwise, so later
# control-plane commands are prefixed appropriately.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
|
||||
|
||||
# Ensures neither apiserver port is already occupied before we start one.
function test_apiserver_off {
    # For the common local scenario, fail fast if server is already running.
    # this can happen if you run local-up-cluster.sh twice and kill etcd in between.
    if [[ "${API_PORT}" -gt "0" ]]; then
        if curl --silent -g $API_HOST:$API_PORT; then
            echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_PORT"
            exit 1
        fi
        echo "API SERVER insecure port is free, proceeding..."
    fi

    # Same probe for the secure port; -k because the cert is self-signed.
    if curl --silent -k -g $API_HOST:$API_SECURE_PORT; then
        echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_SECURE_PORT"
        exit 1
    fi
    echo "API SERVER secure port is free, proceeding..."
}
|
||||
|
||||
# Detects the host OS and architecture from uname and points GO_OUT at the
# matching _output/local/bin/<os>/<arch> directory.
function detect_binary {
    # Detect the OS name/arch so that we can find our binary
    case "$(uname -s)" in
      Darwin)
        host_os=darwin
        ;;
      Linux)
        host_os=linux
        ;;
      *)
        echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
        exit 1
        ;;
    esac

    # Patterns that mapped to the same arch are merged; relative order of
    # the original arms is preserved (arm* must stay after aarch64*/arm64*).
    case "$(uname -m)" in
      x86_64*|i?86_64*|amd64*)
        host_arch=amd64
        ;;
      aarch64*|arm64*)
        host_arch=arm64
        ;;
      arm*)
        host_arch=arm
        ;;
      i?86*)
        host_arch=x86
        ;;
      s390x*)
        host_arch=s390x
        ;;
      ppc64le*)
        host_arch=ppc64le
        ;;
      *)
        echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
        exit 1
        ;;
    esac

    GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
|
||||
|
||||
# Kills the dockerized kubelet container recorded in KUBELET_CIDFILE
# (if any) and removes the cidfile.
cleanup_dockerized_kubelet()
{
  if [[ ! -e $KUBELET_CIDFILE ]]; then
    return
  fi
  # $(<file) reads the container id written by 'docker run --cidfile'.
  docker kill $(<$KUBELET_CIDFILE) > /dev/null
  rm -f $KUBELET_CIDFILE
}
|
||||
|
||||
# Tears down every component this script started (apiserver,
# controller-manager, kubelet, proxy, scheduler, etcd) and exits 0.
# Each PID variable is only set if the component was launched, so every
# kill is guarded with a [[ -n ... ]] test.
cleanup()
{
  echo "Cleaning up..."
  # delete running images
  # if [[ "${ENABLE_CLUSTER_DNS}" == true ]]; then
  # Still need to figure why this commands throw an error: Error from server: client: etcd cluster is unavailable or misconfigured
  # ${KUBECTL} --namespace=kube-system delete service kube-dns
  # And this one hang forever:
  # ${KUBECTL} --namespace=kube-system delete rc kube-dns-v10
  # fi

  # Check if the API server is still running
  # (pgrep -P lists children of the PID; ps -o pid= adds the parent itself,
  # so the parent and its direct children are all killed.)
  [[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
  [[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS}

  # Check if the controller-manager is still running
  [[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
  [[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS}

  # A dockerized kubelet is stopped via its container; a host kubelet via PID.
  if [[ -n "$DOCKERIZE_KUBELET" ]]; then
    cleanup_dockerized_kubelet
  else
    # Check if the kubelet is still running
    [[ -n "${KUBELET_PID-}" ]] && KUBELET_PIDS=$(pgrep -P ${KUBELET_PID} ; ps -o pid= -p ${KUBELET_PID})
    [[ -n "${KUBELET_PIDS-}" ]] && sudo kill ${KUBELET_PIDS}
  fi

  # Check if the proxy is still running
  [[ -n "${PROXY_PID-}" ]] && PROXY_PIDS=$(pgrep -P ${PROXY_PID} ; ps -o pid= -p ${PROXY_PID})
  [[ -n "${PROXY_PIDS-}" ]] && sudo kill ${PROXY_PIDS}

  # Check if the scheduler is still running
  [[ -n "${SCHEDULER_PID-}" ]] && SCHEDULER_PIDS=$(pgrep -P ${SCHEDULER_PID} ; ps -o pid= -p ${SCHEDULER_PID})
  [[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill ${SCHEDULER_PIDS}

  # Check if the etcd is still running
  [[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
  [[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir

  exit 0
}
|
||||
|
||||
# Prints a WARNING banner wrapped in bold-red terminal escapes.
# Note: 'message' is deliberately left global, matching the original.
function warning {
  message=$1

  printf '%s\n' "$(tput bold)$(tput setaf 1)"
  echo "WARNING: ${message}"
  printf '%s\n' "$(tput sgr0)"
}
|
||||
|
||||
# Launches the local etcd instance backing the cluster, delegating to the
# shared kube::etcd helper sourced from hack/lib/init.sh.
function start_etcd {
    echo "Starting etcd"
    kube::etcd::start
}
|
||||
|
||||
# Defaults SERVICE_ACCOUNT_LOOKUP / SERVICE_ACCOUNT_KEY and generates the
# RSA key used for ServiceAccount tokens if it does not exist yet.
function set_service_accounts {
    SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
    SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
    # Generate ServiceAccount key if needed
    if [[ -f "${SERVICE_ACCOUNT_KEY}" ]]; then
      return 0
    fi
    mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
    openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
}
|
||||
|
||||
# Builds the admission/flag configuration, generates all CAs and client
# certificates, launches kube-apiserver in the background (PID recorded in
# APISERVER_PID), waits for /healthz, and writes component kubeconfigs.
function start_apiserver {
    # Pick the security-related admission plugin based on the env toggles.
    # Note each assignment overwrites the previous one, so only the last
    # matching toggle (PSP, then Node) wins.
    security_admission=""
    if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
      security_admission=",SecurityContextDeny"
    fi
    if [[ -n "${PSP_ADMISSION}" ]]; then
      security_admission=",PodSecurityPolicy"
    fi
    if [[ -n "${NODE_ADMISSION}" ]]; then
      security_admission=",NodeRestriction"
    fi

    # Admission Controllers to invoke prior to persisting objects in cluster
    #
    # ResourceQuota must come last, or a creation is recorded, but the pod may be forbidden.
    ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota
    # This is the default dir and filename where the apiserver will generate a self-signed cert
    # which should be able to be used as the CA to verify itself

    audit_arg=""
    APISERVER_BASIC_AUDIT_LOG=""
    if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" = true ]]; then
      # We currently only support enabling with a fixed path and with built-in log
      # rotation "disabled" (large value) so it behaves like kube-apiserver.log.
      # External log rotation should be set up the same as for kube-apiserver.log.
      APISERVER_BASIC_AUDIT_LOG=/tmp/kube-apiserver-audit.log
      audit_arg=" --audit-log-path=${APISERVER_BASIC_AUDIT_LOG}"
      audit_arg+=" --audit-log-maxage=0"
      audit_arg+=" --audit-log-maxbackup=0"
      # Lumberjack doesn't offer any way to disable size-based rotation. It also
      # has an in-memory counter that doesn't notice if you truncate the file.
      # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
      # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
      # never restarts. Please manually restart apiserver before this time.
      audit_arg+=" --audit-log-maxsize=2000000000"
    fi

    swagger_arg=""
    if [[ "${ENABLE_SWAGGER_UI}" = true ]]; then
      swagger_arg="--enable-swagger-ui=true "
    fi

    authorizer_arg=""
    if [[ -n "${AUTHORIZATION_MODE}" ]]; then
      authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE} "
    fi
    priv_arg=""
    if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
      priv_arg="--allow-privileged "
    fi

    # The Initializers admission plugin requires its alpha API group to be
    # enabled in --runtime-config.
    if [[ ${ADMISSION_CONTROL} == *"Initializers"* ]]; then
        if [[ -n "${RUNTIME_CONFIG}" ]]; then
          RUNTIME_CONFIG+=","
        fi
        RUNTIME_CONFIG+="admissionregistration.k8s.io/v1alpha1"
    fi

    runtime_config=""
    if [[ -n "${RUNTIME_CONFIG}" ]]; then
      runtime_config="--runtime-config=${RUNTIME_CONFIG}"
    fi

    # Let the API server pick a default address when API_HOST_IP
    # is set to 127.0.0.1
    advertise_address=""
    if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
        advertise_address="--advertise_address=${API_HOST_IP}"
    fi
    # An explicit ADVERTISE_ADDRESS overrides the API_HOST_IP derivation.
    if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then
        advertise_address="--advertise_address=${ADVERTISE_ADDRESS}"
    fi

    # Create CA signers
    # Single-signer mode reuses the server CA as the client CA.
    if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then
        kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"'
        sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key"
        sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt"
        sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json"
    else
        kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
        kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
    fi

    # Create auth proxy client ca
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'

    # serving cert for kube-apiserver
    kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" ${API_HOST_IP} ${API_HOST} ${FIRST_SERVICE_CLUSTER_IP}

    # Create client certs signed with client-ca, given id, given CN and a number of groups
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters

    # Create matching certificates for kube-aggregator
    kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP}
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy
    # TODO remove masters and add rolebinding
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator


    # Launch the apiserver in the background; all output goes to its log file.
    APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
    ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\
      ${advertise_address} \
      --v=${LOG_LEVEL} \
      --vmodule="${LOG_SPEC}" \
      --cert-dir="${CERT_DIR}" \
      --client-ca-file="${CERT_DIR}/client-ca.crt" \
      --service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
      --service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
      --admission-control="${ADMISSION_CONTROL}" \
      --admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
      --bind-address="${API_BIND_ADDR}" \
      --secure-port="${API_SECURE_PORT}" \
      --tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
      --tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
      --tls-ca-file="${CERT_DIR}/server-ca.crt" \
      --insecure-bind-address="${API_HOST_IP}" \
      --insecure-port="${API_PORT}" \
      --storage-backend=${STORAGE_BACKEND} \
      --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
      --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
      --feature-gates="${FEATURE_GATES}" \
      --external-hostname="${EXTERNAL_HOSTNAME}" \
      --cloud-provider="${CLOUD_PROVIDER}" \
      --cloud-config="${CLOUD_CONFIG}" \
      --requestheader-username-headers=X-Remote-User \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
      --requestheader-allowed-names=system:auth-proxy \
      --proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
      --proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
      --cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
    APISERVER_PID=$!

    # Wait for kube-apiserver to come up before launching the rest of the components.
    echo "Waiting for apiserver to come up"
    # this uses the API port because if you don't have any authenticator, you can't seem to use the secure port at all.
    # this matches what happened with the combination in 1.4.
    # TODO change this conditionally based on whether API_PORT is on or off
    kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 ${WAIT_FOR_URL_API_SERVER} \
        || { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }

    # Create kubeconfigs for all components, using client certs
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
    ${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler

    # Default kubectl auth to the admin client cert unless overridden.
    if [[ -z "${AUTH_ARGS}" ]]; then
        AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
    fi

    # Clone the admin kubeconfig and point it at the aggregated API server.
    ${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
    ${CONTROLPLANE_SUDO} chown $(whoami) "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
    ${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
    echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"

}
|
||||
|
||||
function start_controller_manager {
|
||||
node_cidr_args=""
|
||||
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
|
||||
node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
|
||||
fi
|
||||
|
||||
CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
|
||||
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \
|
||||
--v=${LOG_LEVEL} \
|
||||
--vmodule="${LOG_SPEC}" \
|
||||
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
|
||||
--root-ca-file="${ROOT_CA_FILE}" \
|
||||
--cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
|
||||
--cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
|
||||
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
|
||||
${node_cidr_args} \
|
||||
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
|
||||
--feature-gates="${FEATURE_GATES}" \
|
||||
--cloud-provider="${CLOUD_PROVIDER}" \
|
||||
--cloud-config="${CLOUD_CONFIG}" \
|
||||
--kubeconfig "$CERT_DIR"/controller.kubeconfig \
|
||||
--use-service-account-credentials \
|
||||
--controllers="${KUBE_CONTROLLERS}" \
|
||||
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
|
||||
CTLRMGR_PID=$!
|
||||
}
|
||||
|
||||
function start_kubelet {
|
||||
KUBELET_LOG=${LOG_DIR}/kubelet.log
|
||||
mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"
|
||||
|
||||
priv_arg=""
|
||||
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
|
||||
priv_arg="--allow-privileged "
|
||||
fi
|
||||
|
||||
mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"
|
||||
if [[ -z "${DOCKERIZE_KUBELET}" ]]; then
|
||||
# Enable dns
|
||||
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
|
||||
dns_args="--cluster-dns=${DNS_SERVER_IP} --cluster-domain=${DNS_DOMAIN}"
|
||||
else
|
||||
# To start a private DNS server set ENABLE_CLUSTER_DNS and
|
||||
# DNS_SERVER_IP/DOMAIN. This will at least provide a working
|
||||
# DNS server for real world hostnames.
|
||||
dns_args="--cluster-dns=8.8.8.8"
|
||||
fi
|
||||
|
||||
net_plugin_args=""
|
||||
if [[ -n "${NET_PLUGIN}" ]]; then
|
||||
net_plugin_args="--network-plugin=${NET_PLUGIN}"
|
||||
fi
|
||||
|
||||
auth_args=""
|
||||
if [[ -n "${KUBELET_AUTHORIZATION_WEBHOOK:-}" ]]; then
|
||||
auth_args="${auth_args} --authorization-mode=Webhook"
|
||||
fi
|
||||
if [[ -n "${KUBELET_AUTHENTICATION_WEBHOOK:-}" ]]; then
|
||||
auth_args="${auth_args} --authentication-token-webhook"
|
||||
fi
|
||||
if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
|
||||
auth_args="${auth_args} --client-ca-file=${CLIENT_CA_FILE}"
|
||||
fi
|
||||
|
||||
cni_conf_dir_args=""
|
||||
if [[ -n "${CNI_CONF_DIR}" ]]; then
|
||||
cni_conf_dir_args="--cni-conf-dir=${CNI_CONF_DIR}"
|
||||
fi
|
||||
|
||||
cni_bin_dir_args=""
|
||||
if [[ -n "${CNI_BIN_DIR}" ]]; then
|
||||
cni_bin_dir_args="--cni-bin-dir=${CNI_BIN_DIR}"
|
||||
fi
|
||||
|
||||
container_runtime_endpoint_args=""
|
||||
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
|
||||
container_runtime_endpoint_args="--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
|
||||
fi
|
||||
|
||||
image_service_endpoint_args=""
|
||||
if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
|
||||
image_service_endpoint_args="--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}"
|
||||
fi
|
||||
|
||||
sudo -E "${GO_OUT}/hyperkube" kubelet ${priv_arg}\
|
||||
--v=${LOG_LEVEL} \
|
||||
--vmodule="${LOG_SPEC}" \
|
||||
--chaos-chance="${CHAOS_CHANCE}" \
|
||||
--container-runtime="${CONTAINER_RUNTIME}" \
|
||||
--rkt-path="${RKT_PATH}" \
|
||||
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
|
||||
--hostname-override="${HOSTNAME_OVERRIDE}" \
|
||||
--cloud-provider="${CLOUD_PROVIDER}" \
|
||||
--cloud-config="${CLOUD_CONFIG}" \
|
||||
--address="${KUBELET_HOST}" \
|
||||
--kubeconfig "$CERT_DIR"/kubelet.kubeconfig \
|
||||
--feature-gates="${FEATURE_GATES}" \
|
||||
--cpu-cfs-quota=${CPU_CFS_QUOTA} \
|
||||
--enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" \
|
||||
--cgroups-per-qos=${CGROUPS_PER_QOS} \
|
||||
--cgroup-driver=${CGROUP_DRIVER} \
|
||||
--keep-terminated-pod-volumes=${KEEP_TERMINATED_POD_VOLUMES} \
|
||||
--eviction-hard=${EVICTION_HARD} \
|
||||
--eviction-soft=${EVICTION_SOFT} \
|
||||
--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD} \
|
||||
--pod-manifest-path="${POD_MANIFEST_PATH}" \
|
||||
--fail-swap-on="${FAIL_SWAP_ON}" \
|
||||
${auth_args} \
|
||||
${dns_args} \
|
||||
${cni_conf_dir_args} \
|
||||
${cni_bin_dir_args} \
|
||||
${net_plugin_args} \
|
||||
${container_runtime_endpoint_args} \
|
||||
${image_service_endpoint_args} \
|
||||
--port="$KUBELET_PORT" \
|
||||
${KUBELET_FLAGS} >"${KUBELET_LOG}" 2>&1 &
|
||||
KUBELET_PID=$!
|
||||
# Quick check that kubelet is running.
|
||||
if ps -p $KUBELET_PID > /dev/null ; then
|
||||
echo "kubelet ( $KUBELET_PID ) is running."
|
||||
else
|
||||
cat ${KUBELET_LOG} ; exit 1
|
||||
fi
|
||||
else
|
||||
# Docker won't run a container with a cidfile (container id file)
|
||||
# unless that file does not already exist; clean up an existing
|
||||
# dockerized kubelet that might be running.
|
||||
cleanup_dockerized_kubelet
|
||||
cred_bind=""
|
||||
# path to cloud credentials.
|
||||
cloud_cred=""
|
||||
if [ "${CLOUD_PROVIDER}" == "aws" ]; then
|
||||
cloud_cred="${HOME}/.aws/credentials"
|
||||
fi
|
||||
if [ "${CLOUD_PROVIDER}" == "gce" ]; then
|
||||
cloud_cred="${HOME}/.config/gcloud"
|
||||
fi
|
||||
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
|
||||
cloud_cred="${CLOUD_CONFIG}"
|
||||
fi
|
||||
if [[ -n "${cloud_cred}" ]]; then
|
||||
cred_bind="--volume=${cloud_cred}:${cloud_cred}:ro"
|
||||
fi
|
||||
|
||||
docker run \
|
||||
--volume=/:/rootfs:ro \
|
||||
--volume=/var/run:/var/run:rw \
|
||||
--volume=/sys:/sys:ro \
|
||||
--volume=/var/lib/docker/:/var/lib/docker:ro \
|
||||
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
|
||||
--volume=/dev:/dev \
|
||||
--volume=/run/xtables.lock:/run/xtables.lock:rw \
|
||||
${cred_bind} \
|
||||
--net=host \
|
||||
--privileged=true \
|
||||
-i \
|
||||
--cidfile=$KUBELET_CIDFILE \
|
||||
gcr.io/google_containers/kubelet \
|
||||
/kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" \ --address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG &
|
||||
fi
|
||||
}
|
||||
|
||||
function start_kubeproxy {
|
||||
PROXY_LOG=${LOG_DIR}/kube-proxy.log
|
||||
|
||||
cat <<EOF > /tmp/kube-proxy.yaml
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
kind: KubeProxyConfiguration
|
||||
clientConnection:
|
||||
kubeconfig: ${CERT_DIR}/kube-proxy.kubeconfig
|
||||
hostnameOverride: ${HOSTNAME_OVERRIDE}
|
||||
featureGates: ${FEATURE_GATES}
|
||||
mode: ${KUBEPROXY_MODE}
|
||||
EOF
|
||||
if [ "${KUBEPROXY_MODE}" == "ipvs" ]; then
|
||||
# Load kernel modules required by IPVS proxier
|
||||
sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
|
||||
fi
|
||||
|
||||
sudo "${GO_OUT}/hyperkube" proxy \
|
||||
--config=/tmp/kube-proxy.yaml \
|
||||
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" \
|
||||
--v=${LOG_LEVEL} 2>&1 &
|
||||
PROXY_PID=$!
|
||||
|
||||
SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
|
||||
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" scheduler \
|
||||
--v=${LOG_LEVEL} \
|
||||
--kubeconfig "$CERT_DIR"/scheduler.kubeconfig \
|
||||
--feature-gates="${FEATURE_GATES}" \
|
||||
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
|
||||
SCHEDULER_PID=$!
|
||||
}
|
||||
|
||||
function start_kubedns {
|
||||
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
|
||||
cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns.yaml.in" kube-dns.yaml
|
||||
sed -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml
|
||||
sed -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml
|
||||
|
||||
# TODO update to dns role once we have one.
|
||||
# use kubectl to create kubedns addon
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml
|
||||
echo "Kube-dns addon successfully deployed."
|
||||
rm kube-dns.yaml
|
||||
fi
|
||||
}
|
||||
|
||||
function start_kubedashboard {
|
||||
if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
|
||||
echo "Creating kubernetes-dashboard"
|
||||
# use kubectl to create the dashboard
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
|
||||
echo "kubernetes-dashboard deployment and service successfully deployed."
|
||||
fi
|
||||
}
|
||||
|
||||
function create_psp_policy {
|
||||
echo "Create podsecuritypolicy policies for RBAC."
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml
|
||||
}
|
||||
|
||||
function create_storage_class {
|
||||
if [ -z "$CLOUD_PROVIDER" ]; then
|
||||
CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/local/default.yaml
|
||||
else
|
||||
CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${CLOUD_PROVIDER}/default.yaml
|
||||
fi
|
||||
|
||||
if [ -e $CLASS_FILE ]; then
|
||||
echo "Create default storage class for $CLOUD_PROVIDER"
|
||||
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f $CLASS_FILE
|
||||
else
|
||||
echo "No storage class available for $CLOUD_PROVIDER."
|
||||
fi
|
||||
}
|
||||
|
||||
function print_success {
|
||||
if [[ "${START_MODE}" != "kubeletonly" ]]; then
|
||||
cat <<EOF
|
||||
Local Kubernetes cluster is running. Press Ctrl-C to shut it down.
|
||||
|
||||
Logs:
|
||||
${APISERVER_LOG:-}
|
||||
${CTLRMGR_LOG:-}
|
||||
${PROXY_LOG:-}
|
||||
${SCHEDULER_LOG:-}
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" = true ]]; then
|
||||
echo " ${APISERVER_BASIC_AUDIT_LOG}"
|
||||
fi
|
||||
|
||||
if [[ "${START_MODE}" == "all" ]]; then
|
||||
echo " ${KUBELET_LOG}"
|
||||
elif [[ "${START_MODE}" == "nokubelet" ]]; then
|
||||
echo
|
||||
echo "No kubelet was started because you set START_MODE=nokubelet"
|
||||
echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
|
||||
fi
|
||||
|
||||
if [[ "${START_MODE}" != "kubeletonly" ]]; then
|
||||
echo
|
||||
cat <<EOF
|
||||
To start using your cluster, you can open up another terminal/tab and run:
|
||||
|
||||
export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
|
||||
cluster/kubectl.sh
|
||||
|
||||
Alternatively, you can write to the default kubeconfig:
|
||||
|
||||
export KUBERNETES_PROVIDER=local
|
||||
|
||||
cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
|
||||
cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
|
||||
cluster/kubectl.sh config set-context local --cluster=local --user=myself
|
||||
cluster/kubectl.sh config use-context local
|
||||
cluster/kubectl.sh
|
||||
EOF
|
||||
else
|
||||
cat <<EOF
|
||||
The kubelet was started.
|
||||
|
||||
Logs:
|
||||
${KUBELET_LOG}
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
# validate that etcd is: not running, in path, and has minimum required version.
|
||||
if [[ "${START_MODE}" != "kubeletonly" ]]; then
|
||||
kube::etcd::validate
|
||||
fi
|
||||
|
||||
if [ "${CONTAINER_RUNTIME}" == "docker" ] && ! kube::util::ensure_docker_daemon_connectivity; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${CONTAINER_RUNTIME}" == "rkt" ]]; then
|
||||
test_rkt
|
||||
fi
|
||||
|
||||
if [[ "${START_MODE}" != "kubeletonly" ]]; then
|
||||
test_apiserver_off
|
||||
fi
|
||||
|
||||
kube::util::test_openssl_installed
|
||||
kube::util::ensure-cfssl
|
||||
|
||||
### IF the user didn't supply an output/ for the build... Then we detect.
|
||||
if [ "$GO_OUT" == "" ]; then
|
||||
detect_binary
|
||||
fi
|
||||
echo "Detected host and ready to start services. Doing some housekeeping first..."
|
||||
echo "Using GO_OUT $GO_OUT"
|
||||
KUBELET_CIDFILE=/tmp/kubelet.cid
|
||||
if [[ "${ENABLE_DAEMON}" = false ]]; then
|
||||
trap cleanup EXIT
|
||||
fi
|
||||
|
||||
echo "Starting services now!"
|
||||
if [[ "${START_MODE}" != "kubeletonly" ]]; then
|
||||
start_etcd
|
||||
set_service_accounts
|
||||
start_apiserver
|
||||
start_controller_manager
|
||||
start_kubeproxy
|
||||
start_kubedns
|
||||
start_kubedashboard
|
||||
fi
|
||||
|
||||
if [[ "${START_MODE}" != "nokubelet" ]]; then
|
||||
## TODO remove this check if/when kubelet is supported on darwin
|
||||
# Detect the OS name/arch and display appropriate error.
|
||||
case "$(uname -s)" in
|
||||
Darwin)
|
||||
warning "kubelet is not currently supported in darwin, kubelet aborted."
|
||||
KUBELET_LOG=""
|
||||
;;
|
||||
Linux)
|
||||
start_kubelet
|
||||
;;
|
||||
*)
|
||||
warning "Unsupported host OS. Must be Linux or Mac OS X, kubelet aborted."
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [[ -n "${PSP_ADMISSION}" && "${AUTHORIZATION_MODE}" = *RBAC* ]]; then
|
||||
create_psp_policy
|
||||
fi
|
||||
|
||||
if [[ "$DEFAULT_STORAGE_CLASS" = "true" ]]; then
|
||||
create_storage_class
|
||||
fi
|
||||
|
||||
print_success
|
||||
|
||||
if [[ "${ENABLE_DAEMON}" = false ]]; then
|
||||
while true; do sleep 1; done
|
||||
fi
|
40
vendor/k8s.io/kubernetes/hack/lookup_pull.py
generated
vendored
Executable file
40
vendor/k8s.io/kubernetes/hack/lookup_pull.py
generated
vendored
Executable file
@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Script to print out PR info in release note format.
|
||||
|
||||
import json
|
||||
import sys
|
||||
import urllib2
|
||||
|
||||
PULLQUERY=("https://api.github.com/repos/"
|
||||
"kubernetes/kubernetes/pulls/{pull}")
|
||||
LOGIN="login"
|
||||
TITLE="title"
|
||||
USER="user"
|
||||
|
||||
def print_pulls(pulls):
|
||||
for pull in pulls:
|
||||
d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())
|
||||
print "* {title} #{pull} ({author})".format(
|
||||
title=d[TITLE], pull=pull, author=d[USER][LOGIN])
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print ("Usage: {cmd} <pulls>...: Prints out short " +
|
||||
"markdown description for PRs appropriate for release notes.")
|
||||
sys.exit(1)
|
||||
print_pulls(sys.argv[1:])
|
112
vendor/k8s.io/kubernetes/hack/make-rules/BUILD
generated
vendored
Normal file
112
vendor/k8s.io/kubernetes/hack/make-rules/BUILD
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
# Scripts runnable from make, e.g.
|
||||
#
|
||||
# cd $GOPATH/src/k8s.io/kubernetes
|
||||
# make test-e2e-node
|
||||
#
|
||||
# The sh_binary rules below exist only to validate
|
||||
# dependencies; if a shell dependency is accidentally
|
||||
# deleted, a presubmit BUILD will fail.
|
||||
#
|
||||
# If the scripts sourced their dependencies from
|
||||
# $RUNFILES (rather than $BASH_SOURCE/../.. or
|
||||
# whatever), then bazel build hack/... would install
|
||||
# runnable, hermetically sealed shell "binaries".
|
||||
# E.g. the following command would work:
|
||||
#
|
||||
# ./bazel-bin/hack/make-rules/test-e2e-node
|
||||
#
|
||||
# TODO(#47064): Should be a sh_test instead of sh_binary
|
||||
sh_binary(
|
||||
name = "test-cmd",
|
||||
srcs = ["test-cmd.sh"],
|
||||
deps = [
|
||||
":test-cmd-util",
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "test-e2e-node",
|
||||
srcs = ["test-e2e-node.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "test-integration",
|
||||
srcs = ["test-cmd.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "test-kubeadm-cmd",
|
||||
srcs = ["test-kubeadm-cmd.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "build",
|
||||
srcs = ["build.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "cross",
|
||||
srcs = ["cross.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "test",
|
||||
srcs = ["test.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "vet",
|
||||
srcs = ["vet.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_binary(
|
||||
name = "verify",
|
||||
srcs = ["verify.sh"],
|
||||
deps = [
|
||||
"//hack/lib",
|
||||
],
|
||||
)
|
||||
|
||||
sh_library(
|
||||
name = "test-cmd-util",
|
||||
srcs = [
|
||||
"test-cmd-util.sh",
|
||||
],
|
||||
data = ["//pkg/kubectl/validation:testdata/v1/validPod.yaml"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
28
vendor/k8s.io/kubernetes/hack/make-rules/build.sh
generated
vendored
Executable file
28
vendor/k8s.io/kubernetes/hack/make-rules/build.sh
generated
vendored
Executable file
@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script sets up a go workspace locally and builds all go components.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
KUBE_VERBOSE="${KUBE_VERBOSE:-1}"
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
kube::golang::build_binaries "$@"
|
||||
kube::golang::place_bins
|
38
vendor/k8s.io/kubernetes/hack/make-rules/clean.sh
generated
vendored
Executable file
38
vendor/k8s.io/kubernetes/hack/make-rules/clean.sh
generated
vendored
Executable file
@ -0,0 +1,38 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
CLEAN_PATTERNS=(
|
||||
"_tmp"
|
||||
"doc_tmp"
|
||||
".*/zz_generated.openapi.go"
|
||||
"test/e2e/generated/bindata.go"
|
||||
)
|
||||
|
||||
for pattern in ${CLEAN_PATTERNS[@]}; do
|
||||
for match in $(find "${KUBE_ROOT}" -iregex "^${KUBE_ROOT}/${pattern}$"); do
|
||||
echo "Removing ${match#${KUBE_ROOT}\/} .."
|
||||
rm -rf "${match#${KUBE_ROOT}\/}"
|
||||
done
|
||||
done
|
||||
|
||||
# ex: ts=2 sw=2 et filetype=sh
|
38
vendor/k8s.io/kubernetes/hack/make-rules/cross.sh
generated
vendored
Executable file
38
vendor/k8s.io/kubernetes/hack/make-rules/cross.sh
generated
vendored
Executable file
@ -0,0 +1,38 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script sets up a go workspace locally and builds all for all appropriate
|
||||
# platforms.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
# NOTE: Using "${array[*]}" here is correct. [@] becomes distinct words (in
|
||||
# bash parlance).
|
||||
|
||||
make all WHAT="${KUBE_SERVER_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_SERVER_PLATFORMS[*]}"
|
||||
|
||||
make all WHAT="${KUBE_NODE_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_NODE_PLATFORMS[*]}"
|
||||
|
||||
make all WHAT="${KUBE_CLIENT_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_CLIENT_PLATFORMS[*]}"
|
||||
|
||||
make all WHAT="${KUBE_TEST_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_TEST_PLATFORMS[*]}"
|
||||
|
||||
make all WHAT="${KUBE_TEST_SERVER_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_TEST_SERVER_PLATFORMS[*]}"
|
73
vendor/k8s.io/kubernetes/hack/make-rules/helpers/cache_go_dirs.sh
generated
vendored
Executable file
73
vendor/k8s.io/kubernetes/hack/make-rules/helpers/cache_go_dirs.sh
generated
vendored
Executable file
@ -0,0 +1,73 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script finds, caches, and prints a list of all directories that hold
|
||||
# *.go files. If any directory is newer than the cache, re-find everything and
|
||||
# update the cache. Otherwise use the cached file.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
if [[ -z "${1:-}" ]]; then
|
||||
echo "usage: $0 <cache-file>"
|
||||
exit 1
|
||||
fi
|
||||
CACHE="$1"; shift
|
||||
|
||||
trap "rm -f '${CACHE}'" HUP INT TERM ERR
|
||||
|
||||
# This is a partial 'find' command. The caller is expected to pass the
|
||||
# remaining arguments.
|
||||
#
|
||||
# Example:
|
||||
# kfind -type f -name foobar.go
|
||||
function kfind() {
|
||||
# include the "special" vendor directories which are actually part
|
||||
# of the Kubernetes source tree - generators will use these for
|
||||
# including certain core API concepts.
|
||||
find -H . ./vendor/k8s.io/apimachinery ./vendor/k8s.io/apiserver ./vendor/k8s.io/kube-aggregator ./vendor/k8s.io/apiextensions-apiserver ./vendor/k8s.io/metrics ./vendor/k8s.io/sample-apiserver ./vendor/k8s.io/api ./vendor/k8s.io/client-go ./vendor/k8s.io/code-generator ./vendor/k8s.io/sample-controller \
|
||||
\( \
|
||||
-not \( \
|
||||
\( \
|
||||
-path ./vendor -o \
|
||||
-path ./staging -o \
|
||||
-path ./_\* -o \
|
||||
-path ./.\* -o \
|
||||
-path ./docs -o \
|
||||
-path ./examples \
|
||||
\) -prune \
|
||||
\) \
|
||||
\) \
|
||||
"$@"
|
||||
}
|
||||
|
||||
NEED_FIND=true
|
||||
# It's *significantly* faster to check whether any directories are newer than
|
||||
# the cache than to blindly rebuild it.
|
||||
if [[ -f "${CACHE}" ]]; then
|
||||
N=$(kfind -type d -newer "${CACHE}" -print -quit | wc -l)
|
||||
[[ "${N}" == 0 ]] && NEED_FIND=false
|
||||
fi
|
||||
mkdir -p $(dirname "${CACHE}")
|
||||
if $("${NEED_FIND}"); then
|
||||
kfind -type f -name \*.go \
|
||||
| sed 's|/[^/]*$||' \
|
||||
| sed 's|^./||' \
|
||||
| LC_ALL=C sort -u \
|
||||
> "${CACHE}"
|
||||
fi
|
||||
cat "${CACHE}"
|
66
vendor/k8s.io/kubernetes/hack/make-rules/make-help.sh
generated
vendored
Executable file
66
vendor/k8s.io/kubernetes/hack/make-rules/make-help.sh
generated
vendored
Executable file
@ -0,0 +1,66 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
readonly red=$(tput setaf 1)
|
||||
readonly reset=$(tput sgr0)
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
ALL_TARGETS=$(make -C "${KUBE_ROOT}" PRINT_HELP=y -rpn | sed -n -e '/^$/ { n ; /^[^ .#][^ ]*:/ { s/:.*$// ; p ; } ; }' | sort)
|
||||
CMD_TARGETS=$(ls -l "${KUBE_ROOT}/cmd" |awk '/^d/ {print $NF}')
|
||||
PLUGIN_CMD_TARGETS=$(ls -l "${KUBE_ROOT}/plugin/cmd" |awk '/^d/ {print $NF}')
|
||||
CMD_FLAG=false
|
||||
PLUGIN_CMD_FLAG=false
|
||||
|
||||
echo "--------------------------------------------------------------------------------"
|
||||
for tar in $ALL_TARGETS; do
|
||||
for cmdtar in $CMD_TARGETS; do
|
||||
if [ $tar = $cmdtar ]; then
|
||||
if [ $CMD_FLAG = true ]; then
|
||||
continue 2;
|
||||
fi
|
||||
|
||||
echo -e "${red}${CMD_TARGETS}${reset}"
|
||||
make -C "${KUBE_ROOT}" $tar PRINT_HELP=y
|
||||
echo "---------------------------------------------------------------------------------"
|
||||
|
||||
CMD_FLAG=true
|
||||
continue 2
|
||||
fi
|
||||
done
|
||||
|
||||
for plugincmdtar in $PLUGIN_CMD_TARGETS; do
|
||||
if [ $tar = $plugincmdtar ]; then
|
||||
if [ $PLUGIN_CMD_FLAG = true ]; then
|
||||
continue 2;
|
||||
fi
|
||||
|
||||
echo -e "${red}${PLUGIN_CMD_TARGETS}${reset}"
|
||||
make -C "${KUBE_ROOT}" $tar PRINT_HELP=y
|
||||
echo "---------------------------------------------------------------------------------"
|
||||
|
||||
PLUGIN_CMD_FLAG=true
|
||||
continue 2
|
||||
fi
|
||||
done
|
||||
|
||||
echo -e "${red}${tar}${reset}"
|
||||
make -C "${KUBE_ROOT}" $tar PRINT_HELP=y
|
||||
echo "---------------------------------------------------------------------------------"
|
||||
done
|
5241
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd-util.sh
generated
vendored
Executable file
5241
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd-util.sh
generated
vendored
Executable file
File diff suppressed because it is too large
Load Diff
105
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh
generated
vendored
Executable file
105
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh
generated
vendored
Executable file
@ -0,0 +1,105 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This command checks that the built commands can function together for
|
||||
# simple scenarios. It does not require Docker.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
source "${KUBE_ROOT}/hack/lib/test.sh"
|
||||
source "${KUBE_ROOT}/hack/make-rules/test-cmd-util.sh"
|
||||
|
||||
function run_kube_apiserver() {
|
||||
kube::log::status "Building kube-apiserver"
|
||||
make -C "${KUBE_ROOT}" WHAT="cmd/kube-apiserver"
|
||||
|
||||
# Start kube-apiserver
|
||||
kube::log::status "Starting kube-apiserver"
|
||||
|
||||
# Admission Controllers to invoke prior to persisting objects in cluster
|
||||
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ResourceQuota"
|
||||
|
||||
# Include RBAC (to exercise bootstrapping), and AlwaysAllow to allow all actions
|
||||
AUTHORIZATION_MODE="RBAC,AlwaysAllow"
|
||||
|
||||
"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
|
||||
--insecure-bind-address="127.0.0.1" \
|
||||
--bind-address="127.0.0.1" \
|
||||
--insecure-port="${API_PORT}" \
|
||||
--authorization-mode="${AUTHORIZATION_MODE}" \
|
||||
--secure-port="${SECURE_API_PORT}" \
|
||||
--admission-control="${ADMISSION_CONTROL}" \
|
||||
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
|
||||
--runtime-config=api/v1 \
|
||||
--storage-media-type="${KUBE_TEST_API_STORAGE_TYPE-}" \
|
||||
--cert-dir="${TMPDIR:-/tmp/}" \
|
||||
--service-cluster-ip-range="10.0.0.0/24" \
|
||||
--token-auth-file=hack/testdata/auth-tokens.csv 1>&2 &
|
||||
APISERVER_PID=$!
|
||||
|
||||
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver"
|
||||
}
|
||||
|
||||
function run_kube_controller_manager() {
|
||||
kube::log::status "Building kube-controller-manager"
|
||||
make -C "${KUBE_ROOT}" WHAT="cmd/kube-controller-manager"
|
||||
|
||||
# Start controller manager
|
||||
kube::log::status "Starting controller-manager"
|
||||
"${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \
|
||||
--port="${CTLRMGR_PORT}" \
|
||||
--kube-api-content-type="${KUBE_TEST_API_TYPE-}" \
|
||||
--master="127.0.0.1:${API_PORT}" 1>&2 &
|
||||
CTLRMGR_PID=$!
|
||||
|
||||
kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager"
|
||||
}
|
||||
|
||||
# Creates a node object with name 127.0.0.1. This is required because we do not
|
||||
# run kubelet.
|
||||
function create_node() {
|
||||
kubectl create -f - -s "http://127.0.0.1:${API_PORT}" << __EOF__
|
||||
{
|
||||
"kind": "Node",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "127.0.0.1"
|
||||
},
|
||||
"status": {
|
||||
"capacity": {
|
||||
"memory": "1Gi"
|
||||
}
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
}
|
||||
|
||||
kube::log::status "Running kubectl tests for kube-apiserver"
|
||||
|
||||
setup
|
||||
run_kube_apiserver
|
||||
run_kube_controller_manager
|
||||
create_node
|
||||
SUPPORTED_RESOURCES=("*")
|
||||
# WARNING: Do not wrap this call in a subshell to capture output, e.g. output=$(runTests)
|
||||
# Doing so will suppress errexit behavior inside runTests
|
||||
runTests
|
||||
|
||||
kube::log::status "TESTS PASSED"
|
175
vendor/k8s.io/kubernetes/hack/make-rules/test-e2e-node.sh
generated
vendored
Executable file
175
vendor/k8s.io/kubernetes/hack/make-rules/test-e2e-node.sh
generated
vendored
Executable file
@ -0,0 +1,175 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
focus=${FOCUS:-""}
|
||||
skip=${SKIP-"\[Flaky\]|\[Slow\]|\[Serial\]"}
|
||||
# The number of tests that can run in parallel depends on what tests
|
||||
# are running and on the size of the node. Too many, and tests will
|
||||
# fail due to resource contention. 8 is a reasonable default for a
|
||||
# n1-standard-1 node.
|
||||
# Currently, parallelism only affects when REMOTE=true. For local test,
|
||||
# ginkgo default parallelism (cores - 1) is used.
|
||||
parallelism=${PARALLELISM:-8}
|
||||
artifacts=${ARTIFACTS:-"/tmp/_artifacts/`date +%y%m%dT%H%M%S`"}
|
||||
remote=${REMOTE:-"false"}
|
||||
runtime=${RUNTIME:-"docker"}
|
||||
container_runtime_endpoint=${CONTAINER_RUNTIME_ENDPOINT:-""}
|
||||
image_service_endpoint=${IMAGE_SERVICE_ENDPOINT:-""}
|
||||
run_until_failure=${RUN_UNTIL_FAILURE:-"false"}
|
||||
test_args=${TEST_ARGS:-""}
|
||||
system_spec_name=${SYSTEM_SPEC_NAME:-}
|
||||
|
||||
# Parse the flags to pass to ginkgo
|
||||
ginkgoflags=""
|
||||
if [[ $parallelism > 1 ]]; then
|
||||
ginkgoflags="$ginkgoflags -nodes=$parallelism "
|
||||
fi
|
||||
|
||||
if [[ $focus != "" ]]; then
|
||||
ginkgoflags="$ginkgoflags -focus=\"$focus\" "
|
||||
fi
|
||||
|
||||
if [[ $skip != "" ]]; then
|
||||
ginkgoflags="$ginkgoflags -skip=\"$skip\" "
|
||||
fi
|
||||
|
||||
if [[ $run_until_failure != "" ]]; then
|
||||
ginkgoflags="$ginkgoflags -untilItFails=$run_until_failure "
|
||||
fi
|
||||
|
||||
# Setup the directory to copy test artifacts (logs, junit.xml, etc) from remote host to local host
|
||||
if [ ! -d "${artifacts}" ]; then
|
||||
echo "Creating artifacts directory at ${artifacts}"
|
||||
mkdir -p ${artifacts}
|
||||
fi
|
||||
echo "Test artifacts will be written to ${artifacts}"
|
||||
|
||||
if [ $remote = true ] ; then
|
||||
# The following options are only valid in remote run.
|
||||
images=${IMAGES:-""}
|
||||
hosts=${HOSTS:-""}
|
||||
image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"}
|
||||
metadata=${INSTANCE_METADATA:-""}
|
||||
list_images=${LIST_IMAGES:-false}
|
||||
if [[ $list_images == "true" ]]; then
|
||||
gcloud compute images list --project="${image_project}" | grep "e2e-node"
|
||||
exit 0
|
||||
fi
|
||||
gubernator=${GUBERNATOR:-"false"}
|
||||
image_config_file=${IMAGE_CONFIG_FILE:-""}
|
||||
if [[ $hosts == "" && $images == "" && $image_config_file == "" ]]; then
|
||||
image_project=${IMAGE_PROJECT:-"cos-cloud"}
|
||||
gci_image=$(gcloud compute images list --project $image_project \
|
||||
--no-standard-images --filter="name ~ 'cos-beta.*'" --format="table[no-heading](name)")
|
||||
images=$gci_image
|
||||
metadata="user-data<${KUBE_ROOT}/test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
|
||||
fi
|
||||
instance_prefix=${INSTANCE_PREFIX:-"test"}
|
||||
cleanup=${CLEANUP:-"true"}
|
||||
delete_instances=${DELETE_INSTANCES:-"false"}
|
||||
|
||||
# Get the compute zone
|
||||
zone=$(gcloud info --format='value(config.properties.compute.zone)')
|
||||
if [[ $zone == "" ]]; then
|
||||
echo "Could not find gcloud compute/zone when running: \`gcloud info --format='value(config.properties.compute.zone)'\`"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get the compute project
|
||||
project=$(gcloud info --format='value(config.project)')
|
||||
if [[ $project == "" ]]; then
|
||||
echo "Could not find gcloud project when running: \`gcloud info --format='value(config.project)'\`"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if any of the images specified already have running instances. If so reuse those instances
|
||||
# by moving the IMAGE to a HOST
|
||||
if [[ $images != "" ]]; then
|
||||
IFS=',' read -ra IM <<< "$images"
|
||||
images=""
|
||||
for i in "${IM[@]}"; do
|
||||
if [[ $(gcloud compute instances list "${instance_prefix}-$i" | grep $i) ]]; then
|
||||
if [[ $hosts != "" ]]; then
|
||||
hosts="$hosts,"
|
||||
fi
|
||||
echo "Reusing host ${instance_prefix}-$i"
|
||||
hosts="${hosts}${instance_prefix}-${i}"
|
||||
else
|
||||
if [[ $images != "" ]]; then
|
||||
images="$images,"
|
||||
fi
|
||||
images="$images$i"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Output the configuration we will try to run
|
||||
echo "Running tests remotely using"
|
||||
echo "Project: $project"
|
||||
echo "Image Project: $image_project"
|
||||
echo "Compute/Zone: $zone"
|
||||
echo "Images: $images"
|
||||
echo "Hosts: $hosts"
|
||||
echo "Ginkgo Flags: $ginkgoflags"
|
||||
echo "Instance Metadata: $metadata"
|
||||
echo "Image Config File: $image_config_file"
|
||||
# Invoke the runner
|
||||
go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 --ssh-env="gce" \
|
||||
--zone="$zone" --project="$project" --gubernator="$gubernator" \
|
||||
--hosts="$hosts" --images="$images" --cleanup="$cleanup" \
|
||||
--results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \
|
||||
--image-project="$image_project" --instance-name-prefix="$instance_prefix" \
|
||||
--delete-instances="$delete_instances" --test_args="$test_args" --instance-metadata="$metadata" \
|
||||
--image-config-file="$image_config_file" --system-spec-name="$system_spec_name" \
|
||||
2>&1 | tee -i "${artifacts}/build-log.txt"
|
||||
exit $?
|
||||
|
||||
else
|
||||
# Refresh sudo credentials for local run
|
||||
if ! ping -c 1 -q metadata.google.internal &> /dev/null; then
|
||||
echo "Updating sudo credentials"
|
||||
sudo -v || exit 1
|
||||
fi
|
||||
|
||||
# Do not use any network plugin by default. User could override the flags with
|
||||
# test_args.
|
||||
test_args='--kubelet-flags="--network-plugin= --cni-bin-dir=" '$test_args
|
||||
|
||||
# Runtime flags
|
||||
test_args='--kubelet-flags="--container-runtime='$runtime'" '$test_args
|
||||
if [[ $runtime == "remote" ]] ; then
|
||||
if [[ ! -z $container_runtime_endpoint ]] ; then
|
||||
test_args='--kubelet-flags="--container-runtime-endpoint='$container_runtime_endpoint'" '$test_args
|
||||
fi
|
||||
if [[ ! -z $image_service_endpoint ]] ; then
|
||||
test_args='--kubelet-flags="--image-service-endpoint='$image_service_endpoint'" '$test_args
|
||||
fi
|
||||
fi
|
||||
|
||||
# Test using the host the script was run on
|
||||
# Provided for backwards compatibility
|
||||
go run test/e2e_node/runner/local/run_local.go \
|
||||
--system-spec-name="$system_spec_name" --ginkgo-flags="$ginkgoflags" \
|
||||
--test-flags="--container-runtime=${runtime} \
|
||||
--container-runtime-endpoint=${container_runtime_endpoint} \
|
||||
--image-service-endpoint=${image_service_endpoint} \
|
||||
--alsologtostderr --v 4 --report-dir=${artifacts} --node-name $(hostname) \
|
||||
$test_args" --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt"
|
||||
exit $?
|
||||
fi
|
108
vendor/k8s.io/kubernetes/hack/make-rules/test-integration.sh
generated
vendored
Executable file
108
vendor/k8s.io/kubernetes/hack/make-rules/test-integration.sh
generated
vendored
Executable file
@ -0,0 +1,108 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
# Lists of API Versions of each groups that should be tested, groups are
|
||||
# separated by comma, lists are separated by semicolon. e.g.,
|
||||
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
|
||||
# TODO: It's going to be:
|
||||
# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1"}
|
||||
# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
|
||||
# ONLY the last version is tested in each group.
|
||||
ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$)
|
||||
KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
|
||||
|
||||
# Give integration tests longer to run
|
||||
# TODO: allow a larger value to be passed in
|
||||
#KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 240s}
|
||||
KUBE_TIMEOUT="-timeout 600s"
|
||||
KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
|
||||
LOG_LEVEL=${LOG_LEVEL:-2}
|
||||
KUBE_TEST_ARGS=${KUBE_TEST_ARGS:-}
|
||||
# Default glog module settings.
|
||||
KUBE_TEST_VMODULE=${KUBE_TEST_VMODULE:-"garbagecollector*=6,graph_builder*=6"}
|
||||
|
||||
kube::test::find_integration_test_dirs() {
|
||||
(
|
||||
cd ${KUBE_ROOT}
|
||||
find test/integration/ -name '*_test.go' -print0 \
|
||||
| xargs -0n1 dirname | sed "s|^|${KUBE_GO_PACKAGE}/|" \
|
||||
| LC_ALL=C sort -u
|
||||
find vendor/k8s.io/apiextensions-apiserver/test/integration/ -name '*_test.go' -print0 \
|
||||
| xargs -0n1 dirname | sed "s|^|${KUBE_GO_PACKAGE}/|" \
|
||||
| LC_ALL=C sort -u
|
||||
)
|
||||
}
|
||||
|
||||
CLEANUP_REQUIRED=
|
||||
cleanup() {
|
||||
if [[ -z "${CLEANUP_REQUIRED}" ]]; then
|
||||
return
|
||||
fi
|
||||
kube::log::status "Cleaning up etcd"
|
||||
kube::etcd::cleanup
|
||||
CLEANUP_REQUIRED=
|
||||
kube::log::status "Integration test cleanup complete"
|
||||
}
|
||||
|
||||
runTests() {
|
||||
kube::log::status "Starting etcd instance"
|
||||
CLEANUP_REQUIRED=1
|
||||
kube::etcd::start
|
||||
kube::log::status "Running integration test cases"
|
||||
|
||||
KUBE_RACE="-race"
|
||||
make -C "${KUBE_ROOT}" test \
|
||||
WHAT="${WHAT:-$(kube::test::find_integration_test_dirs | paste -sd' ' -)}" \
|
||||
GOFLAGS="${GOFLAGS:-}" \
|
||||
KUBE_TEST_ARGS="${KUBE_TEST_ARGS:-} ${SHORT:--short=true} --vmodule=${KUBE_TEST_VMODULE} --alsologtostderr=true" \
|
||||
KUBE_RACE="" \
|
||||
KUBE_TIMEOUT="${KUBE_TIMEOUT}" \
|
||||
KUBE_TEST_API_VERSIONS="$1"
|
||||
|
||||
cleanup
|
||||
}
|
||||
|
||||
checkEtcdOnPath() {
|
||||
kube::log::status "Checking etcd is on PATH"
|
||||
which etcd && return
|
||||
kube::log::status "Cannot find etcd, cannot run integration tests."
|
||||
kube::log::status "Please see https://github.com/kubernetes/community/blob/master/contributors/devel/testing.md#install-etcd-dependency for instructions."
|
||||
kube::log::usage "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
|
||||
return 1
|
||||
}
|
||||
|
||||
checkEtcdOnPath
|
||||
|
||||
# Run cleanup to stop etcd on interrupt or other kill signal.
|
||||
trap cleanup EXIT
|
||||
|
||||
# If a test case is specified, just run once with v1 API version and exit
|
||||
if [[ -n "${KUBE_TEST_ARGS}" ]]; then
|
||||
runTests v1
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Convert the CSV to an array of API versions to test
|
||||
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
|
||||
for apiVersion in "${apiVersions[@]}"; do
|
||||
runTests "${apiVersion}"
|
||||
done
|
32
vendor/k8s.io/kubernetes/hack/make-rules/test-kubeadm-cmd.sh
generated
vendored
Executable file
32
vendor/k8s.io/kubernetes/hack/make-rules/test-kubeadm-cmd.sh
generated
vendored
Executable file
@ -0,0 +1,32 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
KUBEADM_PATH="${KUBEADM_PATH:=$(kube::realpath "${KUBE_ROOT}")/cluster/kubeadm.sh}"
|
||||
|
||||
# If testing a different version of kubeadm than the current build, you can
|
||||
# comment this out to save yourself from needlessly building here.
|
||||
make -C "${KUBE_ROOT}" WHAT=cmd/kubeadm
|
||||
|
||||
make -C "${KUBE_ROOT}" test \
|
||||
WHAT=k8s.io/kubernetes/cmd/kubeadm/test/cmd \
|
||||
KUBE_TEST_ARGS="--kubeadm-path '${KUBEADM_PATH}'"
|
407
vendor/k8s.io/kubernetes/hack/make-rules/test.sh
generated
vendored
Executable file
407
vendor/k8s.io/kubernetes/hack/make-rules/test.sh
generated
vendored
Executable file
@ -0,0 +1,407 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
kube::golang::setup_env
|
||||
|
||||
# start the cache mutation detector by default so that cache mutators will be found
|
||||
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
|
||||
export KUBE_CACHE_MUTATION_DETECTOR
|
||||
|
||||
# panic the server on watch decode errors since they are considered coder mistakes
|
||||
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
|
||||
export KUBE_PANIC_WATCH_DECODE_ERROR
|
||||
|
||||
# Handle case where OS has sha#sum commands, instead of shasum.
|
||||
if which shasum >/dev/null 2>&1; then
|
||||
SHA1SUM="shasum -a1"
|
||||
elif which sha1sum >/dev/null 2>&1; then
|
||||
SHA1SUM="sha1sum"
|
||||
else
|
||||
echo "Failed to find shasum or sha1sum utility." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kube::test::find_dirs() {
|
||||
(
|
||||
cd ${KUBE_ROOT}
|
||||
find -L . -not \( \
|
||||
\( \
|
||||
-path './_artifacts/*' \
|
||||
-o -path './bazel-*/*' \
|
||||
-o -path './_output/*' \
|
||||
-o -path './_gopath/*' \
|
||||
-o -path './cmd/kubeadm/test/*' \
|
||||
-o -path './contrib/podex/*' \
|
||||
-o -path './output/*' \
|
||||
-o -path './release/*' \
|
||||
-o -path './target/*' \
|
||||
-o -path './test/e2e/*' \
|
||||
-o -path './test/e2e_node/*' \
|
||||
-o -path './test/integration/*' \
|
||||
-o -path './third_party/*' \
|
||||
-o -path './staging/*' \
|
||||
-o -path './vendor/*' \
|
||||
\) -prune \
|
||||
\) -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u
|
||||
|
||||
find -L . \
|
||||
-path './_output' -prune \
|
||||
-o -path './vendor/k8s.io/client-go/*' \
|
||||
-o -path './vendor/k8s.io/apiserver/*' \
|
||||
-o -path './test/e2e_node/system/*' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u
|
||||
|
||||
# run tests for client-go
|
||||
find ./staging/src/k8s.io/client-go -name '*_test.go' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
|
||||
|
||||
# run tests for apiserver
|
||||
find ./staging/src/k8s.io/apiserver -name '*_test.go' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
|
||||
|
||||
# run tests for apimachinery
|
||||
find ./staging/src/k8s.io/apimachinery -name '*_test.go' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
|
||||
|
||||
find ./staging/src/k8s.io/kube-aggregator -name '*_test.go' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
|
||||
|
||||
find ./staging/src/k8s.io/apiextensions-apiserver -not \( \
|
||||
\( \
|
||||
-path '*/test/integration/*' \
|
||||
\) -prune \
|
||||
\) -name '*_test.go' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
|
||||
|
||||
find ./staging/src/k8s.io/sample-apiserver -name '*_test.go' \
|
||||
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
|
||||
)
|
||||
}
|
||||
|
||||
KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
|
||||
KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
|
||||
KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
|
||||
# How many 'go test' instances to run simultaneously when running tests in
|
||||
# coverage mode.
|
||||
KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
|
||||
KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
|
||||
# Set to the goveralls binary path to report coverage results to Coveralls.io.
|
||||
KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
|
||||
# Lists of API Versions of each groups that should be tested, groups are
|
||||
# separated by comma, lists are separated by semicolon. e.g.,
|
||||
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
|
||||
# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
|
||||
# ONLY the last version is tested in each group.
|
||||
ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$)
|
||||
KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
|
||||
# once we have multiple group supports
|
||||
# Create a junit-style XML test report in this directory if set.
|
||||
KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
|
||||
# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
|
||||
# set.
|
||||
KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
|
||||
|
||||
kube::test::usage() {
|
||||
kube::log::usage_from_stdin <<EOF
|
||||
usage: $0 [OPTIONS] [TARGETS]
|
||||
|
||||
OPTIONS:
|
||||
-p <number> : number of parallel workers, must be >= 1
|
||||
EOF
|
||||
}
|
||||
|
||||
isnum() {
|
||||
[[ "$1" =~ ^[0-9]+$ ]]
|
||||
}
|
||||
|
||||
PARALLEL="${PARALLEL:-1}"
|
||||
while getopts "hp:i:" opt ; do
|
||||
case $opt in
|
||||
h)
|
||||
kube::test::usage
|
||||
exit 0
|
||||
;;
|
||||
p)
|
||||
PARALLEL="$OPTARG"
|
||||
if ! isnum "${PARALLEL}" || [[ "${PARALLEL}" -le 0 ]]; then
|
||||
kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
|
||||
kube::test::usage
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
i)
|
||||
kube::log::usage "'$0': use GOFLAGS='-count <num-iterations>'"
|
||||
kube::test::usage
|
||||
exit 1
|
||||
;;
|
||||
?)
|
||||
kube::test::usage
|
||||
exit 1
|
||||
;;
|
||||
:)
|
||||
kube::log::usage "Option -$OPTARG <value>"
|
||||
kube::test::usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
# Use eval to preserve embedded quoted strings.
|
||||
eval "goflags=(${GOFLAGS:-})"
|
||||
eval "testargs=(${KUBE_TEST_ARGS:-})"
|
||||
|
||||
# Used to filter verbose test output.
|
||||
go_test_grep_pattern=".*"
|
||||
|
||||
# The go-junit-report tool needs full test case information to produce a
|
||||
# meaningful report.
|
||||
if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
|
||||
goflags+=(-v)
|
||||
# Show only summary lines by matching lines like "status package/test"
|
||||
go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
|
||||
fi
|
||||
|
||||
# Filter out arguments that start with "-" and move them to goflags.
|
||||
testcases=()
|
||||
for arg; do
|
||||
if [[ "${arg}" == -* ]]; then
|
||||
goflags+=("${arg}")
|
||||
else
|
||||
testcases+=("${arg}")
|
||||
fi
|
||||
done
|
||||
if [[ ${#testcases[@]} -eq 0 ]]; then
|
||||
testcases=($(kube::test::find_dirs))
|
||||
fi
|
||||
set -- "${testcases[@]+${testcases[@]}}"
|
||||
|
||||
junitFilenamePrefix() {
|
||||
if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
|
||||
echo ""
|
||||
return
|
||||
fi
|
||||
mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
|
||||
# This filename isn't parsed by anything, and we must avoid
|
||||
# exceeding 255 character filename limit. KUBE_TEST_API
|
||||
# barely fits there and in coverage mode test names are
|
||||
# appended to generated file names, easily exceeding
|
||||
# 255 chars in length. So let's just use a sha1 hash of it.
|
||||
local KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
|
||||
echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_HASH}_$(kube::util::sortable_date)"
|
||||
}
|
||||
|
||||
verifyAndSuggestPackagePath() {
|
||||
local specified_package_path="$1"
|
||||
local alternative_package_path="$2"
|
||||
local original_package_path="$3"
|
||||
local suggestion_package_path="$4"
|
||||
|
||||
if ! [ -d "$specified_package_path" ]; then
|
||||
# Because k8s sets a localized $GOPATH for testing, seeing the actual
|
||||
# directory can be confusing. Instead, just show $GOPATH if it exists in the
|
||||
# $specified_package_path.
|
||||
local printable_package_path=$(echo "$specified_package_path" | sed "s|$GOPATH|\$GOPATH|")
|
||||
kube::log::error "specified test path '$printable_package_path' does not exist"
|
||||
|
||||
if [ -d "$alternative_package_path" ]; then
|
||||
kube::log::info "try changing \"$original_package_path\" to \"$suggestion_package_path\""
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
verifyPathsToPackagesUnderTest() {
|
||||
local packages_under_test=($@)
|
||||
|
||||
for package_path in "${packages_under_test[@]}"; do
|
||||
local local_package_path="$package_path"
|
||||
local go_package_path="$GOPATH/src/$package_path"
|
||||
|
||||
if [[ "${package_path:0:2}" == "./" ]] ; then
|
||||
verifyAndSuggestPackagePath "$local_package_path" "$go_package_path" "$package_path" "${package_path:2}"
|
||||
else
|
||||
verifyAndSuggestPackagePath "$go_package_path" "$local_package_path" "$package_path" "./$package_path"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
produceJUnitXMLReport() {
|
||||
local -r junit_filename_prefix=$1
|
||||
if [[ -z "${junit_filename_prefix}" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
local test_stdout_filenames
|
||||
local junit_xml_filename
|
||||
test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
|
||||
junit_xml_filename="${junit_filename_prefix}.xml"
|
||||
if ! command -v go-junit-report >/dev/null 2>&1; then
|
||||
kube::log::error "go-junit-report not found; please install with " \
|
||||
"go get -u github.com/jstemmer/go-junit-report"
|
||||
return
|
||||
fi
|
||||
cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
|
||||
if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
|
||||
rm ${test_stdout_filenames}
|
||||
fi
|
||||
kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
|
||||
}
|
||||
|
||||
runTests() {
|
||||
local junit_filename_prefix
|
||||
junit_filename_prefix=$(junitFilenamePrefix)
|
||||
|
||||
verifyPathsToPackagesUnderTest "$@"
|
||||
|
||||
# If we're not collecting coverage, run all requested tests with one 'go test'
|
||||
# command, which is much faster.
|
||||
if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
|
||||
kube::log::status "Running tests without code coverage"
|
||||
# `go test` does not install the things it builds. `go test -i` installs
|
||||
# the build artifacts but doesn't run the tests. The two together provide
|
||||
# a large speedup for tests that do not need to be rebuilt.
|
||||
go test -i "${goflags[@]:+${goflags[@]}}" \
|
||||
${KUBE_RACE} ${KUBE_TIMEOUT} "${@}" \
|
||||
"${testargs[@]:+${testargs[@]}}"
|
||||
go test "${goflags[@]:+${goflags[@]}}" \
|
||||
${KUBE_RACE} ${KUBE_TIMEOUT} "${@}" \
|
||||
"${testargs[@]:+${testargs[@]}}" \
|
||||
| tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
|
||||
| grep --binary-files=text "${go_test_grep_pattern}" && rc=$? || rc=$?
|
||||
produceJUnitXMLReport "${junit_filename_prefix}"
|
||||
return ${rc}
|
||||
fi
|
||||
|
||||
# Create coverage report directories.
|
||||
KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
|
||||
cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API_HASH}/$(kube::util::sortable_date)"
|
||||
cover_profile="coverage.out" # Name for each individual coverage profile
|
||||
kube::log::status "Saving coverage output in '${cover_report_dir}'"
|
||||
mkdir -p "${@+${@/#/${cover_report_dir}/}}"
|
||||
|
||||
# Run all specified tests, collecting coverage results. Go currently doesn't
|
||||
# support collecting coverage across multiple packages at once, so we must issue
|
||||
# separate 'go test' commands for each package and then combine at the end.
|
||||
# To speed things up considerably, we can at least use xargs -P to run multiple
|
||||
# 'go test' commands at once.
|
||||
# To properly parse the test results if generating a JUnit test report, we
|
||||
# must make sure the output from PARALLEL runs is not mixed. To achieve this,
|
||||
# we spawn a subshell for each PARALLEL process, redirecting the output to
|
||||
# separate files.
|
||||
|
||||
# ignore paths:
|
||||
# vendor/k8s.io/code-generator/cmd/generator: is fragile when run under coverage, so ignore it for now.
|
||||
# https://github.com/kubernetes/kubernetes/issues/24967
|
||||
# vendor/k8s.io/client-go/1.4/rest: causes cover internal errors
|
||||
# https://github.com/golang/go/issues/16540
|
||||
cover_ignore_dirs="vendor/k8s.io/code-generator/cmd/generator|vendor/k8s.io/client-go/1.4/rest"
|
||||
for path in $(echo $cover_ignore_dirs | sed 's/|/ /g'); do
|
||||
echo -e "skipped\tk8s.io/kubernetes/$path"
|
||||
done
|
||||
#
|
||||
# `go test` does not install the things it builds. `go test -i` installs
|
||||
# the build artifacts but doesn't run the tests. The two together provide
|
||||
# a large speedup for tests that do not need to be rebuilt.
|
||||
printf "%s\n" "${@}" \
|
||||
| grep -Ev $cover_ignore_dirs \
|
||||
| xargs -I{} -n 1 -P ${KUBE_COVERPROCS} \
|
||||
bash -c "set -o pipefail; _pkg=\"\$0\"; _pkg_out=\${_pkg//\//_}; \
|
||||
go test -i ${goflags[@]:+${goflags[@]}} \
|
||||
${KUBE_RACE} \
|
||||
${KUBE_TIMEOUT} \
|
||||
-cover -covermode=\"${KUBE_COVERMODE}\" \
|
||||
-coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
|
||||
\"\${_pkg}\" \
|
||||
${testargs[@]:+${testargs[@]}}
|
||||
go test ${goflags[@]:+${goflags[@]}} \
|
||||
${KUBE_RACE} \
|
||||
${KUBE_TIMEOUT} \
|
||||
-cover -covermode=\"${KUBE_COVERMODE}\" \
|
||||
-coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
|
||||
\"\${_pkg}\" \
|
||||
${testargs[@]:+${testargs[@]}} \
|
||||
| tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \
|
||||
| grep \"${go_test_grep_pattern}\"" \
|
||||
{} \
|
||||
&& test_result=$? || test_result=$?
|
||||
|
||||
produceJUnitXMLReport "${junit_filename_prefix}"
|
||||
|
||||
COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out"
|
||||
{
|
||||
# The combined coverage profile needs to start with a line indicating which
|
||||
# coverage mode was used (set, count, or atomic). This line is included in
|
||||
# each of the coverage profiles generated when running 'go test -cover', but
|
||||
# we strip these lines out when combining so that there's only one.
|
||||
echo "mode: ${KUBE_COVERMODE}"
|
||||
|
||||
# Include all coverage reach data in the combined profile, but exclude the
|
||||
# 'mode' lines, as there should be only one.
|
||||
for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do
|
||||
cat $x | grep -h -v "^mode:" || true
|
||||
done
|
||||
} >"${COMBINED_COVER_PROFILE}"
|
||||
|
||||
coverage_html_file="${cover_report_dir}/combined-coverage.html"
|
||||
go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}"
|
||||
kube::log::status "Combined coverage report: ${coverage_html_file}"
|
||||
|
||||
return ${test_result}
|
||||
}
|
||||
|
||||
reportCoverageToCoveralls() {
|
||||
if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then
|
||||
kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}"
|
||||
${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \
|
||||
${CI_NAME:+"-service=${CI_NAME}"} \
|
||||
${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \
|
||||
|| true
|
||||
fi
|
||||
}
|
||||
|
||||
checkFDs() {
|
||||
# several unittests panic when httptest cannot open more sockets
|
||||
# due to the low default files limit on OS X. Warn about low limit.
|
||||
local fileslimit="$(ulimit -n)"
|
||||
if [[ $fileslimit -lt 1000 ]]; then
|
||||
echo "WARNING: ulimit -n (files) should be at least 1000, is $fileslimit, may cause test failure";
|
||||
fi
|
||||
}
|
||||
|
||||
checkFDs
|
||||
|
||||
|
||||
# Convert the CSVs to arrays.
|
||||
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
|
||||
apiVersionsCount=${#apiVersions[@]}
|
||||
for (( i=0; i<${apiVersionsCount}; i++ )); do
|
||||
apiVersion=${apiVersions[i]}
|
||||
echo "Running tests for APIVersion: $apiVersion"
|
||||
# KUBE_TEST_API sets the version of each group to be tested.
|
||||
KUBE_TEST_API="${apiVersion}" runTests "$@"
|
||||
done
|
||||
|
||||
# We might run the tests for multiple versions, but we want to report only
|
||||
# one of them to coveralls. Here we report coverage from the last run.
|
||||
reportCoverageToCoveralls
|
154
vendor/k8s.io/kubernetes/hack/make-rules/verify.sh
generated
vendored
Executable file
154
vendor/k8s.io/kubernetes/hack/make-rules/verify.sh
generated
vendored
Executable file
@ -0,0 +1,154 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
# Excluded check patterns are always skipped.
EXCLUDED_PATTERNS=(
  "verify-all.sh"           # this script calls the make rule and would cause a loop
  "verify-linkcheck.sh"     # runs in separate Jenkins job once per day due to high network usage
  "verify-test-owners.sh"   # TODO(rmmh): figure out how to avoid endless conflicts
  "verify-*-dockerized.sh"  # Don't run any scripts that intended to be run dockerized
)

# Only run whitelisted fast checks in quick mode.
# These run in <10s each on enisoc's workstation, assuming that
# `make` and `hack/godep-restore.sh` had already been run.
# NOTE: plain assignment, not "+=": the array is not declared anywhere
# earlier, so appending read as if it extended an existing list; "=" states
# the actual intent and matches EXCLUDED_PATTERNS above.
QUICK_PATTERNS=(
  "verify-api-groups.sh"
  "verify-bazel.sh"
  "verify-boilerplate.sh"
  "verify-generated-files-remake"
  "verify-godep-licenses.sh"
  "verify-gofmt.sh"
  "verify-imports.sh"
  "verify-pkg-names.sh"
  "verify-readonly-packages.sh"
  "verify-staging-client-go.sh"
  "verify-test-images.sh"
  "verify-test-owners.sh"
)

# Expand each pattern into concrete script paths under hack/; patterns that
# match nothing are silently ignored.  The expansions are deliberately
# unquoted so ls sees one word per pattern.
EXCLUDED_CHECKS=$(ls ${EXCLUDED_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true)
QUICK_CHECKS=$(ls ${QUICK_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true)
|
||||
|
||||
# is-excluded succeeds when the script path in $1 names one of the
# EXCLUDED_CHECKS files (compared with -ef, so different paths to the
# same file still match).
function is-excluded {
  local excluded
  for excluded in ${EXCLUDED_CHECKS[@]}; do
    if [[ $1 -ef "${excluded}" ]]; then
      return
    fi
  done
  return 1
}
|
||||
|
||||
# is-quick succeeds when the script path in $1 names one of the whitelisted
# QUICK_CHECKS files (compared with -ef, so different paths to the same
# file still match).
function is-quick {
  local quick
  for quick in ${QUICK_CHECKS[@]}; do
    if [[ $1 -ef "${quick}" ]]; then
      return
    fi
  done
  return 1
}
|
||||
|
||||
# run-cmd executes its argument vector, discarding both stdout and stderr
# when SILENT mode is active.
function run-cmd {
  if ! ${SILENT}; then
    "$@"
  else
    "$@" &> /dev/null
  fi
}
|
||||
|
||||
# Accumulates the names of every check that failed, for the summary printed
# at the end of the run.
FAILED_TESTS=()

# print-failed-tests prints a red, banner-framed list of the failed checks.
function print-failed-tests {
  local failed
  echo -e "========================"
  echo -e "${color_red}FAILED TESTS${color_norm}"
  echo -e "========================"
  for failed in ${FAILED_TESTS[@]}; do
    echo -e "${color_red}${failed}${color_norm}"
  done
}
|
||||
|
||||
# run-checks executes every script matching ${pattern} with ${runner}
# (e.g. bash or python), skipping excluded checks and — in quick mode —
# anything not on the quick whitelist.  Each check's wall-clock time and
# SUCCESS/FAILED status is printed; a failure sets the global ret=1 and
# records the script in FAILED_TESTS.
function run-checks {
  local -r pattern=$1
  local -r runner=$2

  local t start elapsed tr
  for t in $(ls ${pattern})
  do
    if is-excluded "${t}" ; then
      echo "Skipping ${t}"
      continue
    fi
    if ${QUICK} && ! is-quick "${t}" ; then
      echo "Skipping ${t} in quick mode"
      continue
    fi
    echo -e "Verifying ${t}"
    # Declare and assign separately so $(date) cannot mask a failure status
    # (shellcheck SC2155).
    start=$(date +%s)
    # The "&& tr=$? || tr=$?" idiom captures the exit code in both the
    # success and failure cases without tripping errexit.
    run-cmd "${runner}" "${t}" && tr=$? || tr=$?
    elapsed=$(( $(date +%s) - start ))
    if [[ ${tr} -eq 0 ]]; then
      echo -e "${color_green}SUCCESS${color_norm}  ${t}\t${elapsed}s"
    else
      echo -e "${color_red}FAILED${color_norm}   ${t}\t${elapsed}s"
      ret=1
      # Quote the element so an unexpected space in a path cannot split it.
      FAILED_TESTS+=("${t}")
    fi
  done
}
|
||||
|
||||
SILENT=true   # -v flips this so each check's own output is shown
QUICK=false   # -Q restricts the run to the QUICK_PATTERNS whitelist

while getopts ":vQ" opt; do
  case ${opt} in
    v) SILENT=false ;;
    Q) QUICK=true ;;
    \?)
      echo "Invalid flag: -${OPTARG}" >&2
      exit 1
      ;;
  esac
done

if ${SILENT} ; then
  echo "Running in silent mode, run with -v if you want to see script logs."
fi

if ${QUICK} ; then
  echo "Running in quick mode (-Q flag). Only fast checks will run."
fi

# Run every shell and python verify script; run-checks flips ret on failure.
ret=0
run-checks "${KUBE_ROOT}/hack/verify-*.sh" bash
run-checks "${KUBE_ROOT}/hack/verify-*.py" python

if [[ ${ret} -eq 1 ]]; then
  print-failed-tests
fi
exit ${ret}

# ex: ts=2 sw=2 et filetype=sh
|
56
vendor/k8s.io/kubernetes/hack/make-rules/vet.sh
generated
vendored
Executable file
56
vendor/k8s.io/kubernetes/hack/make-rules/vet.sh
generated
vendored
Executable file
@ -0,0 +1,56 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"

cd "${KUBE_ROOT}"

# Refuse to run standalone; the main Makefile sets up required state.
if [[ "${CALLED_FROM_MAIN_MAKEFILE:-""}" == "" ]]; then
  echo "ERROR: $0 should not be run directly." >&2
  echo >&2
  echo "Please run this command using \"make vet\""
  exit 1
fi

# This is required before we run govet for the results to be correct.
# See https://github.com/golang/go/issues/16086 for details.
go install ./cmd/...

# Use eval to preserve embedded quoted strings.
eval "goflags=(${GOFLAGS:-})"

# Partition the arguments: anything beginning with "-" is a flag for go vet,
# everything else names a target package.
targets=()
for arg in "$@"; do
  if [[ "${arg}" != -* ]]; then
    targets+=("${arg}")
  else
    goflags+=("${arg}")
  fi
done

if [[ ${#targets[@]} -eq 0 ]]; then
  # Do not run on third_party directories or generated client code.
  targets=$(go list -e ./... | egrep -v "/(third_party|vendor|staging|clientset_generated)/")
fi

# ${targets[@]} is deliberately unquoted: when the default list comes from
# `go list` it is one whitespace-separated string that must word-split.
go vet "${goflags[@]:+${goflags[@]}}" ${targets[@]}
|
47
vendor/k8s.io/kubernetes/hack/print-workspace-status.sh
generated
vendored
Executable file
47
vendor/k8s.io/kubernetes/hack/print-workspace-status.sh
generated
vendored
Executable file
@ -0,0 +1,47 @@
|
||||
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This command is used by bazel as the workspace_status_command
# to implement build stamping with git information.
# It prints "KEY value" pairs, one per line, on stdout.

set -o errexit
set -o nounset
set -o pipefail

# Exported so hack/lib/version.sh can locate the git tree.
export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

source "${KUBE_ROOT}/hack/lib/version.sh"
# Populates the KUBE_GIT_* variables consumed by the heredoc below.
kube::version::get_version_vars

# Prefix with STABLE_ so that these values are saved to stable-status.txt
# instead of volatile-status.txt.
# Stamped rules will be retriggered by changes to stable-status.txt, but not by
# changes to volatile-status.txt.
# IMPORTANT: the camelCase vars should match the lists in hack/lib/version.sh
# and pkg/version/def.bzl.
# The ${VAR-} form prints an empty value (rather than failing under nounset)
# when a variable was not populated; ${KUBE_GIT_VERSION/+/_} replaces the
# first "+" because docker tags cannot contain it.
cat <<EOF
STABLE_BUILD_GIT_COMMIT ${KUBE_GIT_COMMIT-}
STABLE_BUILD_SCM_STATUS ${KUBE_GIT_TREE_STATE-}
STABLE_BUILD_SCM_REVISION ${KUBE_GIT_VERSION-}
STABLE_BUILD_MAJOR_VERSION ${KUBE_GIT_MAJOR-}
STABLE_BUILD_MINOR_VERSION ${KUBE_GIT_MINOR-}
STABLE_DOCKER_TAG ${KUBE_GIT_VERSION/+/_}
gitCommit ${KUBE_GIT_COMMIT-}
gitTreeState ${KUBE_GIT_TREE_STATE-}
gitVersion ${KUBE_GIT_VERSION-}
gitMajor ${KUBE_GIT_MAJOR-}
gitMinor ${KUBE_GIT_MINOR-}
buildDate $(date -u +'%Y-%m-%dT%H:%M:%SZ')
EOF
|
33
vendor/k8s.io/kubernetes/hack/run-in-gopath.sh
generated
vendored
Executable file
33
vendor/k8s.io/kubernetes/hack/run-in-gopath.sh
generated
vendored
Executable file
@ -0,0 +1,33 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script sets up a temporary Kubernetes GOPATH and runs an arbitrary
# command under it. Go tooling requires that the current directory be under
# GOPATH or else it fails to find some things, such as the vendor directory for
# the project.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"

# Set up a clean GOPATH and make sure we are currently inside it.
kube::golang::setup_env

# Hand control to whatever the caller asked us to run.
"$@"
|
37
vendor/k8s.io/kubernetes/hack/test-cmd.sh
generated
vendored
Executable file
37
vendor/k8s.io/kubernetes/hack/test-cmd.sh
generated
vendored
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is a vestigial redirection. Please do not add "real" logic.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

# For help output.  The previous version conditionally re-assigned ARGHELP
# to the same empty string when arguments were given; that branch was dead
# code (and the arguments are ignored by 'make test-cmd' anyway), so it has
# been removed with no change in behavior.
ARGHELP=""

echo "NOTE: $0 has been replaced by 'make test-cmd'"
echo
echo "The equivalent of this invocation is: "
echo " make test-cmd ${ARGHELP}"
echo
echo
make --no-print-directory -C "${KUBE_ROOT}" test-cmd
|
37
vendor/k8s.io/kubernetes/hack/test-go.sh
generated
vendored
Executable file
37
vendor/k8s.io/kubernetes/hack/test-go.sh
generated
vendored
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is a vestigial redirection. Please do not add "real" logic.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

# Build the help string that mirrors how the arguments are forwarded to make.
ARGHELP=""
if (( $# > 0 )); then
  ARGHELP="WHAT='$@'"
fi

echo "NOTE: $0 has been replaced by 'make test'"
echo
echo "The equivalent of this invocation is: "
echo " make test ${ARGHELP}"
echo
echo
make --no-print-directory -C "${KUBE_ROOT}" test WHAT="$*"
|
33
vendor/k8s.io/kubernetes/hack/test-integration.sh
generated
vendored
Executable file
33
vendor/k8s.io/kubernetes/hack/test-integration.sh
generated
vendored
Executable file
@ -0,0 +1,33 @@
|
||||
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is a vestigial redirection. Please do not add "real" logic.
# It only prints the replacement 'make' invocations and exits non-zero;
# unlike the other wrappers it does not invoke make itself.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

echo "$0 has been replaced by 'make test-integration'"
echo
echo "The following invocation will run all integration tests: "
echo ' make test-integration'
echo
echo "The following invocation will run a specific test with the verbose flag set: "
echo ' make test-integration WHAT=./test/integration/pods GOFLAGS="-v" KUBE_TEST_ARGS="-run ^TestPodUpdateActiveDeadlineSeconds$"'
echo
exit 1
|
214
vendor/k8s.io/kubernetes/hack/test-update-storage-objects.sh
generated
vendored
Executable file
214
vendor/k8s.io/kubernetes/hack/test-update-storage-objects.sh
generated
vendored
Executable file
@ -0,0 +1,214 @@
|
||||
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script to test cluster/update-storage-objects.sh works as expected.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"

# The api version in which objects are currently stored in etcd.
KUBE_OLD_API_VERSION=${KUBE_OLD_API_VERSION:-"v1"}
# The api version in which our etcd objects should be converted to.
# The new api version
KUBE_NEW_API_VERSION=${KUBE_NEW_API_VERSION:-"v1"}

# Storage-version overrides passed to the apiserver; overwritten by the test
# definitions further down in this script.
KUBE_OLD_STORAGE_VERSIONS=${KUBE_OLD_STORAGE_VERSIONS:-""}
KUBE_NEW_STORAGE_VERSIONS=${KUBE_NEW_STORAGE_VERSIONS:-""}

# The two storage encodings the test exercises.
KUBE_STORAGE_MEDIA_TYPE_JSON="application/json"
KUBE_STORAGE_MEDIA_TYPE_PROTOBUF="application/vnd.kubernetes.protobuf"

# Endpoints for the local etcd and apiserver instances driven by this test.
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
ETCD_PREFIX=${ETCD_PREFIX:-randomPrefix}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
# Reassigned per step below before each startApiServer call.
KUBE_API_VERSIONS=""
RUNTIME_CONFIG=""

# Tools used throughout the test.  NOTE(review): under `set -o errexit`,
# a missing etcdctl makes this assignment abort the script with no message.
ETCDCTL=$(which etcdctl)
KUBECTL="${KUBE_OUTPUT_HOSTBIN}/kubectl"
UPDATE_ETCD_OBJECTS_SCRIPT="${KUBE_ROOT}/cluster/update-storage-objects.sh"
|
||||
|
||||
# startApiServer launches a local kube-apiserver in the background against
# the etcd endpoint configured above.
#   $1 - comma-separated storage-version overrides (may be empty)
#   $2 - storage media type, e.g. application/json (may be empty)
# Reads the globals KUBE_API_VERSIONS and RUNTIME_CONFIG set by the caller,
# stores the server's pid in APISERVER_PID (consumed by killApiServer), and
# blocks until /healthz responds.
function startApiServer() {
  local storage_versions=${1:-""}
  local storage_media_type=${2:-""}
  kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS}"
  kube::log::status " and storage-media-type: ${storage_media_type}"
  kube::log::status " and runtime-config: ${RUNTIME_CONFIG}"
  kube::log::status " and storage-version overrides: ${storage_versions}"

  # Server output goes to stderr (1>&2); the process runs in the background.
  KUBE_API_VERSIONS="${KUBE_API_VERSIONS}" \
    "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
    --insecure-bind-address="${API_HOST}" \
    --bind-address="${API_HOST}" \
    --insecure-port="${API_PORT}" \
    --storage-backend="etcd3" \
    --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
    --etcd-prefix="/${ETCD_PREFIX}" \
    --runtime-config="${RUNTIME_CONFIG}" \
    --cert-dir="${TMPDIR:-/tmp/}" \
    --service-cluster-ip-range="10.0.0.0/24" \
    --storage-versions="${storage_versions}" \
    --storage-media-type=${storage_media_type} 1>&2 &
  APISERVER_PID=$!

  # url, prefix, wait, times
  kube::util::wait_for_url "http://${API_HOST}:${API_PORT}/healthz" "apiserver: " 1 120
}
|
||||
|
||||
# killApiServer terminates the background kube-apiserver started by
# startApiServer (if any), waits for it to exit, and clears APISERVER_PID.
function killApiServer() {
  kube::log::status "Killing api server"
  if [[ -n ${APISERVER_PID-} ]]; then
    # The process may already be gone; suppress kill's error output and
    # tolerate a failing wait.
    kill ${APISERVER_PID} 1>&2 2>/dev/null
    wait ${APISERVER_PID} || true
    kube::log::status "api server exited"
  fi
  unset APISERVER_PID
}
|
||||
|
||||
# cleanup is the EXIT/SIGINT trap handler: stop the apiserver, then tear
# down the test etcd instance.
function cleanup() {
  killApiServer

  kube::etcd::cleanup

  kube::log::status "Clean up complete"
}
|
||||
|
||||
# Tear everything down on normal exit or interrupt.
trap cleanup EXIT SIGINT

# Build the binaries the test drives.
make -C "${KUBE_ROOT}" WHAT=cmd/kube-apiserver
make -C "${KUBE_ROOT}" WHAT=cluster/images/etcd/attachlease

kube::etcd::start
echo "${ETCD_VERSION}" > "${ETCD_DIR}/version.txt"

### BEGIN TEST DEFINITION CUSTOMIZATION ###

# source_file,resource,namespace,name,old_version,new_version
tests=(
examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml,storageclasses,,slow,v1beta1,v1
)

KUBE_OLD_API_VERSION="networking.k8s.io/v1,storage.k8s.io/v1beta1,extensions/v1beta1"
KUBE_NEW_API_VERSION="networking.k8s.io/v1,storage.k8s.io/v1,extensions/v1beta1"
KUBE_OLD_STORAGE_VERSIONS="storage.k8s.io/v1beta1"
KUBE_NEW_STORAGE_VERSIONS="storage.k8s.io/v1"

### END TEST DEFINITION CUSTOMIZATION ###

#######################################################
# Step 1: Start a server which supports both the old and new api versions,
# but KUBE_OLD_API_VERSION is the latest (storage) version.
# Additionally use KUBE_STORAGE_MEDIA_TYPE_JSON for storage encoding.
#######################################################
KUBE_API_VERSIONS="v1,${KUBE_OLD_API_VERSION},${KUBE_NEW_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/v1=true,${KUBE_OLD_API_VERSION}=true,${KUBE_NEW_API_VERSION}=true"
startApiServer ${KUBE_OLD_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON}


# Create object(s)
for test in ${tests[@]}; do
  # Each test entry is a CSV record; split it into its six fields.
  IFS=',' read -ra test_data <<<"$test"
  source_file=${test_data[0]}

  kube::log::status "Creating ${source_file}"
  ${KUBECTL} create -f "${source_file}"

  # Verify that the storage version is the old version
  resource=${test_data[1]}
  namespace=${test_data[2]}
  name=${test_data[3]}
  old_storage_version=${test_data[4]}

  # Cluster-scoped resources leave the namespace field empty; only add a
  # "namespace/" path segment when one was supplied.
  if [ -n "${namespace}" ]; then
    namespace="${namespace}/"
  fi
  kube::log::status "Verifying ${resource}/${namespace}${name} has storage version ${old_storage_version} in etcd"
  ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep ${old_storage_version}
done

killApiServer


#######################################################
# Step 2: Start a server which supports both the old and new api versions,
# but KUBE_NEW_API_VERSION is the latest (storage) version.
# Still use KUBE_STORAGE_MEDIA_TYPE_JSON for storage encoding.
#######################################################

KUBE_API_VERSIONS="v1,${KUBE_NEW_API_VERSION},${KUBE_OLD_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/v1=true,${KUBE_OLD_API_VERSION}=true,${KUBE_NEW_API_VERSION}=true"
startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON}

# Update etcd objects, so that will now be stored in the new api version.
kube::log::status "Updating storage versions in etcd"
${UPDATE_ETCD_OBJECTS_SCRIPT}

# Verify that the storage version was changed in etcd
for test in ${tests[@]}; do
  IFS=',' read -ra test_data <<<"$test"
  resource=${test_data[1]}
  namespace=${test_data[2]}
  name=${test_data[3]}
  new_storage_version=${test_data[5]}

  if [ -n "${namespace}" ]; then
    namespace="${namespace}/"
  fi
  kube::log::status "Verifying ${resource}/${namespace}${name} has updated storage version ${new_storage_version} in etcd"
  ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep ${new_storage_version}
done

killApiServer


#######################################################
# Step 3 : Start a server which supports only the new api version.
# However, change storage encoding to KUBE_STORAGE_MEDIA_TYPE_PROTOBUF.
#######################################################

KUBE_API_VERSIONS="v1,${KUBE_NEW_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/v1=true,${KUBE_NEW_API_VERSION}=true"

# This seems to reduce flakiness.
sleep 1
startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_PROTOBUF}

for test in ${tests[@]}; do
  IFS=',' read -ra test_data <<<"$test"
  resource=${test_data[1]}
  namespace=${test_data[2]}
  name=${test_data[3]}
  namespace_flag=""

  # Verify that the server is able to read the object.
  if [ -n "${namespace}" ]; then
    namespace_flag="--namespace=${namespace}"
    namespace="${namespace}/"
  fi
  kube::log::status "Verifying we can retrieve ${resource}/${namespace}${name} via kubectl"
  # We have to remove the cached discovery information about the old version; otherwise,
  # the 'kubectl get' will use that and fail to find the resource.
  rm -rf ${HOME}/.kube/cache/discovery/localhost_8080/${KUBE_OLD_STORAGE_VERSIONS}
  ${KUBECTL} get ${namespace_flag} ${resource}/${name}
done

killApiServer
|
8
vendor/k8s.io/kubernetes/hack/testdata/CRD/bar.yaml
generated
vendored
Normal file
8
vendor/k8s.io/kubernetes/hack/testdata/CRD/bar.yaml
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
kind: Bar
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: field1
|
||||
otherField: field2
|
11
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo-added-subfield.yaml
generated
vendored
Normal file
11
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo-added-subfield.yaml
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
kind: Foo
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: field1
|
||||
otherField: field2
|
||||
nestedField:
|
||||
someSubfield: modifiedSubfield
|
||||
newSubfield: subfield3
|
10
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo-deleted-subfield.yaml
generated
vendored
Normal file
10
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo-deleted-subfield.yaml
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
kind: Foo
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: field1
|
||||
otherField: field2
|
||||
nestedField:
|
||||
someSubfield: modifiedSubfield
|
11
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo-updated-subfield.yaml
generated
vendored
Normal file
11
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo-updated-subfield.yaml
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
kind: Foo
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: field1
|
||||
otherField: field2
|
||||
nestedField:
|
||||
someSubfield: modifiedSubfield
|
||||
otherSubfield: subfield2
|
11
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo.yaml
generated
vendored
Normal file
11
vendor/k8s.io/kubernetes/hack/testdata/CRD/foo.yaml
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
kind: Foo
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: field1
|
||||
otherField: field2
|
||||
nestedField:
|
||||
someSubfield: subfield1
|
||||
otherSubfield: subfield2
|
19
vendor/k8s.io/kubernetes/hack/testdata/CRD/multi-crd-list-added-field.yaml
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/hack/testdata/CRD/multi-crd-list-added-field.yaml
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
kind: List
|
||||
items:
|
||||
- kind: Foo
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test-list
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: modifiedField
|
||||
newField: field3
|
||||
- kind: Bar
|
||||
apiVersion: company.com/v1
|
||||
metadata:
|
||||
name: test-list
|
||||
labels:
|
||||
pruneGroup: "true"
|
||||
someField: modifiedField
|
||||
newField: field3
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user