Update to kube v1.17

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

Committed by: mergify[bot]
Parent commit: 327fcd1b1b
This commit: 3af1e26d7c
vendor/k8s.io/kubernetes/test/utils/admission_webhook.go (new file, 137 lines, generated, vendored)
@@ -0,0 +1,137 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
    "crypto/tls"
    "crypto/x509"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "testing"

    "k8s.io/api/admission/v1beta1"
)

// NewAdmissionWebhookServer sets up a webhook server with TLS enabled, returns URL and Close function
// for the server
func NewAdmissionWebhookServer(handler http.Handler) (string, func(), error) {
    // set up webhook server
    roots := x509.NewCertPool()
    if !roots.AppendCertsFromPEM(LocalhostCert) {
        return "", nil, fmt.Errorf("Failed to append Cert from PEM")
    }
    cert, err := tls.X509KeyPair(LocalhostCert, LocalhostKey)
    if err != nil {
        return "", nil, fmt.Errorf("Failed to build cert with error: %+v", err)
    }
    webhookServer := httptest.NewUnstartedServer(handler)
    webhookServer.TLS = &tls.Config{
        RootCAs:      roots,
        Certificates: []tls.Certificate{cert},
    }
    webhookServer.StartTLS()
    return webhookServer.URL, webhookServer.Close, nil
}

// AdmissionWebhookHandler creates a HandlerFunc that decodes/encodes AdmissionReview and performs
// given admit function
func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview) error) http.HandlerFunc {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        defer r.Body.Close()
        data, err := ioutil.ReadAll(r.Body)
        if err != nil {
            t.Error(err)
            return
        }
        if contentType := r.Header.Get("Content-Type"); contentType != "application/json" {
            t.Errorf("contentType=%s, expect application/json", contentType)
            return
        }

        review := v1beta1.AdmissionReview{}
        if err := json.Unmarshal(data, &review); err != nil {
            t.Errorf("Fail to deserialize object: %s with error: %v", string(data), err)
            http.Error(w, err.Error(), 400)
            return
        }

        if err := admit(&review); err != nil {
            t.Errorf("%v", err)
            http.Error(w, err.Error(), 400)
            return
        }

        w.Header().Set("Content-Type", "application/json")
        if err := json.NewEncoder(w).Encode(review); err != nil {
            t.Errorf("Marshal of response failed with error: %v", err)
        }
    })
}

// LocalhostCert was generated from crypto/tls/generate_cert.go with the following command:
// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var LocalhostCert = []byte(`-----BEGIN CERTIFICATE-----
MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS
MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEA1Z5/aTwqY706M34tn60l8ZHkanWDl8mM1pYf4Q7qg3zA9XqWLX6S
4rTYDYCb4stEasC72lQnbEWHbthiQE76zubP8WOFHdvGR3mjAvHWz4FxvLOTheZ+
3iDUrl6Aj9UIsYqzmpBJAoY4+vGGf+xHvuukHrVcFqR9ZuBdZuJ/HbbjUyuNr3X9
erNIr5Ha17gVzf17SNbYgNrX9gbCeEB8Z9Ox7dVuJhLDkpF0T/B5Zld3BjyUVY/T
cukU4dTVp6isbWPvCMRCZCCOpb+qIhxEjJ0n6tnPt8nf9lvDl4SWMl6X1bH+2EFa
a8R06G0QI+XhwPyjXUyCR8QEOZPCR5wyqQIDAQABo2gwZjAOBgNVHQ8BAf8EBAMC
AqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAuBgNVHREE
JzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG
9w0BAQsFAAOCAQEAThqgJ/AFqaANsOp48lojDZfZBFxJQ3A4zfR/MgggUoQ9cP3V
rxuKAFWQjze1EZc7J9iO1WvH98lOGVNRY/t2VIrVoSsBiALP86Eew9WucP60tbv2
8/zsBDSfEo9Wl+Q/gwdEh8dgciUKROvCm76EgAwPGicMAgRsxXgwXHhS5e8nnbIE
Ewaqvb5dY++6kh0Oz+adtNT5OqOwXTIRI67WuEe6/B3Z4LNVPQDIj7ZUJGNw8e6L
F4nkUthwlKx4yEJHZBRuFPnO7Z81jNKuwL276+mczRH7piI6z9uyMV/JbEsOIxyL
W6CzB7pZ9Nj1YLpgzc1r6oONHLokMJJIz/IvkQ==
-----END CERTIFICATE-----`)

// LocalhostKey is the private key for LocalhostCert.
var LocalhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1Z5/aTwqY706M34tn60l8ZHkanWDl8mM1pYf4Q7qg3zA9XqW
LX6S4rTYDYCb4stEasC72lQnbEWHbthiQE76zubP8WOFHdvGR3mjAvHWz4FxvLOT
heZ+3iDUrl6Aj9UIsYqzmpBJAoY4+vGGf+xHvuukHrVcFqR9ZuBdZuJ/HbbjUyuN
r3X9erNIr5Ha17gVzf17SNbYgNrX9gbCeEB8Z9Ox7dVuJhLDkpF0T/B5Zld3BjyU
VY/TcukU4dTVp6isbWPvCMRCZCCOpb+qIhxEjJ0n6tnPt8nf9lvDl4SWMl6X1bH+
2EFaa8R06G0QI+XhwPyjXUyCR8QEOZPCR5wyqQIDAQABAoIBAFAJmb1pMIy8OpFO
hnOcYWoYepe0vgBiIOXJy9n8R7vKQ1X2f0w+b3SHw6eTd1TLSjAhVIEiJL85cdwD
MRTdQrXA30qXOioMzUa8eWpCCHUpD99e/TgfO4uoi2dluw+pBx/WUyLnSqOqfLDx
S66kbeFH0u86jm1hZibki7pfxLbxvu7KQgPe0meO5/13Retztz7/xa/pWIY71Zqd
YC8UckuQdWUTxfuQf0470lAK34GZlDy9tvdVOG/PmNkG4j6OQjy0Kmz4Uk7rewKo
ZbdphaLPJ2A4Rdqfn4WCoyDnxlfV861T922/dEDZEbNWiQpB81G8OfLL+FLHxyIT
LKEu4R0CgYEA4RDj9jatJ/wGkMZBt+UF05mcJlRVMEijqdKgFwR2PP8b924Ka1mj
9zqWsfbxQbdPdwsCeVBZrSlTEmuFSQLeWtqBxBKBTps/tUP0qZf7HjfSmcVI89WE
3ab8LFjfh4PtK/LOq2D1GRZZkFliqi0gKwYdDoK6gxXWwrumXq4c2l8CgYEA8vrX
dMuGCNDjNQkGXx3sr8pyHCDrSNR4Z4FrSlVUkgAW1L7FrCM911BuGh86FcOu9O/1
Ggo0E8ge7qhQiXhB5vOo7hiVzSp0FxxCtGSlpdp4W6wx6ZWK8+Pc+6Moos03XdG7
MKsdPGDciUn9VMOP3r8huX/btFTh90C/L50sH/cCgYAd02wyW8qUqux/0RYydZJR
GWE9Hx3u+SFfRv9aLYgxyyj8oEOXOFjnUYdY7D3KlK1ePEJGq2RG81wD6+XM6Clp
Zt2di0pBjYdi0S+iLfbkaUdqg1+ImLoz2YY/pkNxJQWQNmw2//FbMsAJxh6yKKrD
qNq+6oonBwTf55hDodVHBwKBgEHgEBnyM9ygBXmTgM645jqiwF0v75pHQH2PcO8u
Q0dyDr6PGjiZNWLyw2cBoFXWP9DYXbM5oPTcBMbfizY6DGP5G4uxzqtZHzBE0TDn
OKHGoWr5PG7/xDRrSrZOfe3lhWVCP2XqfnqoKCJwlOYuPws89n+8UmyJttm6DBt0
mUnxAoGBAIvbR87ZFXkvqstLs4KrdqTz4TQIcpzB3wENukHODPA6C1gzWTqp+OEe
GMNltPfGCLO+YmoMQuTpb0kECYV3k4jR3gXO6YvlL9KbY+UOA6P0dDX4ROi2Rklj
yh+lxFLYa1vlzzi9r8B7nkR9hrOGMvkfXF42X89g7lx4uMtu2I4q
-----END RSA PRIVATE KEY-----`)
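Illustrative usage sketch, not part of the diff: one way a test could wire NewAdmissionWebhookServer and AdmissionWebhookHandler together. The admit callback and test name below are assumptions.

package utils_test

import (
    "fmt"
    "testing"

    "k8s.io/api/admission/v1beta1"
    utils "k8s.io/kubernetes/test/utils"
)

func TestWebhookServerSketch(t *testing.T) {
    // Reject reviews without a request payload; accept everything else.
    handler := utils.AdmissionWebhookHandler(t, func(review *v1beta1.AdmissionReview) error {
        if review.Request == nil {
            return fmt.Errorf("empty AdmissionReview request")
        }
        return nil
    })

    url, closeFn, err := utils.NewAdmissionWebhookServer(handler)
    if err != nil {
        t.Fatalf("failed to start webhook server: %v", err)
    }
    defer closeFn()

    // url points at a TLS endpoint trusted only for LocalhostCert; a real e2e
    // test would register it in a webhook configuration.
    t.Logf("webhook listening at %s", url)
}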
vendor/k8s.io/kubernetes/test/utils/audit.go (109 lines changed, generated, vendored)
@@ -20,12 +20,14 @@ import (
    "bufio"
    "fmt"
    "io"
    "reflect"
    "sort"
    "strings"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
    auditinternal "k8s.io/apiserver/pkg/apis/audit"
    "k8s.io/apiserver/pkg/audit"
)
@@ -46,6 +48,11 @@ type AuditEvent struct {
    RequestObject     bool
    ResponseObject    bool
    AuthorizeDecision string

    // The Check functions in this package takes ownerships of these maps. You should
    // not reference these maps after calling the Check functions.
    AdmissionWebhookMutationAnnotations map[string]string
    AdmissionWebhookPatchAnnotations    map[string]string
}

// MissingEventsReport provides an analysis if any events are missing
@@ -71,7 +78,7 @@ func (m *MissingEventsReport) String() string {

// CheckAuditLines searches the audit log for the expected audit lines.
func CheckAuditLines(stream io.Reader, expected []AuditEvent, version schema.GroupVersion) (missingReport *MissingEventsReport, err error) {
    expectations := buildEventExpectations(expected)
    expectations := newAuditEventTracker(expected)

    scanner := bufio.NewScanner(stream)

@@ -98,24 +105,20 @@ func CheckAuditLines(stream io.Reader, expected []AuditEvent, version schema.Gro
            return missingReport, err
        }

        // If the event was expected, mark it as found.
        if _, found := expectations[event]; found {
            expectations[event] = true
        }
        expectations.Mark(event)
    }
    if err := scanner.Err(); err != nil {
        return missingReport, err
    }

    missingEvents := findMissing(expectations)
    missingReport.MissingEvents = missingEvents
    missingReport.MissingEvents = expectations.Missing()
    missingReport.NumEventsChecked = i
    return missingReport, nil
}

// CheckAuditList searches an audit event list for the expected audit events.
func CheckAuditList(el auditinternal.EventList, expected []AuditEvent) (missing []AuditEvent, err error) {
    expectations := buildEventExpectations(expected)
    expectations := newAuditEventTracker(expected)

    for _, e := range el.Items {
        event, err := testEventFromInternal(&e)
@@ -123,45 +126,38 @@ func CheckAuditList(el auditinternal.EventList, expected []AuditEvent) (missing
            return expected, err
        }

        // If the event was expected, mark it as found.
        if _, found := expectations[event]; found {
            expectations[event] = true
        }
        expectations.Mark(event)
    }

    missing = findMissing(expectations)
    return missing, nil
    return expectations.Missing(), nil
}

// CheckForDuplicates checks a list for duplicate events
func CheckForDuplicates(el auditinternal.EventList) (auditinternal.EventList, error) {
    // eventMap holds a map of audit events with just a nil value
    eventMap := map[AuditEvent]*bool{}
    // existingEvents holds a slice of audit events that have been seen
    existingEvents := []AuditEvent{}
    duplicates := auditinternal.EventList{}
    var err error
    for _, e := range el.Items {
        event, err := testEventFromInternal(&e)
        if err != nil {
            return duplicates, err
        }
        event.ID = e.AuditID
        if _, ok := eventMap[event]; ok {
            duplicates.Items = append(duplicates.Items, e)
            err = fmt.Errorf("failed duplicate check")
            continue
        for _, existing := range existingEvents {
            if reflect.DeepEqual(existing, event) {
                duplicates.Items = append(duplicates.Items, e)
                continue
            }
        }
        eventMap[event] = nil
        existingEvents = append(existingEvents, event)
    }
    return duplicates, err
}

// buildEventExpectations creates a bool map out of a list of audit events
func buildEventExpectations(expected []AuditEvent) map[AuditEvent]bool {
    expectations := map[AuditEvent]bool{}
    for _, event := range expected {
        expectations[event] = false
    var err error
    if len(duplicates.Items) > 0 {
        err = fmt.Errorf("failed duplicate check")
    }
    return expectations

    return duplicates, err
}

// testEventFromInternal takes an internal audit event and returns a test event
@@ -192,15 +188,58 @@ func testEventFromInternal(e *auditinternal.Event) (AuditEvent, error) {
        event.ImpersonatedGroups = strings.Join(e.ImpersonatedUser.Groups, ",")
    }
    event.AuthorizeDecision = e.Annotations["authorization.k8s.io/decision"]
    for k, v := range e.Annotations {
        if strings.HasPrefix(k, mutating.PatchAuditAnnotationPrefix) {
            if event.AdmissionWebhookPatchAnnotations == nil {
                event.AdmissionWebhookPatchAnnotations = map[string]string{}
            }
            event.AdmissionWebhookPatchAnnotations[k] = v
        } else if strings.HasPrefix(k, mutating.MutationAuditAnnotationPrefix) {
            if event.AdmissionWebhookMutationAnnotations == nil {
                event.AdmissionWebhookMutationAnnotations = map[string]string{}
            }
            event.AdmissionWebhookMutationAnnotations[k] = v
        }
    }
    return event, nil
}

// findMissing checks for false values in the expectations map and returns them as a list
func findMissing(expectations map[AuditEvent]bool) []AuditEvent {
// auditEvent is a private wrapper on top of AuditEvent used by auditEventTracker
type auditEvent struct {
    event AuditEvent
    found bool
}

// auditEventTracker keeps track of AuditEvent expectations and marks matching events as found
type auditEventTracker struct {
    events []*auditEvent
}

// newAuditEventTracker creates a tracker that tracks whether expect events are found
func newAuditEventTracker(expected []AuditEvent) *auditEventTracker {
    expectations := &auditEventTracker{events: []*auditEvent{}}
    for _, event := range expected {
        // we copy the references to the maps in event
        expectations.events = append(expectations.events, &auditEvent{event: event, found: false})
    }
    return expectations
}

// Mark marks the given event as found if it's expected
func (t *auditEventTracker) Mark(event AuditEvent) {
    for _, e := range t.events {
        if reflect.DeepEqual(e.event, event) {
            e.found = true
        }
    }
}

// Missing reports events that are expected but not found
func (t *auditEventTracker) Missing() []AuditEvent {
    var missing []AuditEvent
    for event, found := range expectations {
        if !found {
            missing = append(missing, event)
    for _, e := range t.events {
        if !e.found {
            missing = append(missing, e.event)
        }
    }
    return missing
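Illustrative usage sketch, not part of the diff: feeding an audit log through CheckAuditLines, which now uses the tracker-based matching shown above. The log path and the expected-event fields are assumptions.

package utils_test

import (
    "os"
    "testing"

    auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
    utils "k8s.io/kubernetes/test/utils"
)

func TestAuditLogSketch(t *testing.T) {
    f, err := os.Open("/tmp/audit.log") // hypothetical audit log location
    if err != nil {
        t.Fatalf("open audit log: %v", err)
    }
    defer f.Close()

    expected := []utils.AuditEvent{
        {
            // Only fields visible in this diff are set; a real expectation
            // would also fill in verb, resource, response code, and so on.
            AuthorizeDecision: "allow",
            RequestObject:     true,
        },
    }

    report, err := utils.CheckAuditLines(f, expected, auditv1.SchemeGroupVersion)
    if err != nil {
        t.Fatalf("scanning audit lines: %v", err)
    }
    if len(report.MissingEvents) > 0 {
        t.Errorf("missing audit events: %v", report.MissingEvents)
    }
}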
vendor/k8s.io/kubernetes/test/utils/audit_dynamic.go (2 lines changed, generated, vendored)
@@ -95,8 +95,6 @@ func (a *AuditTestServer) WaitForEvents(expected []AuditEvent) ([]AuditEvent, er
    var missing []AuditEvent
    err := wait.PollImmediate(50*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
        var err error
        a.LockedEventList.RLock()
        defer a.LockedEventList.RUnlock()
        el := a.GetEventList()
        if len(el.Items) < 1 {
            return false, nil
vendor/k8s.io/kubernetes/test/utils/create_resources.go (34 lines changed, generated, vendored)
@@ -232,3 +232,37 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.PersistentVolume) error {
    if obj == nil {
        return fmt.Errorf("Object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.CoreV1().PersistentVolumes().Create(obj)
        if err == nil || apierrs.IsAlreadyExists(err) {
            return true, nil
        }
        if IsRetryableAPIError(err) {
            return false, nil
        }
        return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}

func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace string, obj *v1.PersistentVolumeClaim) error {
    if obj == nil {
        return fmt.Errorf("Object provided to create is empty")
    }
    createFunc := func() (bool, error) {
        _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(obj)
        if err == nil || apierrs.IsAlreadyExists(err) {
            return true, nil
        }
        if IsRetryableAPIError(err) {
            return false, nil
        }
        return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
    }
    return RetryWithExponentialBackOff(createFunc)
}
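Illustrative usage sketch, not part of the diff: creating a pre-bound PV/PVC pair with the retry helpers added above. The object names, capacity, and hostPath source are assumptions.

package utils_test

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    utils "k8s.io/kubernetes/test/utils"
)

func createExampleVolume(c clientset.Interface, namespace string) error {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{Name: "pv-example"},
        Spec: v1.PersistentVolumeSpec{
            Capacity:    v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
            AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
            PersistentVolumeSource: v1.PersistentVolumeSource{
                HostPath: &v1.HostPathVolumeSource{Path: "/tmp/pv-example"},
            },
        },
    }
    // Retries on transient API errors; returns nil if the PV already exists.
    if err := utils.CreatePersistentVolumeWithRetries(c, pv); err != nil {
        return err
    }

    pvc := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "pvc-example"},
        Spec: v1.PersistentVolumeClaimSpec{
            AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
            Resources: v1.ResourceRequirements{
                Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
            },
            VolumeName: pv.Name, // pre-bind the claim to the volume above
        },
    }
    return utils.CreatePersistentVolumeClaimWithRetries(c, namespace, pvc)
}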
vendor/k8s.io/kubernetes/test/utils/image/manifest.go (192 lines changed, generated, vendored)
@@ -20,17 +20,25 @@ import (
    "fmt"
    "io/ioutil"
    "os"
    "strings"

    yaml "gopkg.in/yaml.v2"
)

// RegistryList holds public and private image registries
type RegistryList struct {
    DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
    E2eRegistry string `yaml:"e2eRegistry"`
    GcRegistry string `yaml:"gcRegistry"`
    PrivateRegistry string `yaml:"privateRegistry"`
    SampleRegistry string `yaml:"sampleRegistry"`
    GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
    DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
    DockerGluster string `yaml:"dockerGluster"`
    E2eRegistry string `yaml:"e2eRegistry"`
    InvalidRegistry string `yaml:"invalidRegistry"`
    GcRegistry string `yaml:"gcRegistry"`
    GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
    GoogleContainerRegistry string `yaml:"googleContainerRegistry"`
    PrivateRegistry string `yaml:"privateRegistry"`
    SampleRegistry string `yaml:"sampleRegistry"`
    QuayK8sCSI string `yaml:"quayK8sCSI"`
    QuayIncubator string `yaml:"quayIncubator"`
}

// Config holds an images registry, name, and version
@@ -57,11 +65,18 @@ func (i *Config) SetVersion(version string) {

func initReg() RegistryList {
    registry := RegistryList{
        DockerLibraryRegistry: "docker.io/library",
        E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
        GcRegistry: "k8s.gcr.io",
        PrivateRegistry: "gcr.io/k8s-authenticated-test",
        SampleRegistry: "gcr.io/google-samples",
        GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
        DockerLibraryRegistry: "docker.io/library",
        DockerGluster: "docker.io/gluster",
        E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
        InvalidRegistry: "invalid.com/invalid",
        GcRegistry: "k8s.gcr.io",
        GcrReleaseRegistry: "gcr.io/gke-release",
        GoogleContainerRegistry: "gcr.io/google-containers",
        PrivateRegistry: "gcr.io/k8s-authenticated-test",
        SampleRegistry: "gcr.io/google-samples",
        QuayK8sCSI: "quay.io/k8scsi",
        QuayIncubator: "quay.io/kubernetes_incubator",
    }
    repoList := os.Getenv("KUBE_TEST_REPO_LIST")
    if repoList == "" {
@@ -81,10 +96,17 @@ func initReg() RegistryList {
    }

var (
    registry = initReg()
    dockerLibraryRegistry = registry.DockerLibraryRegistry
    e2eRegistry = registry.E2eRegistry
    gcRegistry = registry.GcRegistry
    registry = initReg()
    dockerLibraryRegistry = registry.DockerLibraryRegistry
    dockerGluster = registry.DockerGluster
    e2eRegistry = registry.E2eRegistry
    gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry
    gcRegistry = registry.GcRegistry
    gcrReleaseRegistry = registry.GcrReleaseRegistry
    googleContainerRegistry = registry.GoogleContainerRegistry
    invalidRegistry = registry.InvalidRegistry
    quayK8sCSI = registry.QuayK8sCSI
    quayIncubator = registry.QuayIncubator
    // PrivateRegistry is an image repository that requires authentication
    PrivateRegistry = registry.PrivateRegistry
    sampleRegistry = registry.SampleRegistry
@@ -94,18 +116,18 @@ var (
)

const (
    // CRDConversionWebhook image
    CRDConversionWebhook = iota
    // AdmissionWebhook image
    AdmissionWebhook
    // Agnhost image
    Agnhost
    Agnhost = iota
    // AgnhostPrivate image
    AgnhostPrivate
    // APIServer image
    APIServer
    // AppArmorLoader image
    AppArmorLoader
    // AuditProxy image
    AuditProxy
    // AuthenticatedAlpine image
    AuthenticatedAlpine
    // AuthenticatedWindowsNanoServer image
    AuthenticatedWindowsNanoServer
    // BusyBox image
    BusyBox
    // CheckMetadataConcealment image
@@ -118,44 +140,30 @@ const (
    Dnsutils
    // EchoServer image
    EchoServer
    // EntrypointTester image
    EntrypointTester
    // Etcd image
    Etcd
    // Fakegitserver image
    Fakegitserver
    // GBFrontend image
    GBFrontend
    // GBRedisSlave image
    GBRedisSlave
    // Hostexec image
    Hostexec
    // InClusterClient image
    InClusterClient
    // GlusterDynamicProvisioner image
    GlusterDynamicProvisioner
    // Httpd image
    Httpd
    // HttpdNew image
    HttpdNew
    // InvalidRegistryImage image
    InvalidRegistryImage
    // IpcUtils image
    IpcUtils
    // Iperf image
    Iperf
    // JessieDnsutils image
    JessieDnsutils
    // Kitten image
    Kitten
    // Liveness image
    Liveness
    // LogsGenerator image
    LogsGenerator
    // Mounttest image
    Mounttest
    // MounttestUser image
    MounttestUser
    // Nautilus image
    Nautilus
    // Net image
    Net
    // Netexec image
    Netexec
    // Nettest image
    Nettest
    // NFSProvisioner image
    NFSProvisioner
    // Nginx image
    Nginx
    // NginxNew image
@@ -164,27 +172,27 @@ const (
    Nonewprivs
    // NonRoot runs with a default user of 1234
    NonRoot
    // NoSnatTest image
    NoSnatTest
    // NoSnatTestProxy image
    NoSnatTestProxy
    // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
    // Pause image
    Pause
    // Perl image
    Perl
    // Porter image
    Porter
    // PortForwardTester image
    PortForwardTester
    // PrometheusDummyExporter image
    PrometheusDummyExporter
    // PrometheusToSd image
    PrometheusToSd
    // Redis image
    Redis
    // RegressionIssue74839 image
    RegressionIssue74839
    // ResourceConsumer image
    ResourceConsumer
    // ResourceController image
    ResourceController
    // ServeHostname image
    ServeHostname
    // SdDummyExporter image
    SdDummyExporter
    // StartupScript image
    StartupScript
    // TestWebserver image
    TestWebserver
    // VolumeNFSServer image
@@ -199,52 +207,45 @@ const (

func initImageConfigs() map[int]Config {
    configs := map[int]Config{}
    configs[CRDConversionWebhook] = Config{e2eRegistry, "crd-conversion-webhook", "1.13rev2"}
    configs[AdmissionWebhook] = Config{e2eRegistry, "webhook", "1.15v1"}
    configs[Agnhost] = Config{e2eRegistry, "agnhost", "1.0"}
    configs[Agnhost] = Config{e2eRegistry, "agnhost", "2.8"}
    configs[AgnhostPrivate] = Config{PrivateRegistry, "agnhost", "2.6"}
    configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
    configs[AuthenticatedWindowsNanoServer] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"}
    configs[APIServer] = Config{e2eRegistry, "sample-apiserver", "1.10"}
    configs[AppArmorLoader] = Config{e2eRegistry, "apparmor-loader", "1.0"}
    configs[AuditProxy] = Config{e2eRegistry, "audit-proxy", "1.0"}
    configs[BusyBox] = Config{dockerLibraryRegistry, "busybox", "1.29"}
    configs[CheckMetadataConcealment] = Config{e2eRegistry, "metadata-concealment", "1.2"}
    configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
    configs[CudaVectorAdd2] = Config{e2eRegistry, "cuda-vector-add", "2.0"}
    configs[Dnsutils] = Config{e2eRegistry, "dnsutils", "1.1"}
    configs[EchoServer] = Config{e2eRegistry, "echoserver", "2.2"}
    configs[EntrypointTester] = Config{e2eRegistry, "entrypoint-tester", "1.0"}
    configs[Etcd] = Config{gcRegistry, "etcd", "3.3.10"}
    configs[Fakegitserver] = Config{e2eRegistry, "fakegitserver", "1.0"}
    configs[GBFrontend] = Config{sampleRegistry, "gb-frontend", "v6"}
    configs[GBRedisSlave] = Config{sampleRegistry, "gb-redisslave", "v3"}
    configs[Hostexec] = Config{e2eRegistry, "hostexec", "1.1"}
    configs[InClusterClient] = Config{e2eRegistry, "inclusterclient", "1.0"}
    configs[Etcd] = Config{gcRegistry, "etcd", "3.4.3"}
    configs[GlusterDynamicProvisioner] = Config{dockerGluster, "glusterdynamic-provisioner", "v1.0"}
    configs[Httpd] = Config{dockerLibraryRegistry, "httpd", "2.4.38-alpine"}
    configs[HttpdNew] = Config{dockerLibraryRegistry, "httpd", "2.4.39-alpine"}
    configs[InvalidRegistryImage] = Config{invalidRegistry, "alpine", "3.1"}
    configs[IpcUtils] = Config{e2eRegistry, "ipc-utils", "1.0"}
    configs[Iperf] = Config{e2eRegistry, "iperf", "1.0"}
    configs[JessieDnsutils] = Config{e2eRegistry, "jessie-dnsutils", "1.0"}
    configs[Kitten] = Config{e2eRegistry, "kitten", "1.0"}
    configs[Liveness] = Config{e2eRegistry, "liveness", "1.1"}
    configs[LogsGenerator] = Config{e2eRegistry, "logs-generator", "1.0"}
    configs[Mounttest] = Config{e2eRegistry, "mounttest", "1.0"}
    configs[MounttestUser] = Config{e2eRegistry, "mounttest-user", "1.0"}
    configs[Nautilus] = Config{e2eRegistry, "nautilus", "1.0"}
    configs[Net] = Config{e2eRegistry, "net", "1.0"}
    configs[Netexec] = Config{e2eRegistry, "netexec", "1.1"}
    configs[Nettest] = Config{e2eRegistry, "nettest", "1.0"}
    configs[NFSProvisioner] = Config{quayIncubator, "nfs-provisioner", "v2.2.2"}
    configs[Nginx] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"}
    configs[NginxNew] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"}
    configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"}
    configs[NonRoot] = Config{e2eRegistry, "nonroot", "1.0"}
    configs[NoSnatTest] = Config{e2eRegistry, "no-snat-test", "1.0"}
    configs[NoSnatTestProxy] = Config{e2eRegistry, "no-snat-test-proxy", "1.0"}
    // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
    configs[Pause] = Config{gcRegistry, "pause", "3.1"}
    configs[Perl] = Config{dockerLibraryRegistry, "perl", "5.26"}
    configs[Porter] = Config{e2eRegistry, "porter", "1.0"}
    configs[PortForwardTester] = Config{e2eRegistry, "port-forward-tester", "1.0"}
    configs[Redis] = Config{e2eRegistry, "redis", "1.0"}
    configs[PrometheusDummyExporter] = Config{gcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
    configs[PrometheusToSd] = Config{gcRegistry, "prometheus-to-sd", "v0.5.0"}
    configs[Redis] = Config{dockerLibraryRegistry, "redis", "5.0.5-alpine"}
    configs[RegressionIssue74839] = Config{e2eRegistry, "regression-issue-74839-amd64", "1.0"}
    configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"}
    configs[ResourceController] = Config{e2eRegistry, "resource-consumer-controller", "1.0"}
    configs[ServeHostname] = Config{e2eRegistry, "serve-hostname", "1.1"}
    configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
    configs[StartupScript] = Config{googleContainerRegistry, "startup-script", "v1"}
    configs[TestWebserver] = Config{e2eRegistry, "test-webserver", "1.0"}
    configs[VolumeNFSServer] = Config{e2eRegistry, "volume/nfs", "1.0"}
    configs[VolumeISCSIServer] = Config{e2eRegistry, "volume/iscsi", "2.0"}
@@ -277,3 +278,38 @@ func (i *Config) GetE2EImage() string {
func GetPauseImageName() string {
    return GetE2EImage(Pause)
}

// ReplaceRegistryInImageURL replaces the registry in the image URL with a custom one
func ReplaceRegistryInImageURL(imageURL string) (string, error) {
    parts := strings.Split(imageURL, "/")
    countParts := len(parts)
    registryAndUser := strings.Join(parts[:countParts-1], "/")

    switch registryAndUser {
    case "gcr.io/kubernetes-e2e-test-images":
        registryAndUser = e2eRegistry
    case "k8s.gcr.io":
        registryAndUser = gcRegistry
    case "gcr.io/k8s-authenticated-test":
        registryAndUser = PrivateRegistry
    case "gcr.io/google-samples":
        registryAndUser = sampleRegistry
    case "gcr.io/gke-release":
        registryAndUser = gcrReleaseRegistry
    case "docker.io/library":
        registryAndUser = dockerLibraryRegistry
    case "quay.io/k8scsi":
        registryAndUser = quayK8sCSI
    default:
        if countParts == 1 {
            // We assume we found an image from docker hub library
            // e.g. openjdk -> docker.io/library/openjdk
            registryAndUser = dockerLibraryRegistry
            break
        }

        return "", fmt.Errorf("Registry: %s is missing in test/utils/image/manifest.go, please add the registry, otherwise the test will fail on air-gapped clusters", registryAndUser)
    }

    return fmt.Sprintf("%s/%s", registryAndUser, parts[countParts-1]), nil
}
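Illustrative usage sketch, not part of the diff: resolving an image through the updated registry list and remapping a hard-coded URL. The example URL is an assumption; the package is imported under the imageutils alias commonly used in the e2e tests.

package main

import (
    "fmt"

    imageutils "k8s.io/kubernetes/test/utils/image"
)

func main() {
    // Compose registry/name:version for a known image constant.
    fmt.Println(imageutils.GetE2EImage(imageutils.Agnhost))

    // Rewrite only the registry/user prefix; registries not listed in
    // manifest.go return an error so air-gapped runs fail loudly.
    remapped, err := imageutils.ReplaceRegistryInImageURL("gcr.io/kubernetes-e2e-test-images/agnhost:2.8")
    if err != nil {
        fmt.Println("unmapped registry:", err)
        return
    }
    fmt.Println(remapped)
}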
vendor/k8s.io/kubernetes/test/utils/pod_store.go (1 line changed, generated, vendored)
@@ -61,6 +61,7 @@ func NewPodStore(c clientset.Interface, namespace string, label labels.Selector,
        }
        return false, nil
    }); err != nil {
        close(stopCh)
        return nil, err
    }
    return &PodStore{Store: store, stopCh: stopCh, Reflector: reflector}, nil
vendor/k8s.io/kubernetes/test/utils/runners.go (396 lines changed, generated, vendored)
@@ -28,6 +28,7 @@ import (
    apps "k8s.io/api/apps/v1"
    batch "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    storagev1beta1 "k8s.io/api/storage/v1beta1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
@@ -36,7 +37,9 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/json"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
@@ -104,28 +107,31 @@ type RunObjectConfig interface {
    GetReplicas() int
    GetLabelValue(string) (string, bool)
    GetGroupResource() schema.GroupResource
    GetGroupVersionResource() schema.GroupVersionResource
}

type RCConfig struct {
    Affinity *v1.Affinity
    Client clientset.Interface
    ScalesGetter scaleclient.ScalesGetter
    Image string
    Command []string
    Name string
    Namespace string
    PollInterval time.Duration
    Timeout time.Duration
    PodStatusFile *os.File
    Replicas int
    CpuRequest int64 // millicores
    CpuLimit int64 // millicores
    MemRequest int64 // bytes
    MemLimit int64 // bytes
    GpuLimit int64 // count
    ReadinessProbe *v1.Probe
    DNSPolicy *v1.DNSPolicy
    PriorityClassName string
    Affinity *v1.Affinity
    Client clientset.Interface
    ScalesGetter scaleclient.ScalesGetter
    Image string
    Command []string
    Name string
    Namespace string
    PollInterval time.Duration
    Timeout time.Duration
    PodStatusFile *os.File
    Replicas int
    CpuRequest int64 // millicores
    CpuLimit int64 // millicores
    MemRequest int64 // bytes
    MemLimit int64 // bytes
    GpuLimit int64 // count
    ReadinessProbe *v1.Probe
    DNSPolicy *v1.DNSPolicy
    PriorityClassName string
    TerminationGracePeriodSeconds *int64
    Lifecycle *v1.Lifecycle

    // Env vars, set the same for every pod.
    Env map[string]string
@@ -155,6 +161,9 @@ type RCConfig struct {
    // Maximum allowable container failures. If exceeded, RunRC returns an error.
    // Defaults to replicas*0.1 if unspecified.
    MaxContainerFailures *int
    // Maximum allowed pod deletions count. If exceeded, RunRC returns an error.
    // Defaults to 0.
    MaxAllowedPodDeletions int

    // If set to false starting RC will print progress, otherwise only errors will be printed.
    Silent bool
@@ -295,6 +304,10 @@ func (config *DeploymentConfig) GetGroupResource() schema.GroupResource {
    return extensionsinternal.Resource("deployments")
}

func (config *DeploymentConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return extensionsinternal.SchemeGroupVersion.WithResource("deployments")
}

func (config *DeploymentConfig) create() error {
    deployment := &apps.Deployment{
        ObjectMeta: metav1.ObjectMeta{
@@ -313,13 +326,15 @@ func (config *DeploymentConfig) create() error {
                Annotations: config.Annotations,
            },
            Spec: v1.PodSpec{
                Affinity: config.Affinity,
                Affinity: config.Affinity,
                TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
                Containers: []v1.Container{
                    {
                        Name: config.Name,
                        Image: config.Image,
                        Command: config.Command,
                        Ports: []v1.ContainerPort{{ContainerPort: 80}},
                        Name: config.Name,
                        Image: config.Image,
                        Command: config.Command,
                        Ports: []v1.ContainerPort{{ContainerPort: 80}},
                        Lifecycle: config.Lifecycle,
                    },
                },
            },
@@ -371,6 +386,10 @@ func (config *ReplicaSetConfig) GetGroupResource() schema.GroupResource {
    return extensionsinternal.Resource("replicasets")
}

func (config *ReplicaSetConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return extensionsinternal.SchemeGroupVersion.WithResource("replicasets")
}

func (config *ReplicaSetConfig) create() error {
    rs := &apps.ReplicaSet{
        ObjectMeta: metav1.ObjectMeta{
@@ -389,13 +408,15 @@ func (config *ReplicaSetConfig) create() error {
                Annotations: config.Annotations,
            },
            Spec: v1.PodSpec{
                Affinity: config.Affinity,
                Affinity: config.Affinity,
                TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
                Containers: []v1.Container{
                    {
                        Name: config.Name,
                        Image: config.Image,
                        Command: config.Command,
                        Ports: []v1.ContainerPort{{ContainerPort: 80}},
                        Name: config.Name,
                        Image: config.Image,
                        Command: config.Command,
                        Ports: []v1.ContainerPort{{ContainerPort: 80}},
                        Lifecycle: config.Lifecycle,
                    },
                },
            },
@@ -443,6 +464,10 @@ func (config *JobConfig) GetGroupResource() schema.GroupResource {
    return batchinternal.Resource("jobs")
}

func (config *JobConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return batchinternal.SchemeGroupVersion.WithResource("jobs")
}

func (config *JobConfig) create() error {
    job := &batch.Job{
        ObjectMeta: metav1.ObjectMeta{
@@ -457,12 +482,14 @@ func (config *JobConfig) create() error {
                Annotations: config.Annotations,
            },
            Spec: v1.PodSpec{
                Affinity: config.Affinity,
                Affinity: config.Affinity,
                TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
                Containers: []v1.Container{
                    {
                        Name: config.Name,
                        Image: config.Image,
                        Command: config.Command,
                        Name: config.Name,
                        Image: config.Image,
                        Command: config.Command,
                        Lifecycle: config.Lifecycle,
                    },
                },
                RestartPolicy: v1.RestartPolicyOnFailure,
@@ -519,6 +546,10 @@ func (config *RCConfig) GetGroupResource() schema.GroupResource {
    return api.Resource("replicationcontrollers")
}

func (config *RCConfig) GetGroupVersionResource() schema.GroupVersionResource {
    return api.SchemeGroupVersion.WithResource("replicationcontrollers")
}

func (config *RCConfig) GetClient() clientset.Interface {
    return config.Client
}
@@ -573,12 +604,13 @@ func (config *RCConfig) create() error {
                        Command: config.Command,
                        Ports: []v1.ContainerPort{{ContainerPort: 80}},
                        ReadinessProbe: config.ReadinessProbe,
                        Lifecycle: config.Lifecycle,
                    },
                },
                DNSPolicy: *config.DNSPolicy,
                NodeSelector: config.NodeSelector,
                Tolerations: config.Tolerations,
                TerminationGracePeriodSeconds: &one,
                TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(&one),
                PriorityClassName: config.PriorityClassName,
            },
        },
@@ -655,6 +687,9 @@ func (config *RCConfig) applyTo(template *v1.PodTemplateSpec) {
    if config.GpuLimit > 0 {
        template.Spec.Containers[0].Resources.Limits["nvidia.com/gpu"] = *resource.NewQuantity(config.GpuLimit, resource.DecimalSI)
    }
    if config.Lifecycle != nil {
        template.Spec.Containers[0].Lifecycle = config.Lifecycle
    }
    if len(config.Volumes) > 0 {
        template.Spec.Volumes = config.Volumes
    }
@@ -763,6 +798,7 @@ func (config *RCConfig) start() error {
    oldPods := make([]*v1.Pod, 0)
    oldRunning := 0
    lastChange := time.Now()
    podDeletionsCount := 0
    for oldRunning != config.Replicas {
        time.Sleep(interval)

@@ -793,9 +829,10 @@ func (config *RCConfig) start() error {

        diff := Diff(oldPods, pods)
        deletedPods := diff.DeletedPods()
        if len(deletedPods) != 0 {
            // There are some pods that have disappeared.
            err := fmt.Errorf("%d pods disappeared for %s: %v", len(deletedPods), config.Name, strings.Join(deletedPods, ", "))
        podDeletionsCount += len(deletedPods)
        if podDeletionsCount > config.MaxAllowedPodDeletions {
            // Number of pods which disappeared is over threshold
            err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", "))
            config.RCConfigLog(err.Error())
            config.RCConfigLog(diff.String(sets.NewString()))
            return err
@@ -908,12 +945,22 @@ type TestNodePreparer interface {
}

type PrepareNodeStrategy interface {
    // Modify pre-created Node objects before the test starts.
    PreparePatch(node *v1.Node) []byte
    // Create or modify any objects that depend on the node before the test starts.
    // Caller will re-try when http.StatusConflict error is returned.
    PrepareDependentObjects(node *v1.Node, client clientset.Interface) error
    // Clean up any node modifications after the test finishes.
    CleanupNode(node *v1.Node) *v1.Node
    // Clean up any objects that depend on the node after the test finishes.
    // Caller will re-try when http.StatusConflict error is returned.
    CleanupDependentObjects(nodeName string, client clientset.Interface) error
}

type TrivialNodePrepareStrategy struct{}

var _ PrepareNodeStrategy = &TrivialNodePrepareStrategy{}

func (*TrivialNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
    return []byte{}
}
@@ -923,11 +970,21 @@ func (*TrivialNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
    return &nodeCopy
}

func (*TrivialNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
    return nil
}

func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
    return nil
}

type LabelNodePrepareStrategy struct {
    labelKey string
    labelValue string
}

var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}

func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy {
    return &LabelNodePrepareStrategy{
        labelKey: labelKey,
@@ -949,6 +1006,148 @@ func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
    return nodeCopy
}

func (*LabelNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
    return nil
}

func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
    return nil
}

// NodeAllocatableStrategy fills node.status.allocatable and csiNode.spec.drivers[*].allocatable.
// csiNode is created if it does not exist. On cleanup, any csiNode.spec.drivers[*].allocatable is
// set to nil.
type NodeAllocatableStrategy struct {
    // Node.status.allocatable to fill to all nodes.
    nodeAllocatable map[v1.ResourceName]string
    // Map <driver_name> -> VolumeNodeResources to fill into csiNode.spec.drivers[<driver_name>].
    csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
    // List of in-tree volume plugins migrated to CSI.
    migratedPlugins []string
}

var _ PrepareNodeStrategy = &NodeAllocatableStrategy{}

func NewNodeAllocatableStrategy(nodeAllocatable map[v1.ResourceName]string, csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources, migratedPlugins []string) *NodeAllocatableStrategy {
    return &NodeAllocatableStrategy{nodeAllocatable, csiNodeAllocatable, migratedPlugins}
}

func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
    newNode := node.DeepCopy()
    for name, value := range s.nodeAllocatable {
        newNode.Status.Allocatable[name] = resource.MustParse(value)
    }

    oldJSON, err := json.Marshal(node)
    if err != nil {
        panic(err)
    }
    newJSON, err := json.Marshal(newNode)
    if err != nil {
        panic(err)
    }

    patch, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, v1.Node{})
    if err != nil {
        panic(err)
    }
    return patch
}

func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
    nodeCopy := node.DeepCopy()
    for name := range s.nodeAllocatable {
        delete(nodeCopy.Status.Allocatable, name)
    }
    return nodeCopy
}

func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientset.Interface) error {
    csiNode := &storagev1beta1.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: nodeName,
            Annotations: map[string]string{
                v1.MigratedPluginsAnnotationKey: strings.Join(s.migratedPlugins, ","),
            },
        },
        Spec: storagev1beta1.CSINodeSpec{
            Drivers: []storagev1beta1.CSINodeDriver{},
        },
    }

    for driver, allocatable := range s.csiNodeAllocatable {
        d := storagev1beta1.CSINodeDriver{
            Name: driver,
            Allocatable: allocatable,
            NodeID: nodeName,
        }
        csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
    }

    _, err := client.StorageV1beta1().CSINodes().Create(csiNode)
    if apierrs.IsAlreadyExists(err) {
        // Something created CSINode instance after we checked it did not exist.
        // Make the caller to re-try PrepareDependentObjects by returning Conflict error
        err = apierrs.NewConflict(storagev1beta1.Resource("csinodes"), nodeName, err)
    }
    return err
}

func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, client clientset.Interface) error {
    for driverName, allocatable := range s.csiNodeAllocatable {
        found := false
        for i, driver := range csiNode.Spec.Drivers {
            if driver.Name == driverName {
                found = true
                csiNode.Spec.Drivers[i].Allocatable = allocatable
                break
            }
        }
        if !found {
            d := storagev1beta1.CSINodeDriver{
                Name: driverName,
                Allocatable: allocatable,
            }

            csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
        }
    }
    csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.migratedPlugins, ",")

    _, err := client.StorageV1beta1().CSINodes().Update(csiNode)
    return err
}

func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
    csiNode, err := client.StorageV1beta1().CSINodes().Get(node.Name, metav1.GetOptions{})
    if err != nil {
        if apierrs.IsNotFound(err) {
            return s.createCSINode(node.Name, client)
        }
        return err
    }
    return s.updateCSINode(csiNode, client)
}

func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
    csiNode, err := client.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{})
    if err != nil {
        if apierrs.IsNotFound(err) {
            return nil
        }
        return err
    }

    for driverName := range s.csiNodeAllocatable {
        for i, driver := range csiNode.Spec.Drivers {
            if driver.Name == driverName {
                csiNode.Spec.Drivers[i].Allocatable = nil
            }
        }
    }
    return s.updateCSINode(csiNode, client)
}

func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
    var err error
    patch := strategy.PreparePatch(node)
@@ -957,17 +1156,34 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
    }
    for attempt := 0; attempt < retries; attempt++ {
        if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil {
            return nil
            break
        }
        if !apierrs.IsConflict(err) {
            return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
        }
        time.Sleep(100 * time.Millisecond)
    }
    return fmt.Errorf("To many conflicts when applying patch %v to Node %v", string(patch), node.Name)
    if err != nil {
        return fmt.Errorf("Too many conflicts when applying patch %v to Node %v: %s", string(patch), node.Name, err)
    }

    for attempt := 0; attempt < retries; attempt++ {
        if err = strategy.PrepareDependentObjects(node, client); err == nil {
            break
        }
        if !apierrs.IsConflict(err) {
            return fmt.Errorf("Error while preparing objects for node %s: %s", node.Name, err)
        }
        time.Sleep(100 * time.Millisecond)
    }
    if err != nil {
        return fmt.Errorf("Too many conflicts when creating objects for node %s: %s", node.Name, err)
    }
    return nil
}

func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
    var err error
    for attempt := 0; attempt < retries; attempt++ {
        node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
@@ -978,14 +1194,31 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
            return nil
        }
        if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil {
            return nil
            break
        }
        if !apierrs.IsConflict(err) {
            return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
        }
        time.Sleep(100 * time.Millisecond)
    }
    return fmt.Errorf("To many conflicts when trying to cleanup Node %v", nodeName)
    if err != nil {
        return fmt.Errorf("Too many conflicts when trying to cleanup Node %v: %s", nodeName, err)
    }

    for attempt := 0; attempt < retries; attempt++ {
        err = strategy.CleanupDependentObjects(nodeName, client)
        if err == nil {
            break
        }
        if !apierrs.IsConflict(err) {
            return fmt.Errorf("Error when cleaning up Node %v objects: %v", nodeName, err)
        }
        time.Sleep(100 * time.Millisecond)
    }
    if err != nil {
        return fmt.Errorf("Too many conflicts when trying to cleanup Node %v objects: %s", nodeName, err)
    }
    return nil
}

type TestPodCreateStrategy func(client clientset.Interface, namespace string, podCount int) error
@@ -1077,6 +1310,70 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe
    return createError
}

func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int) error {
    var createError error
    lock := sync.Mutex{}
    createPodFunc := func(i int) {
        pvcName := fmt.Sprintf("pvc-%d", i)

        // pv
        pv := factory(i)
        // bind to "pvc-$i"
        pv.Spec.ClaimRef = &v1.ObjectReference{
            Kind: "PersistentVolumeClaim",
            Namespace: namespace,
            Name: pvcName,
            APIVersion: "v1",
        }
        pv.Status.Phase = v1.VolumeBound
        if err := CreatePersistentVolumeWithRetries(client, pv); err != nil {
            lock.Lock()
            defer lock.Unlock()
            createError = fmt.Errorf("error creating PV: %s", err)
            return
        }

        // pvc
        pvc := claimTemplate.DeepCopy()
        pvc.Name = pvcName
        // bind to "pv-$i"
        pvc.Spec.VolumeName = pv.Name
        pvc.Status.Phase = v1.ClaimBound
        if err := CreatePersistentVolumeClaimWithRetries(client, namespace, pvc); err != nil {
            lock.Lock()
            defer lock.Unlock()
            createError = fmt.Errorf("error creating PVC: %s", err)
            return
        }

        // pod
        pod := podTemplate.DeepCopy()
        pod.Spec.Volumes = []v1.Volume{
            {
                Name: "vol",
                VolumeSource: v1.VolumeSource{
                    PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                        ClaimName: pvcName,
                    },
                },
            },
        }
        if err := makeCreatePod(client, namespace, pod); err != nil {
            lock.Lock()
            defer lock.Unlock()
            createError = err
            return
        }
    }

    if count < 30 {
        workqueue.ParallelizeUntil(context.TODO(), count, count, createPodFunc)
    } else {
        workqueue.ParallelizeUntil(context.TODO(), 30, count, createPodFunc)
    }
    return createError
}

func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
    rc := &v1.ReplicationController{
        ObjectMeta: metav1.ObjectMeta{
@@ -1105,6 +1402,14 @@ func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
    }
}

// volumeFactory creates an unique PersistentVolume for given integer.
type volumeFactory func(uniqueID int) *v1.PersistentVolume

func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
    return func(client clientset.Interface, namespace string, podCount int) error {
        return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount)
    }
}
func NewSimpleCreatePodStrategy() TestPodCreateStrategy {
    basePod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
@@ -1249,6 +1554,13 @@ func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) {
    template.Spec.Containers[0].VolumeMounts = mounts
}

func (config *RCConfig) getTerminationGracePeriodSeconds(defaultGrace *int64) *int64 {
    if config.TerminationGracePeriodSeconds == nil || *config.TerminationGracePeriodSeconds < 0 {
        return defaultGrace
    }
    return config.TerminationGracePeriodSeconds
}

func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name string) {
    template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
        v1.VolumeMount{
@@ -1382,7 +1694,7 @@ func (config *DaemonConfig) Run() error {
        return running == len(nodes.Items), nil
    })
    if err != nil {
        config.LogFunc("Timed out while waiting for DaemonsSet %v/%v to be running.", config.Namespace, config.Name)
        config.LogFunc("Timed out while waiting for DaemonSet %v/%v to be running.", config.Namespace, config.Name)
    } else {
        config.LogFunc("Created Daemon %v/%v", config.Namespace, config.Name)
    }
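Illustrative usage sketch, not part of the diff: wiring the new NodeAllocatableStrategy into DoPrepareNode/DoCleanupNode. The resource name, driver name, and counts below are assumptions.

package utils_test

import (
    v1 "k8s.io/api/core/v1"
    storagev1beta1 "k8s.io/api/storage/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    utils "k8s.io/kubernetes/test/utils"
)

func prepareNodeForCSITest(client clientset.Interface, nodeName string) error {
    count := int32(10)
    strategy := utils.NewNodeAllocatableStrategy(
        // Patch node.status.allocatable with a hypothetical attachable-volume count.
        map[v1.ResourceName]string{"attachable-volumes-csi-hostpath": "10"},
        // Fill csiNode.spec.drivers["hostpath.csi.k8s.io"].allocatable.
        map[string]*storagev1beta1.VolumeNodeResources{
            "hostpath.csi.k8s.io": {Count: &count},
        },
        // No in-tree plugins are reported as migrated in this sketch.
        nil,
    )

    node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
    if err != nil {
        return err
    }
    if err := utils.DoPrepareNode(client, node, strategy); err != nil {
        return err
    }
    // DoCleanupNode reverses both the node patch and the CSINode changes.
    return utils.DoCleanupNode(client, nodeName, strategy)
}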
vendor/k8s.io/kubernetes/test/utils/update_resources.go (18 lines changed, generated, vendored)
@@ -22,13 +22,13 @@ import (

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/scale"
    "k8s.io/kubernetes/pkg/kubectl"
    scaleclient "k8s.io/client-go/scale"
    "k8s.io/kubectl/pkg/scale"
)

const (
    // Parameters for retrying updates/waits with linear backoff.
    // TODO: Try to move this to exponential backoff by modifying kubectl.Scale().
    // TODO: Try to move this to exponential backoff by modifying scale.Scale().
    updateRetryInterval = 5 * time.Second
    updateRetryTimeout = 1 * time.Minute
    waitRetryInterval = 5 * time.Second
@@ -45,17 +45,17 @@ func RetryErrorCondition(condition wait.ConditionFunc) wait.ConditionFunc {
    }
}

func ScaleResourceWithRetries(scalesGetter scale.ScalesGetter, namespace, name string, size uint, gr schema.GroupResource) error {
    scaler := kubectl.NewScaler(scalesGetter)
    preconditions := &kubectl.ScalePrecondition{
func ScaleResourceWithRetries(scalesGetter scaleclient.ScalesGetter, namespace, name string, size uint, gvr schema.GroupVersionResource) error {
    scaler := scale.NewScaler(scalesGetter)
    preconditions := &scale.ScalePrecondition{
        Size: -1,
        ResourceVersion: "",
    }
    waitForReplicas := kubectl.NewRetryParams(waitRetryInterval, waitRetryTimeout)
    cond := RetryErrorCondition(kubectl.ScaleCondition(scaler, preconditions, namespace, name, size, nil, gr))
    waitForReplicas := scale.NewRetryParams(waitRetryInterval, waitRetryTimeout)
    cond := RetryErrorCondition(scale.ScaleCondition(scaler, preconditions, namespace, name, size, nil, gvr))
    err := wait.PollImmediate(updateRetryInterval, updateRetryTimeout, cond)
    if err == nil {
        err = kubectl.WaitForScaleHasDesiredReplicas(scalesGetter, gr, name, namespace, size, waitForReplicas)
        err = scale.WaitForScaleHasDesiredReplicas(scalesGetter, gvr.GroupResource(), name, namespace, size, waitForReplicas)
    }
    if err != nil {
        return fmt.Errorf("Error while scaling %s to %d replicas: %v", name, size, err)
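Illustrative usage sketch, not part of the diff: calling the GroupVersionResource-based ScaleResourceWithRetries. The deployment name and namespace are assumptions; the ScalesGetter would come from a scale client in a real caller.

package utils_test

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    scaleclient "k8s.io/client-go/scale"
    utils "k8s.io/kubernetes/test/utils"
)

func scaleExampleDeployment(scalesGetter scaleclient.ScalesGetter) error {
    gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
    // Scales "example-deployment" in "default" to 3 replicas, retrying on
    // transient errors and waiting until the desired replica count is reached.
    return utils.ScaleResourceWithRetries(scalesGetter, "default", "example-deployment", 3, gvr)
}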