rebase: update kubernetes to v1.25.0

Update Kubernetes to the latest v1.25.0
release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
Madhu Rajanna
2022-08-24 07:54:25 +05:30
committed by mergify[bot]
parent f47839d73d
commit e3bf375035
645 changed files with 42507 additions and 9219 deletions

View File

@ -258,7 +258,9 @@ func CopyPodLogs(ctx context.Context, cs clientset.Interface, ns, podName string
// logsForPod starts reading the logs for a certain pod. If the pod has more than one
// container, opts.Container must be set. Reading stops when the context is done.
// The stream includes formatted error messages and ends with
// rpc error: code = Unknown desc = Error: No such container: 41a...
//
// rpc error: code = Unknown desc = Error: No such container: 41a...
//
// when the pod gets deleted while streaming.
func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) {
return cs.CoreV1().Pods(ns).GetLogs(pod, opts).Stream(ctx)

View File

@ -48,11 +48,11 @@ import (
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
func LoadFromManifests(files ...string) ([]interface{}, error) {
var items []interface{}
err := visitManifests(func(data []byte) error {
@ -115,11 +115,11 @@ func visitManifests(cb func([]byte) error, files ...string) error {
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func PatchItems(f *framework.Framework, driverNamspace *v1.Namespace, items ...interface{}) error {
func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := patchItemRecursively(f, driverNamspace, item); err != nil {
if err := patchItemRecursively(f, driverNamespace, item); err != nil {
return err
}
}
@ -132,10 +132,10 @@ func PatchItems(f *framework.Framework, driverNamspace *v1.Namespace, items ...i
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// PatchItems has the some limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error

View File

@ -23,7 +23,6 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@ -34,22 +33,22 @@ import (
//
// All of that is optional, see PatchCSIOptions. Just beware
// that not renaming the CSI driver deployment can be problematic:
// - when multiple tests deploy the driver, they need
// to run sequentially
// - might conflict with manual deployments
// - when multiple tests deploy the driver, they need
// to run sequentially
// - might conflict with manual deployments
//
// This function is written so that it works for CSI driver deployments
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the paths inside the container are either fixed
// and don't need to be patch (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the paths inside the container are either fixed
// and don't need to be patch (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
//
// Driver deployments that are different will have to do the patching
// without this function, or skip patching entirely.
func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object interface{}) error {
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
o.OldDriverName != o.NewDriverName

View File

@ -16,7 +16,7 @@ limitations under the License.
package utils
import "github.com/onsi/ginkgo"
import "github.com/onsi/ginkgo/v2"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {

View File

@ -25,7 +25,7 @@ import (
"path/filepath"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"

View File

@ -26,7 +26,7 @@ import (
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -49,7 +49,7 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
ns := driverNamespace.Name
podEventLog := ginkgo.GinkgoWriter
var podEventLog io.Writer = ginkgo.GinkgoWriter
var podEventLogCloser io.Closer
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
@ -57,14 +57,17 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
test := ginkgo.CurrentSpecReport()
// Clean up each individual component text such that
// it contains only characters that are valid as file
// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
var testName []string
for _, text := range test.ContainerHierarchyTexts {
testName = append(testName, reg.ReplaceAllString(text, "_"))
if len(test.LeafNodeText) > 0 {
testName = append(testName, reg.ReplaceAllString(test.LeafNodeText, "_"))
}
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
@ -74,7 +77,7 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
logDir := framework.TestContext.ReportDir + "/" + strings.Join(components, "/")
logDir := framework.TestContext.ReportDir + "/" + strings.Join(testName, "/")
to.LogPathPrefix = logDir + "/"
err := os.MkdirAll(logDir, 0755)

View File

@ -21,7 +21,7 @@ import (
"fmt"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

View File

@ -28,18 +28,16 @@ import (
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -65,11 +63,6 @@ const (
maxValidSize string = "10Ei"
)
const (
	// podSecurityPolicyPrivilegedClusterRoleName is the name of the ClusterRole
	// granting the privileged Pod Security Policy to e2e test users.
	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
@ -417,54 +410,6 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa
return pod
}
// PrivilegedTestPSPClusterRoleBinding binds (or, when teardown is true,
// unbinds) the privileged Pod Security Policy ClusterRole to each of the
// given service accounts in the namespace.
//
// For every service account name, any existing RoleBinding named
// "psp-<saName>" is deleted first and the function waits until the deletion
// is observed by the API server, so repeated invocations start from a clean
// state. When teardown is false, a fresh RoleBinding is then created.
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
	namespace string,
	teardown bool,
	saNames []string) {
	bindingString := "Binding"
	if teardown {
		bindingString = "Unbinding"
	}
	roleBindingClient := client.RbacV1().RoleBindings(namespace)
	for _, saName := range saNames {
		// Fixed typo in the user-visible progress message: "priviledged" -> "privileged".
		ginkgo.By(fmt.Sprintf("%v privileged Pod Security Policy to the service account %s", bindingString, saName))
		binding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "psp-" + saName,
				Namespace: namespace,
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      rbacv1.ServiceAccountKind,
					Name:      saName,
					Namespace: namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				Name:     podSecurityPolicyPrivilegedClusterRoleName,
				APIGroup: "rbac.authorization.k8s.io",
			},
		}
		// Best-effort delete of any stale binding; a NotFound error is expected
		// here and deliberately ignored — the poll below confirms absence.
		_ = roleBindingClient.Delete(context.TODO(), binding.GetName(), metav1.DeleteOptions{})
		err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
			_, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{})
			return apierrors.IsNotFound(err), nil
		})
		framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err)
		if teardown {
			// Teardown only required the delete above; skip re-creation.
			continue
		}
		_, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
	}
}
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)