rebase: update kubernetes to 1.28.0 in main

Updating kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2023-08-17 07:15:28 +02:00
Committed by: mergify[bot]
Parent: b2fdc269c3
Commit: ff3e84ad67
706 changed files with 45252 additions and 16346 deletions

View File

@@ -21,11 +21,10 @@ import (
 	"fmt"
 	"time"
-	"github.com/davecgh/go-spew/spew"
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/dump"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -37,7 +36,7 @@ type LogfFn func(format string, args ...interface{})
 func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, logf LogfFn) {
 	if newRS != nil {
-		logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
+		logf("New ReplicaSet %q of Deployment %q:\n%s", newRS.Name, deployment.Name, dump.Pretty(*newRS))
 	} else {
 		logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
 	}
@@ -45,7 +44,7 @@ func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.R
 		logf("All old ReplicaSets of Deployment %q:", deployment.Name)
 	}
 	for i := range allOldRSs {
-		logf(spew.Sprintf("%+v", *allOldRSs[i]))
+		logf(dump.Pretty(*allOldRSs[i]))
 	}
 }
@@ -65,7 +64,7 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsL
 		if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
 			availability = "available"
 		}
-		logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
+		logf("Pod %q is %s:\n%s", pod.Name, availability, dump.Pretty(pod))
 	}
 }
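
Side note on the file above: dump.Pretty returns a plain string, so it slots into the existing printf-style logf calls that previously wrapped spew.Sprintf. A minimal standalone sketch of the pattern (the Pod value and "available" string here are illustrative, not taken from the diff):

// Sketch only: the logging pattern after the spew -> dump switch.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/dump"
)

func main() {
	pod := v1.Pod{}
	pod.Name = "demo"

	// Before: logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, "available", pod))
	// After: dump.Pretty produces a comparable deep dump as a string.
	fmt.Printf("Pod %q is %s:\n%s", pod.Name, "available", dump.Pretty(pod))
}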

View File

@@ -173,8 +173,6 @@ const (
 	DistrolessIptables
 	// Etcd image
 	Etcd
-	// GlusterDynamicProvisioner image
-	GlusterDynamicProvisioner
 	// Httpd image
 	Httpd
 	// HttpdNew image
@@ -226,8 +224,6 @@ const (
 	VolumeNFSServer
 	// VolumeISCSIServer image
 	VolumeISCSIServer
-	// VolumeGlusterServer image
-	VolumeGlusterServer
 	// VolumeRBDServer image
 	VolumeRBDServer
 	// WindowsServer image
@@ -236,7 +232,7 @@ const (
 func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
 	configs := map[ImageID]Config{}
-	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.43"}
+	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"}
 	configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
 	configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
 	configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@@ -244,10 +240,9 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
 	configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
 	configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
 	configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
-	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
-	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.3"}
-	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.7-0"}
-	configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
+	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
+	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"}
+	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"}
 	configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
 	configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
 	configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
@@ -273,9 +268,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
 	configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.13"}
 	configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
 	configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.3"}
-	configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.3"}
-	configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.3"}
-	configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.4"}
+	configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.6"}
+	configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.6"}
 	configs[WindowsServer] = Config{list.MicrosoftRegistry, "windows", "1809"}
 	// This adds more config entries. Those have no pre-defined ImageID number,
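
For context, a minimal sketch of how e2e code resolves entries from this manifest to a pullable image reference via the package's GetE2EImage helper; the printed registry and tag depend on the registry list in use, so the value in the comment is an example, not a guarantee:

// Sketch only: resolving an ImageID from the manifest updated above.
package main

import (
	"fmt"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

func main() {
	// After the bump above, Agnhost resolves to the 2.45 tag, e.g.
	// "registry.k8s.io/e2e-test-images/agnhost:2.45" with the default registries.
	fmt.Println(imageutils.GetE2EImage(imageutils.Agnhost))
}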

View File

@@ -27,6 +27,11 @@ import (
 // GetK8sRootDir returns the root directory for kubernetes, if present in the gopath.
 func GetK8sRootDir() (string, error) {
+	dir := os.Getenv("KUBE_ROOT")
+	if len(dir) > 0 {
+		return dir, nil
+	}
 	dir, err := RootDir()
 	if err != nil {
 		return "", err
@@ -59,12 +64,17 @@ func RootDir() (string, error) {
 }
 // GetK8sBuildOutputDir returns the build output directory for k8s
-func GetK8sBuildOutputDir() (string, error) {
+// For dockerized build, targetArch (eg: 'linux/arm64', 'linux/amd64') must be explicitly specified
+// For non dockerized build, targetArch is ignored
+func GetK8sBuildOutputDir(isDockerizedBuild bool, targetArch string) (string, error) {
 	k8sRoot, err := GetK8sRootDir()
 	if err != nil {
 		return "", err
 	}
 	buildOutputDir := filepath.Join(k8sRoot, "_output/local/go/bin")
+	if isDockerizedBuild {
+		buildOutputDir = filepath.Join(k8sRoot, "_output/dockerized/bin/", targetArch)
+	}
 	if _, err := os.Stat(buildOutputDir); err != nil {
 		return "", err
 	}
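
A minimal sketch of a caller updated for the new signature; resolveBinDir is a hypothetical helper, and its placement in the same package as GetK8sBuildOutputDir is an assumption (this page does not show the file's package), so adjust names and imports to the real call site:

// Sketch only, assuming this sits in the package that declares GetK8sBuildOutputDir.
package builder

import "fmt"

// resolveBinDir shows the new call shape: the dockerized flag plus an explicit target arch.
func resolveBinDir(dockerized bool, targetArch string) (string, error) {
	// GetK8sRootDir (also changed above) now short-circuits when KUBE_ROOT is set.
	dir, err := GetK8sBuildOutputDir(dockerized, targetArch)
	if err != nil {
		return "", fmt.Errorf("locating k8s build output: %w", err)
	}
	return dir, nil
}

For non-dockerized builds the targetArch argument is ignored, so passing an empty string is fine.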

View File

@@ -183,8 +183,11 @@ type RCConfig struct {
 	ServiceAccountTokenProjections int
-	//Additional containers to run in the pod
+	// Additional containers to run in the pod
 	AdditionalContainers []v1.Container
+	// Security context for created pods
+	SecurityContext *v1.SecurityContext
 }
 func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@@ -335,11 +338,12 @@ func (config *DeploymentConfig) create() error {
 					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:      config.Name,
-							Image:     config.Image,
-							Command:   config.Command,
-							Ports:     []v1.ContainerPort{{ContainerPort: 80}},
-							Lifecycle: config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 				},
@@ -421,11 +425,12 @@ func (config *ReplicaSetConfig) create() error {
 					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:      config.Name,
-							Image:     config.Image,
-							Command:   config.Command,
-							Ports:     []v1.ContainerPort{{ContainerPort: 80}},
-							Lifecycle: config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 				},
@@ -499,10 +504,11 @@ func (config *JobConfig) create() error {
 					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:      config.Name,
-							Image:     config.Image,
-							Command:   config.Command,
-							Lifecycle: config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 					RestartPolicy: v1.RestartPolicyOnFailure,
@@ -612,12 +618,13 @@ func (config *RCConfig) create() error {
 					Affinity: config.Affinity,
 					Containers: []v1.Container{
 						{
-							Name:           config.Name,
-							Image:          config.Image,
-							Command:        config.Command,
-							Ports:          []v1.ContainerPort{{ContainerPort: 80}},
-							ReadinessProbe: config.ReadinessProbe,
-							Lifecycle:      config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+							ReadinessProbe:  config.ReadinessProbe,
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 					DNSPolicy: *config.DNSPolicy,
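
For context, a minimal sketch of how a caller can now attach a security context to the pods these runner helpers create; the field values and image below are illustrative only, not taken from this diff:

// Sketch only: populating the new SecurityContext field on RCConfig.
package demo

import (
	v1 "k8s.io/api/core/v1"
	testutils "k8s.io/kubernetes/test/utils"
)

func newRestrictedRCConfig() testutils.RCConfig {
	runAsNonRoot := true
	allowPrivilegeEscalation := false
	return testutils.RCConfig{
		Name:     "demo-rc",
		Image:    "registry.k8s.io/pause:3.9",
		Replicas: 1,
		// New in this version of runners.go: applied to every container the helper creates.
		SecurityContext: &v1.SecurityContext{
			RunAsNonRoot:             &runAsNonRoot,
			AllowPrivilegeEscalation: &allowPrivilegeEscalation,
		},
	}
}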