rebase: update kubernetes dep to 1.24.0

As Kubernetes 1.24.0 has been released, update the
Kubernetes dependencies to 1.24.0.
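
For reference, a bump like this mainly moves the k8s.io module pins in go.mod along these lines (illustrative only; the exact module set and replace directives in this repository's go.mod may differ):

    k8s.io/api v0.24.0
    k8s.io/apimachinery v0.24.0
    k8s.io/client-go v0.24.0
    k8s.io/kubernetes v1.24.0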

updates: #3086

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2022-05-05 08:17:06 +05:30
Committed by: mergify[bot]
Parent: fc1529f268
Commit: c4f79d455f
959 changed files with 80055 additions and 27456 deletions


@ -86,16 +86,6 @@ rules:
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/dockershim
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
@ -197,6 +187,9 @@ rules:
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/scheduler
- k8s.io/kubernetes/pkg/scheduler/profile
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
@ -249,6 +242,7 @@ rules:
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/common
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper


@ -1,23 +1,22 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-testing-approvers
- timothysc
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
- SataQiu
- sig-testing-approvers
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
- SataQiu
reviewers:
- sig-testing-reviewers
- timothysc
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
- SataQiu
- alejandrox1
- sig-testing-reviewers
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
- SataQiu
labels:
- area/e2e-test-framework
- area/e2e-test-framework
emeritus_approvers:
- timothysc


@ -23,7 +23,7 @@ import (
"net/url"
"strings"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
@ -75,7 +75,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
}, scheme.ParameterCodec)
var stdout, stderr bytes.Buffer
Logf("ExecWithOptions: execute(POST %s %s)", req.URL())
Logf("ExecWithOptions: execute(POST %s)", req.URL())
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err


@ -24,8 +24,8 @@ package framework
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"strings"
"sync"
@ -47,6 +47,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -78,11 +79,12 @@ type Framework struct {
ScalesGetter scaleclient.ScalesGetter
SkipNamespaceCreation bool // Whether to skip creating a namespace
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace
SkipNamespaceCreation bool // Whether to skip creating a namespace
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace
NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level to be applied to namespaces.
gatherer *ContainerResourceGatherer
// Constraints that passed to a check which is executed after data is gathered to
@ -328,7 +330,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
if err := os.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
@ -345,7 +347,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
Logf("Writing to %s", filePath)
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
if err := os.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
@ -521,6 +523,23 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
if createTestingNS == nil {
createTestingNS = CreateTestingNS
}
if labels == nil {
labels = make(map[string]string)
} else {
labelsCopy := make(map[string]string)
for k, v := range labels {
labelsCopy[k] = v
}
labels = labelsCopy
}
enforceLevel := admissionapi.LevelRestricted
if f.NamespacePodSecurityEnforceLevel != "" {
enforceLevel = f.NamespacePodSecurityEnforceLevel
}
labels[admissionapi.EnforceLevelLabel] = string(enforceLevel)
ns, err := createTestingNS(baseName, f.ClientSet, labels)
// check ns instead of err to see if it's nil as we may
// fail to create serviceAccount in it.
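A usage sketch of the new knob (hypothetical test, not part of this diff): a suite that needs privileged pods can override the default enforcement level before its namespace is created:

	// Run the test namespace at the "privileged" Pod Security level instead
	// of the default "restricted" applied by CreateNamespace above
	// (the framework name "csi-mock" is an assumed example).
	f := framework.NewDefaultFramework("csi-mock")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged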


@ -142,10 +142,6 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
}
}
command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
// TODO(mauriciopoppe): remove this statement once we add `sync` to the test image, ref #101172
if e2epod.NodeOSDistroIs("windows") {
command = fmt.Sprintf("echo '%s' > '%s';", contents, path)
}
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
if err != nil {
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v", err, string(stdout), string(stderr))


@ -19,7 +19,7 @@ package metrics
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"sort"
"strconv"
@ -73,7 +73,7 @@ func GrabKubeletMetricsWithoutProxy(nodeName, path string) (KubeletMetrics, erro
return KubeletMetrics{}, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return KubeletMetrics{}, err
}


@ -25,16 +25,11 @@ import (
utilpointer "k8s.io/utils/pointer"
)
// PreconfiguredRuntimeClassHandler returns configured runtime handler.
func PreconfiguredRuntimeClassHandler(handler string) string {
if handler == "docker" {
return handler
}
// test-handler is the name of the runtime handler that is expected to be
// preconfigured in the test environment.
return "test-handler"
}
const (
// PreconfiguredRuntimeClassHandler is the name of the runtime handler
// that is expected to be preconfigured in the test environment.
PreconfiguredRuntimeClassHandler = "test-handler"
)
// NewRuntimeClassPod returns a test pod with the given runtimeClassName
func NewRuntimeClassPod(runtimeClassName string) *v1.Pod {


@ -34,7 +34,7 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const etcdImage = "3.5.1-0"
const etcdImage = "3.5.3-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {


@ -20,7 +20,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"regexp"
@ -116,7 +116,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
}
errorStream.Close()
go func() {
message, err := ioutil.ReadAll(errorStream)
message, err := io.ReadAll(errorStream)
switch {
case err != nil:
klog.ErrorS(err, "error reading from error stream")


@ -576,6 +576,28 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
return execPod
}
// WithWindowsHostProcess sets the Pod's Windows HostProcess option to true. When this option is set,
// HostNetwork can be enabled.
// Containers running as HostProcess must run as one of these usernames, otherwise the Pod will
// not start: NT AUTHORITY\SYSTEM, NT AUTHORITY\Local service, NT AUTHORITY\NetworkService.
// If the given username is empty, NT AUTHORITY\SYSTEM will be used instead.
// See: https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/
func WithWindowsHostProcess(pod *v1.Pod, username string) {
if pod.Spec.SecurityContext == nil {
pod.Spec.SecurityContext = &v1.PodSecurityContext{}
}
if pod.Spec.SecurityContext.WindowsOptions == nil {
pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{}
}
trueVar := true
if username == "" {
username = "NT AUTHORITY\\SYSTEM"
}
pod.Spec.SecurityContext.WindowsOptions.HostProcess = &trueVar
pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = &username
}
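A minimal usage sketch (hypothetical pod value, not part of this change):

	// Enable HostProcess on a pod spec; the empty username defaults the
	// container user to NT AUTHORITY\SYSTEM as documented above.
	pod := &v1.Pod{}
	WithWindowsHostProcess(pod, "")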
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.


@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
)
// NodeOSDistroIs returns true if the distro is the same as `--node-os-distro`
@ -113,3 +114,19 @@ func GetLinuxLabel() *v1.SELinuxOptions {
return &v1.SELinuxOptions{
Level: "s0:c0,c1"}
}
// GetRestrictedPodSecurityContext returns a minimal restricted pod security context.
func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
return &v1.PodSecurityContext{
RunAsNonRoot: pointer.BoolPtr(true),
SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
}
}
// GetRestrictedContainerSecurityContext returns a minimal restricted container security context.
func GetRestrictedContainerSecurityContext() *v1.SecurityContext {
return &v1.SecurityContext{
AllowPrivilegeEscalation: pointer.BoolPtr(false),
Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
}
}
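As an illustration (hypothetical pod value), the two helpers are meant to be combined so that a pod passes the restricted Pod Security level:

	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Name: "c"}}}}
	pod.Spec.SecurityContext = GetRestrictedPodSecurityContext()
	pod.Spec.Containers[0].SecurityContext = GetRestrictedContainerSecurityContext()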


@ -561,7 +561,7 @@ func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, t
return false, nil
})
if waitErr != nil {
return fmt.Errorf("error waiting for pod SysctlForbidden status: %v", waitErr)
return fmt.Errorf("error waiting for pod failure status: %v", waitErr)
}
return nil
}


@ -21,7 +21,7 @@ import (
"os"
"sync"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
)
@ -97,6 +97,9 @@ type ProviderInterface interface {
CreatePD(zone string) (string, error)
DeletePD(pdName string) error
CreateShare() (string, string, string, error)
DeleteShare(accountName, shareName string) error
CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error)
DeletePVSource(pvSource *v1.PersistentVolumeSource) error
@ -137,6 +140,14 @@ func (n NullProvider) DeleteNode(node *v1.Node) error {
return fmt.Errorf("provider does not support DeleteNode")
}
func (n NullProvider) CreateShare() (string, string, string, error) {
return "", "", "", fmt.Errorf("provider does not support volume creation")
}
func (n NullProvider) DeleteShare(accountName, shareName string) error {
return fmt.Errorf("provider does not support volume deletion")
}
// CreatePD is a base implementation which creates PD.
func (n NullProvider) CreatePD(zone string) (string, error) {
return "", fmt.Errorf("provider does not support volume creation")


@ -664,15 +664,23 @@ func createPDWithRetry(zone string) (string, error) {
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
newDiskName, err = createPD(zone)
if err != nil {
framework.Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
framework.Logf("Couldn't create a new PD in zone %q, sleeping 5 seconds: %v", zone, err)
continue
}
framework.Logf("Successfully created a new PD: %q.", newDiskName)
framework.Logf("Successfully created a new PD in zone %q: %q.", zone, newDiskName)
return newDiskName, nil
}
return "", err
}
func CreateShare() (string, string, string, error) {
return framework.TestContext.CloudConfig.Provider.CreateShare()
}
func DeleteShare(accountName, shareName string) error {
return framework.TestContext.CloudConfig.Provider.DeleteShare(accountName, shareName)
}
// CreatePDWithRetry creates PD with retry.
func CreatePDWithRetry() (string, error) {
return createPDWithRetry("")


@ -272,16 +272,6 @@ func SkipIfAppArmorNotSupported() {
SkipUnlessNodeOSDistroIs(AppArmorDistros...)
}
// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
func RunIfContainerRuntimeIs(runtimes ...string) {
for _, containerRuntime := range runtimes {
if containerRuntime == framework.TestContext.ContainerRuntime {
return
}
}
skipInternalf(1, "Skipped because container runtime %q is not in %s", framework.TestContext.ContainerRuntime, runtimes)
}
// RunIfSystemSpecNameIs runs if the system spec name is included in the names.
func RunIfSystemSpecNameIs(names ...string) {
for _, name := range names {
@ -313,3 +303,10 @@ func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c
skipInternalf(1, "Skipped because client failed to delete component:%s pod, err:%v", componentName, err)
}
}
// SkipIfIPv6 skips if the cluster IP family is IPv6 and the provider is included in the unsupportedProviders.
func SkipIfIPv6(unsupportedProviders ...string) {
if framework.TestContext.ClusterIsIPv6() && framework.ProviderIs(unsupportedProviders...) {
skipInternalf(1, "Not supported for IPv6 clusters and providers %v (found %s)", unsupportedProviders, framework.TestContext.Provider)
}
}


@ -20,7 +20,6 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
@ -102,7 +101,7 @@ func GetSigner(provider string) (ssh.Signer, error) {
}
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := ioutil.ReadFile(key)
buffer, err := os.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
}
@ -245,7 +244,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, err
return false, nil // retrying, error will be logged above
}
return true, nil
})


@ -22,7 +22,6 @@ import (
"errors"
"flag"
"fmt"
"io/ioutil"
"math"
"os"
"sort"
@ -78,13 +77,12 @@ type TestContextType struct {
KubeConfig string
KubeContext string
KubeAPIContentType string
KubeVolumeDir string
KubeletRootDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
// TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
RepoRoot string
DockershimCheckpointDir string
RepoRoot string
// ListImages will list off all images that are used then quit
ListImages bool
@ -109,7 +107,6 @@ type TestContextType struct {
EtcdUpgradeStorage string
EtcdUpgradeVersion string
GCEUpgradeScript string
ContainerRuntime string
ContainerRuntimeEndpoint string
ContainerRuntimeProcessName string
ContainerRuntimePidFile string
@ -308,15 +305,17 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flags.Var(cliflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flags.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/containerd/containerd.sock", "The container runtime endpoint of cluster VM instances.")
flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
flags.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.")
flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
flags.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
// Expect the test suite to work with both the new and legacy non-blocking control plane taints by default
// TODO: remove the node-role.kubernetes.io/master taint in 1.25 or later.
// The change will likely require an action for some users that do not
// use k8s originated tools like kubeadm or kOps for creating clusters
// and taint their control plane nodes with "master", expecting the test
// suite to work with this legacy non-blocking taint.
flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.")
flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
@ -338,7 +337,8 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
flags.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")
flags.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flags.StringVar(&TestContext.KubeletRootDir, "kubelet-root-dir", "/var/lib/kubelet", "The data directory of kubelet. Some tests (for example, CSI storage tests) deploy DaemonSets which need to know this value and cannot query it. Such tests only work in clusters where the data directory is the same on all nodes.")
flags.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flags.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
// NOTE: Node E2E tests have this flag defined as well, but true by default.
@ -450,7 +450,7 @@ func AfterReadingAllFlags(t *TestContextType) {
if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
// Check if we can use the in-cluster config
if clusterConfig, err := restclient.InClusterConfig(); err == nil {
if tempFile, err := ioutil.TempFile(os.TempDir(), "kubeconfig-"); err == nil {
if tempFile, err := os.CreateTemp(os.TempDir(), "kubeconfig-"); err == nil {
kubeConfig := createKubeConfig(clusterConfig)
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
t.KubeConfig = tempFile.Name()


@ -29,7 +29,6 @@ import (
"errors"
"fmt"
"io/fs"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -82,12 +81,12 @@ func Read(filePath string) ([]byte, error) {
}
// Here we try to generate an error that points test authors
// or users in the right direction for resolving the problem.
error := fmt.Sprintf("Test file %q was not found.\n", filePath)
err := fmt.Sprintf("Test file %q was not found.\n", filePath)
for _, filesource := range filesources {
error += filesource.DescribeFiles()
error += "\n"
err += filesource.DescribeFiles()
err += "\n"
}
return nil, errors.New(error)
return nil, errors.New(err)
}
// Exists checks whether a file could be read. Unexpected errors
@ -122,7 +121,7 @@ func (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {
} else {
fullPath = filepath.Join(r.Root, filePath)
}
data, err := ioutil.ReadFile(fullPath)
data, err := os.ReadFile(fullPath)
if os.IsNotExist(err) {
// Not an error (yet), some other provider may have the file.
return nil, nil


@ -26,6 +26,7 @@ const (
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
dataSourceProvisionTimeout = 5 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
@ -56,6 +57,9 @@ type TimeoutContext struct {
// ClaimProvision is how long claims have to become dynamically provisioned.
ClaimProvision time.Duration
// DataSourceProvision is how long claims have to become dynamically provisioned from source claim.
DataSourceProvision time.Duration
// ClaimProvisionShort is the same as `ClaimProvision`, but shorter.
ClaimProvisionShort time.Duration
@ -96,6 +100,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext {
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
DataSourceProvision: dataSourceProvisionTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,


@ -1,11 +1,9 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-storage-approvers
- rootfs
- jingxu97
- sig-storage-approvers
- jingxu97
reviewers:
- sig-storage-reviewers
- jeffvance
- copejon
- davidz627
- sig-storage-reviewers
emeritus_approvers:
- rootfs


@ -41,6 +41,7 @@ package volume
import (
"context"
"crypto/sha256"
"fmt"
"path/filepath"
"strconv"
@ -51,6 +52,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
@ -235,6 +237,73 @@ func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod
return pod, ip
}
// GetVolumeAttachmentName returns the name of the VolumeAttachment for the PV that is bound to the
// PVC with the passed-in claimName and claimNamespace. The name is a hash of the volume handle,
// the provisioner, and the node name.
func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
var nodeName string
// For provisioning tests, ClientNodeSelection is not set so we do not know the NodeName of the VolumeAttachment of the PV that is
// bound to the PVC with the passed in claimName and claimNamespace. We need this NodeName because it is used to generate the
// attachmentName that is returned, and used to look up a certain VolumeAttachment in WaitForVolumeAttachmentTerminated.
// To get the nodeName of the VolumeAttachment, we get all the VolumeAttachments, look for the VolumeAttachment with a
// PersistentVolumeName equal to the PV that is bound to the passed in PVC, and then we get the NodeName from that VolumeAttachment.
if config.ClientNodeSelection.Name == "" {
claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
pvName := claim.Spec.VolumeName
volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
for _, volumeAttachment := range volumeAttachments.Items {
if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName {
nodeName = volumeAttachment.Spec.NodeName
break
}
}
} else {
nodeName = config.ClientNodeSelection.Name
}
handle := getVolumeHandle(cs, claimName, claimNamespace)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName)))
return fmt.Sprintf("csi-%x", attachmentHash)
}
// getVolumeHandle returns the VolumeHandle of the PV that is bound to the PVC with the passed in claimName and claimNamespace.
func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace string) string {
// re-get the claim to the latest state with bound volume
claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
}
if pv.Spec.CSI == nil {
gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
return ""
}
return pv.Spec.CSI.VolumeHandle
}
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface, timeout time.Duration) error {
waitErr := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
if waitErr != nil {
return fmt.Errorf("error waiting for volume attachment %v to terminate: %v", attachmentName, waitErr)
}
return nil
}
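A sketch of how these two helpers chain together in a test (cs, config, provisioner, claimName, and claimNamespace are assumed to be in scope):

	// Resolve the VolumeAttachment created for the bound PVC, then wait
	// for it to disappear once the CSI driver detaches the volume.
	attachmentName := GetVolumeAttachmentName(cs, config, provisioner, claimName, claimNamespace)
	err := WaitForVolumeAttachmentTerminated(attachmentName, cs, 5*time.Minute)
	framework.ExpectNoError(err, "VolumeAttachment %s did not terminate", attachmentName)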
// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.


@ -27,8 +27,10 @@ import (
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
@ -269,17 +271,19 @@ func describeItem(item interface{}) string {
var errorItemNotSupported = errors.New("not supported")
var factories = map[What]ItemFactory{
{"ClusterRole"}: &clusterRoleFactory{},
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
{"Service"}: &serviceFactory{},
{"ServiceAccount"}: &serviceAccountFactory{},
{"StatefulSet"}: &statefulSetFactory{},
{"StorageClass"}: &storageClassFactory{},
{"ClusterRole"}: &clusterRoleFactory{},
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
{"Service"}: &serviceFactory{},
{"ServiceAccount"}: &serviceAccountFactory{},
{"StatefulSet"}: &statefulSetFactory{},
{"Deployment"}: &deploymentFactory{},
{"StorageClass"}: &storageClassFactory{},
{"CustomResourceDefinition"}: &customResourceDefinitionFactory{},
}
// PatchName makes the name of some item unique by appending the
@ -362,6 +366,14 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *appsv1.Deployment:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *appsv1.DaemonSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
@ -370,6 +382,8 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *apiextensionsv1.CustomResourceDefinition:
// Do nothing. Patching the name of every CRD is not always the expected behavior.
default:
return fmt.Errorf("missing support for patching item of type %T", item)
}
@ -528,6 +542,27 @@ func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i in
}, nil
}
type deploymentFactory struct{}
func (f *deploymentFactory) New() runtime.Object {
return &appsv1.Deployment{}
}
func (*deploymentFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.Deployment)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().Deployments(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create Deployment: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type daemonSetFactory struct{}
func (f *daemonSetFactory) New() runtime.Object {
@ -612,6 +647,35 @@ func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interfa
}, nil
}
type customResourceDefinitionFactory struct{}
func (f *customResourceDefinitionFactory) New() runtime.Object {
return &apiextensionsv1.CustomResourceDefinition{}
}
func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
var err error
unstructCRD := &unstructured.Unstructured{}
gvr := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
item, ok := i.(*apiextensionsv1.CustomResourceDefinition)
if !ok {
return nil, errorItemNotSupported
}
unstructCRD.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(i)
if err != nil {
return nil, err
}
if _, err = f.DynamicClient.Resource(gvr).Create(context.TODO(), unstructCRD, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create CustomResourceDefinition: %w", err)
}
return func() error {
return f.DynamicClient.Resource(gvr).Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
// PrettyPrint returns a human-readable representation of an item.
func PrettyPrint(item interface{}) string {
data, err := json.MarshalIndent(item, "", " ")


@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@ -52,6 +53,10 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
o.OldDriverName != o.NewDriverName
substKubeletRootDir := func(s string) string {
return strings.ReplaceAll(s, "/var/lib/kubelet/", e2eframework.TestContext.KubeletRootDir+"/")
}
patchVolumes := func(volumes []v1.Volume) {
if !rename {
return
@ -65,6 +70,8 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
if file == o.OldDriverName {
*p = path.Join(dir, o.NewDriverName)
}
// Inject non-standard kubelet path.
*p = substKubeletRootDir(*p)
}
}
}
@ -79,6 +86,15 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
container.Args[e] = strings.Replace(container.Args[e], "/"+o.OldDriverName+"/", "/"+o.NewDriverName+"/", 1)
}
}
// Modify --kubelet-registration-path.
for e := range container.Args {
container.Args[e] = substKubeletRootDir(container.Args[e])
}
for e := range container.VolumeMounts {
container.VolumeMounts[e].MountPath = substKubeletRootDir(container.VolumeMounts[e].MountPath)
}
// Overwrite the driver name or the provider name, respectively,
// by appending a parameter with the right value.
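For illustration (assumed flag value): with --kubelet-root-dir=/data/kubelet, the substitution above behaves like

	// substKubeletRootDir("/var/lib/kubelet/plugins_registry/")
	//   == "/data/kubelet/plugins_registry/"

and is applied to volume mounts and container arguments alike.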


@ -0,0 +1,22 @@
# test/e2e/testing-manifests
## Embedded Test Data
If you need to use a test fixture inside your tests and it is defined inside this directory, it has to be added to the `//go:embed` directive in `embed.go`.
For example, if you wanted to include this README as a test fixture (probably a bad idea in practice!),
```
// embed.go
...
//go:embed some other files README.md
...
```
This fixture can be accessed in the e2e tests using `test/e2e/framework/testfiles.Read` like
`testfiles.Read("test/e2e/testing-manifests/README.md")`.
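A minimal sketch of reading the fixture from test code (assuming the framework's `testfiles` package is imported; error handling abbreviated):
```
import (
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// readme returns the embedded fixture. Read searches every registered
// file source, including the embedded FS for testing-manifests.
func readme() ([]byte, error) {
	return e2etestfiles.Read("test/e2e/testing-manifests/README.md")
}
```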
This is needed since [migrating to //go:embed from go-bindata][1].
[1]: https://github.com/kubernetes/kubernetes/pull/99829


@ -0,0 +1,21 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: dns-backend
labels:
name: dns-backend
spec:
replicas: 1
selector:
name: dns-backend
template:
metadata:
labels:
name: dns-backend
spec:
containers:
- name: dns-backend
image: k8s.gcr.io/example-dns-backend:v1
ports:
- name: backend-port
containerPort: 8000


@ -0,0 +1,9 @@
kind: Service
apiVersion: v1
metadata:
name: dns-backend
spec:
ports:
- port: 8000
selector:
name: dns-backend


@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
name: dns-frontend
labels:
name: dns-frontend
spec:
containers:
- name: dns-frontend
image: k8s.gcr.io/example-dns-frontend:v1
command:
- python
- client.py
- http://dns-backend.development.svc.cluster.local:8000
imagePullPolicy: Always
restartPolicy: Never


@ -0,0 +1,33 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing_manifests
import (
"embed"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)
//go:embed cluster-dns flexvolume guestbook kubectl sample-device-plugin.yaml scheduling/nvidia-driver-installer.yaml statefulset storage-csi
var e2eTestingManifestsFS embed.FS
func GetE2ETestingManifestsFS() e2etestfiles.EmbeddedFileSource {
return e2etestfiles.EmbeddedFileSource{
EmbeddedFS: e2eTestingManifestsFS,
Root: "test/e2e/testing-manifests",
}
}


@ -0,0 +1,145 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This driver is especially designed to test a long mounting scenario
# which can cause a volume to be detached while mount is in progress.
FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}
VALID_MNTDEVICE=foo
# attach always returns one valid mount device so a different device
# showing up in a subsequent driver call implies a bug
validateMountDeviceOrDie() {
MNTDEVICE=$1
CALL=$2
if [ "$MNTDEVICE" != "$VALID_MNTDEVICE" ]; then
log "{\"status\":\"Failure\",\"message\":\"call "${CALL}" expected device "${VALID_MNTDEVICE}", got device "${MNTDEVICE}"\"}"
exit 0
fi
}
log() {
printf "$*" >&1
}
debug() {
echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}
attach() {
debug "attach $@"
log "{\"status\":\"Success\",\"device\":\""${VALID_MNTDEVICE}"\"}"
exit 0
}
detach() {
debug "detach $@"
# TODO issue 44737 detach is passed PV name, not mount device
log "{\"status\":\"Success\"}"
exit 0
}
waitforattach() {
debug "waitforattach $@"
MNTDEVICE=$1
validateMountDeviceOrDie "$MNTDEVICE" "waitforattach"
log "{\"status\":\"Success\",\"device\":\""${MNTDEVICE}"\"}"
exit 0
}
isattached() {
debug "isattached $@"
log "{\"status\":\"Success\",\"attached\":true}"
exit 0
}
domountdevice() {
debug "domountdevice $@"
MNTDEVICE=$2
validateMountDeviceOrDie "$MNTDEVICE" "domountdevice"
MNTPATH=$1
mkdir -p ${MNTPATH} >/dev/null 2>&1
mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
sleep 120
echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
log "{\"status\":\"Success\"}"
exit 0
}
unmountdevice() {
debug "unmountdevice $@"
MNTPATH=$1
rm "${MNTPATH}/index.html" >/dev/null 2>&1
umount ${MNTPATH} >/dev/null 2>&1
log "{\"status\":\"Success\"}"
exit 0
}
expandvolume() {
debug "expandvolume $@"
log "{\"status\":\"Success\"}"
exit 0
}
expandfs() {
debug "expandfs $@"
log "{\"status\":\"Success\"}"
exit 0
}
op=$1
if [ "$op" = "init" ]; then
debug "init $@"
log "{\"status\":\"Success\",\"capabilities\":{\"attach\":true, \"requiresFSResize\":true}}"
exit 0
fi
shift
case "$op" in
attach)
attach $*
;;
detach)
detach $*
;;
waitforattach)
waitforattach $*
;;
isattached)
isattached $*
;;
mountdevice)
domountdevice $*
;;
unmountdevice)
unmountdevice $*
;;
expandvolume)
expandvolume $*
;;
expandfs)
expandfs $*
;;
*)
log "{\"status\":\"Not supported\"}"
exit 0
esac
exit 1


@ -0,0 +1,70 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This driver implements a tmpfs with a pre-populated file index.html.
FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}
log() {
printf "$*" >&1
}
debug() {
echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}
domount() {
debug "domount $@"
MNTPATH=$1
mkdir -p ${MNTPATH} >/dev/null 2>&1
mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
log "{\"status\":\"Success\"}"
exit 0
}
unmount() {
debug "unmount $@"
MNTPATH=$1
rm ${MNTPATH}/index.html >/dev/null 2>&1
umount ${MNTPATH} >/dev/null 2>&1
log "{\"status\":\"Success\"}"
exit 0
}
op=$1
if [ "$op" = "init" ]; then
debug "init $@"
log "{\"status\":\"Success\",\"capabilities\":{\"attach\":false}}"
exit 0
fi
shift
case "$op" in
mount)
domount $*
;;
unmount)
unmount $*
;;
*)
log "{\"status\":\"Not supported\"}"
exit 0
esac
exit 1


@ -0,0 +1,143 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This driver implements a tmpfs with a pre-populated file index.html.
# Attach is required, but it is a no-op that always returns success.
FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}
VALID_MNTDEVICE=foo
# attach always returns one valid mount device so a different device
# showing up in a subsequent driver call implies a bug
validateMountDeviceOrDie() {
MNTDEVICE=$1
CALL=$2
if [ "$MNTDEVICE" != "$VALID_MNTDEVICE" ]; then
log "{\"status\":\"Failure\",\"message\":\"call "${CALL}" expected device "${VALID_MNTDEVICE}", got device "${MNTDEVICE}"\"}"
exit 0
fi
}
log() {
printf "$*" >&1
}
debug() {
echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}
attach() {
debug "attach $@"
log "{\"status\":\"Success\",\"device\":\""${VALID_MNTDEVICE}"\"}"
exit 0
}
detach() {
debug "detach $@"
# TODO issue 44737 detach is passed PV name, not mount device
log "{\"status\":\"Success\"}"
exit 0
}
waitforattach() {
debug "waitforattach $@"
MNTDEVICE=$1
validateMountDeviceOrDie "$MNTDEVICE" "waitforattach"
log "{\"status\":\"Success\",\"device\":\""${MNTDEVICE}"\"}"
exit 0
}
isattached() {
debug "isattached $@"
log "{\"status\":\"Success\",\"attached\":true}"
exit 0
}
domountdevice() {
debug "domountdevice $@"
MNTDEVICE=$2
validateMountDeviceOrDie "$MNTDEVICE" "domountdevice"
MNTPATH=$1
mkdir -p ${MNTPATH} >/dev/null 2>&1
mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
log "{\"status\":\"Success\"}"
exit 0
}
unmountdevice() {
debug "unmountdevice $@"
MNTPATH=$1
rm "${MNTPATH}/index.html" >/dev/null 2>&1
umount ${MNTPATH} >/dev/null 2>&1
log "{\"status\":\"Success\"}"
exit 0
}
expandvolume() {
debug "expandvolume $@"
log "{\"status\":\"Success\"}"
exit 0
}
expandfs() {
debug "expandfs $@"
log "{\"status\":\"Success\"}"
exit 0
}
op=$1
if [ "$op" = "init" ]; then
debug "init $@"
log "{\"status\":\"Success\",\"capabilities\":{\"attach\":true, \"requiresFSResize\":true}}"
exit 0
fi
shift
case "$op" in
attach)
attach $*
;;
detach)
detach $*
;;
waitforattach)
waitforattach $*
;;
isattached)
isattached $*
;;
mountdevice)
domountdevice $*
;;
unmountdevice)
unmountdevice $*
;;
expandvolume)
expandvolume $*
;;
expandfs)
expandfs $*
;;
*)
log "{\"status\":\"Not supported\"}"
exit 0
esac
exit 1


@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: agnhost-primary
spec:
replicas: 1
selector:
matchLabels:
app: agnhost
role: primary
tier: backend
template:
metadata:
labels:
app: agnhost
role: primary
tier: backend
spec:
containers:
- name: primary
image: {{.AgnhostImage}}
args: [ "guestbook", "--http-port", "6379" ]
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: agnhost-primary
labels:
app: agnhost
role: primary
tier: backend
spec:
ports:
- port: 6379
targetPort: 6379
selector:
app: agnhost
role: primary
tier: backend


@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: agnhost-replica
spec:
replicas: 2
selector:
matchLabels:
app: agnhost
role: replica
tier: backend
template:
metadata:
labels:
app: agnhost
role: replica
tier: backend
spec:
containers:
- name: replica
image: {{.AgnhostImage}}
args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ]
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: agnhost-replica
labels:
app: agnhost
role: replica
tier: backend
spec:
ports:
- port: 6379
selector:
app: agnhost
role: replica
tier: backend


@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
spec:
replicas: 3
selector:
matchLabels:
app: guestbook
tier: frontend
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: guestbook-frontend
image: {{.AgnhostImage}}
args: [ "guestbook", "--backend-port", "6379" ]
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 80


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: frontend
labels:
app: guestbook
tier: frontend
spec:
# if your cluster supports it, uncomment the following to automatically create
# an external load-balanced IP for the frontend service.
# type: LoadBalancer
ports:
- port: 80
selector:
app: guestbook
tier: frontend


@ -0,0 +1,29 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: frontend
spec:
replicas: 3
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: php-redis
image: gcr.io/google_samples/gb-frontend:v4
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access environment variables to find service host
# info, comment out the 'value: dns' line above, and uncomment the
# line below:
# value: env
ports:
- containerPort: 80


@ -0,0 +1,26 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: redis-master
labels:
app: redis
role: master
tier: backend
spec:
replicas: 1
template:
metadata:
labels:
app: redis
role: master
tier: backend
spec:
containers:
- name: master
image: docker.io/library/redis:5.0.5-alpine
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379


@ -0,0 +1,37 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: redis-slave
labels:
app: redis
role: slave
tier: backend
spec:
replicas: 2
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: docker.io/library/redis:5.0.5-alpine
# We are only implementing the dns option of:
# https://github.com/kubernetes/examples/blob/97c7ed0eb6555a4b667d2877f965d392e00abc45/guestbook/redis-slave/run.sh
command: [ "redis-server", "--slaveof", "redis-master", "6379" ]
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below:
# value: env
ports:
- containerPort: 6379


@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-master
spec:
replicas: 1
selector:
matchLabels:
app: redis
role: master
tier: backend
template:
metadata:
labels:
app: redis
role: master
tier: backend
spec:
containers:
- name: master
image: {{.RedisImage}}
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: redis-master
labels:
app: redis
role: master
tier: backend
spec:
ports:
- port: 6379
targetPort: 6379
selector:
app: redis
role: master
tier: backend


@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-slave
spec:
replicas: 2
selector:
matchLabels:
app: redis
role: slave
tier: backend
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: {{.RedisImage}}
# We are only implementing the dns option of:
# https://github.com/kubernetes/examples/blob/97c7ed0eb6555a4b667d2877f965d392e00abc45/guestbook/redis-slave/run.sh
command: [ "redis-server", "--slaveof", "redis-master", "6379" ]
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below:
# value: env
ports:
- containerPort: 6379


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: redis-slave
labels:
app: redis
role: slave
tier: backend
spec:
ports:
- port: 6379
selector:
app: redis
role: slave
tier: backend


@ -0,0 +1,40 @@
{
"kind":"ReplicationController",
"apiVersion":"v1",
"metadata":{
"name":"agnhost-primary",
"labels":{
"app":"agnhost",
"role":"primary"
}
},
"spec":{
"replicas":1,
"selector":{
"app":"agnhost",
"role":"primary"
},
"template":{
"metadata":{
"labels":{
"app":"agnhost",
"role":"primary"
}
},
"spec":{
"containers":[
{
"name":"agnhost-primary",
"image": "{{.AgnhostImage}}",
"ports":[
{
"name":"agnhost-server",
"containerPort":6379
}
]
}
]
}
}
}
}


@ -0,0 +1,32 @@
apiVersion: v1
kind: Pod
metadata:
labels:
name: agnhost
role: primary
name: agnhost-primary
spec:
containers:
- name: primary
image: k8s.gcr.io/e2e-test-images/agnhost:2.32
env:
- name: PRIMARY
value: "true"
ports:
- containerPort: 6379
resources:
limits:
cpu: "0.1"
volumeMounts:
- mountPath: /agnhost-primary-data
name: data
- name: sentinel
image: k8s.gcr.io/e2e-test-images/agnhost:2.32
env:
- name: SENTINEL
value: "true"
ports:
- containerPort: 26379
volumes:
- name: data
emptyDir: {}


@ -0,0 +1,23 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "agnhost-primary",
"labels": {
"app": "agnhost",
"role": "primary"
}
},
"spec": {
"ports": [
{
"port": 6379,
"targetPort": "agnhost-server"
}
],
"selector": {
"app": "agnhost",
"role": "primary"
}
}
}


@ -0,0 +1,21 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: cronjob-test
spec:
schedule: "*/1 * * * *"
concurrencyPolicy: Allow
suspend: false
startingDeadlineSeconds: 30
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
jobTemplate:
spec:
template:
spec:
containers:
- name: test
image: {{.BusyBoxImage}}
args:
- "/bin/true"
restartPolicy: OnFailure


@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
name: busybox1
labels:
app: busybox1
spec:
containers:
- image: {{.BusyBoxImage}}
command: ["/bin/sh", "-c", "mkdir -p /root/foo/bar && echo 'foobar' > /root/foo/bar/foo.bar && sleep 3600"]
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always


@ -0,0 +1,19 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: httpd-deployment
spec:
replicas: 2
selector:
matchLabels:
app: httpd
template:
metadata:
labels:
app: httpd
spec:
containers:
- name: httpd
image: {{.HttpdNewImage}}
ports:
- containerPort: 80


@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: httpd-deployment
spec:
selector:
matchLabels:
app: httpd
template:
metadata:
labels:
app: httpd
spec:
containers:
- name: httpd
image: {{.HttpdNewImage}}
ports:
- containerPort: 80


@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: httpd-deployment
spec:
selector:
matchLabels:
app: httpd
template:
metadata:
labels:
app: httpd
spec:
containers:
- name: httpd
image: {{.HttpdImage}}
ports:
- containerPort: 80


@ -0,0 +1,16 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: httpd-rc
spec:
replicas: 1
selector:
run: httpd-rc
template:
metadata:
labels:
run: httpd-rc
spec:
containers:
- image: {{.HttpdNewImage}}
name: httpd-rc


@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: pause
labels:
name: pause
spec:
containers:
- name: pause
image: {{.PauseImage}}
ports:
- containerPort: 80


@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: httpd
labels:
name: httpd
spec:
containers:
- name: httpd
image: {{.HttpdImage}}
ports:
- containerPort: 80
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 5
timeoutSeconds: 5


@ -0,0 +1,13 @@
# Copy of pod.yaml without file extension for test
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80


@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: slow
provisioner: kubernetes.io/rbd
parameters:
monitors: 127.0.0.1:6789
adminId: admin
adminSecretName: ceph-secret-admin
adminSecretNamespace: "kube-system"
pool: kube
userId: kube
userSecretName: ceph-secret-user


@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sample-device-plugin-beta
namespace: kube-system
labels:
k8s-app: sample-device-plugin
spec:
selector:
matchLabels:
k8s-app: sample-device-plugin
template:
metadata:
labels:
k8s-app: sample-device-plugin
annotations:
spec:
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins
- name: plugins-registry-probe-mode
hostPath:
path: /var/lib/kubelet/plugins_registry
- name: dev
hostPath:
path: /dev
containers:
- image: k8s.gcr.io/e2e-test-images/sample-device-plugin:1.3
name: sample-device-plugin
env:
- name: PLUGIN_SOCK_DIR
value: "/var/lib/kubelet/device-plugins"
securityContext:
privileged: true
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
- name: plugins-registry-probe-mode
mountPath: /var/lib/kubelet/plugins_registry
- name: dev
mountPath: /dev
updateStrategy:
type: RollingUpdate


@ -0,0 +1,86 @@
# This DaemonSet was originally referenced from
# https://github.com/GoogleCloudPlatform/container-engine-accelerators/blob/master/daemonset.yaml
# The Dockerfile and other source for this daemonset are in
# https://github.com/GoogleCloudPlatform/cos-gpu-installer
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nvidia-driver-installer
namespace: kube-system
labels:
k8s-app: nvidia-driver-installer
spec:
selector:
matchLabels:
k8s-app: nvidia-driver-installer
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
name: nvidia-driver-installer
k8s-app: nvidia-driver-installer
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cloud.google.com/gke-accelerator
operator: Exists
tolerations:
- operator: "Exists"
hostNetwork: true
hostPID: true
volumes:
- name: dev
hostPath:
path: /dev
- name: vulkan-icd-mount
hostPath:
path: /home/kubernetes/bin/nvidia/vulkan/icd.d
- name: nvidia-install-dir-host
hostPath:
path: /home/kubernetes/bin/nvidia
- name: root-mount
hostPath:
path: /
initContainers:
# The COS GPU installer image version may be dependent on the version of COS being used.
# Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/
# and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine the COS GPU installer version for a given version of COS.
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.3 - suitable for COS M85 as per https://cloud.google.com/container-optimized-os/docs/release-notes#cos-85-13310-1209-3
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.5
name: nvidia-driver-installer
resources:
requests:
cpu: 0.15
securityContext:
privileged: true
env:
- name: NVIDIA_INSTALL_DIR_HOST
value: /home/kubernetes/bin/nvidia
- name: NVIDIA_INSTALL_DIR_CONTAINER
value: /usr/local/nvidia
- name: VULKAN_ICD_DIR_HOST
value: /home/kubernetes/bin/nvidia/vulkan/icd.d
- name: VULKAN_ICD_DIR_CONTAINER
value: /etc/vulkan/icd.d
- name: ROOT_MOUNT_DIR
value: /root
volumeMounts:
- name: nvidia-install-dir-host
mountPath: /usr/local/nvidia
- name: vulkan-icd-mount
mountPath: /etc/vulkan/icd.d
- name: dev
mountPath: /dev
- name: root-mount
mountPath: /root
containers:
- image: "k8s.gcr.io/pause:3.7"
name: pause


@ -0,0 +1,58 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: cassandra
# The labels will be applied automatically
# from the labels in the pod template, if not set
# labels:
# app: cassandra
spec:
replicas: 2
# The selector will be applied automatically
# from the labels in the pod template, if not set.
# selector:
# app: cassandra
template:
metadata:
labels:
app: cassandra
spec:
containers:
- command:
- /run.sh
resources:
limits:
cpu: 0.5
env:
- name: MAX_HEAP_SIZE
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEED_PROVIDER
value: "io.k8s.cassandra.KubernetesSeedProvider"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
image: gcr.io/google-samples/cassandra:v13
name: cassandra
ports:
- containerPort: 7000
name: intra-node
- containerPort: 7001
name: tls-intra-node
- containerPort: 7199
name: jmx
- containerPort: 9042
name: cql
volumeMounts:
- mountPath: /cassandra_data
name: data
volumes:
- name: data
emptyDir: {}
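# Illustrative note (not part of the upstream manifest): emptyDir storage is
# ephemeral, so cassandra data is lost whenever a pod is deleted. The
# StatefulSet variant in this directory uses volumeClaimTemplates for durable
# per-pod storage instead.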

View File

@ -0,0 +1,11 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: cassandra-pdb
labels:
pdb: cassandra
spec:
minAvailable: 2
selector:
matchLabels:
app: cassandra
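# Illustrative note (not part of the upstream manifest): assuming the
# 3-replica cassandra StatefulSet in this directory, minAvailable: 2 permits
# voluntary disruptions (drains, evictions) to take down at most one
# cassandra pod at a time.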

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: cassandra
name: cassandra
spec:
clusterIP: None
ports:
- port: 9042
selector:
app: cassandra

View File

@ -0,0 +1,90 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: cassandra
spec:
serviceName: cassandra
replicas: 3
selector:
matchLabels:
app: cassandra
template:
metadata:
labels:
app: cassandra
spec:
containers:
- name: cassandra
image: gcr.io/google-samples/cassandra:v13
imagePullPolicy: Always
ports:
- containerPort: 7000
name: intra-node
- containerPort: 7001
name: tls-intra-node
- containerPort: 7199
name: jmx
- containerPort: 9042
name: cql
resources:
requests:
cpu: "300m"
memory: 1Gi
securityContext:
capabilities:
add:
- IPC_LOCK
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- nodetool drain
env:
- name: MAX_HEAP_SIZE
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CASSANDRA_SEEDS
value: "cassandra-0.cassandra.$(POD_NAMESPACE).svc.cluster.local"
- name: CASSANDRA_CLUSTER_NAME
value: "K8Demo"
- name: CASSANDRA_DC
value: "DC1-K8Demo"
- name: CASSANDRA_RACK
value: "Rack1-K8Demo"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
readinessProbe:
exec:
command:
- /bin/bash
- -c
- /ready-probe.sh
initialDelaySeconds: 15
timeoutSeconds: 5
        # These volume mounts are persistent. They behave like inline claims,
        # except that the names must match exactly one of the stateful pod
        # volumes.
volumeMounts:
- name: cassandra-data
mountPath: /cassandra_data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  # Do not use these in production until SSD GCEPersistentDisks or other
  # SSD-backed persistent disks are used.
volumeClaimTemplates:
- metadata:
name: cassandra-data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

View File

@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: cassandra-test-server
spec:
replicas: 3
selector:
matchLabels:
app: test-server
template:
metadata:
labels:
app: test-server
spec:
containers:
- name: test-server
image: k8s.gcr.io/cassandra-e2e-test:0.1
imagePullPolicy: Always
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 2
periodSeconds: 2
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: tester-pdb
labels:
pdb: test-server
spec:
minAvailable: 1
selector:
matchLabels:
app: test-server
---
apiVersion: v1
kind: Service
metadata:
labels:
app: test-server
name: test-server
spec:
ports:
- port: 8080
selector:
app: test-server
type: LoadBalancer

View File

@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
# This service only exists to create DNS entries for each pod in the stateful
# set such that they can resolve each other's IP addresses. It does not
# create a load-balanced ClusterIP and should not be used directly by clients
# in most circumstances.
name: cockroachdb
labels:
app: cockroachdb
annotations:
# Enable automatic monitoring of all instances when Prometheus is running in the cluster.
prometheus.io/scrape: "true"
prometheus.io/path: "_status/vars"
prometheus.io/port: "8080"
spec:
ports:
- port: 26257
targetPort: 26257
name: grpc
- port: 8080
targetPort: 8080
name: http
clusterIP: None
selector:
app: cockroachdb
# This is needed to make the peer-finder work properly and to help avoid
# edge cases where instance 0 comes up after losing its data and needs to
# decide whether it should create a new cluster or try to join an existing
# one. If it creates a new cluster when it should have joined an existing
# one, we'd end up with two separate clusters listening at the same service
# endpoint, which would be very bad.
publishNotReadyAddresses: true
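# Illustrative note (not part of the upstream manifest): because clusterIP is
# None, each StatefulSet pod gets a stable DNS record of the form
#   cockroachdb-<ordinal>.cockroachdb.<namespace>.svc.cluster.local
# which is what the bootstrap peer-finder and the --join flag rely on.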

View File

@ -0,0 +1,103 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: cockroachdb
spec:
serviceName: "cockroachdb"
replicas: 3
selector:
matchLabels:
app: cockroachdb
template:
metadata:
labels:
app: cockroachdb
spec:
      # Init containers are run only once in the lifetime of a pod, before
      # it is started up for the first time. Each init container has to exit
      # successfully before the pod's main containers are allowed to start.
      # This particular init container does a DNS lookup for other pods in
      # the set to help determine whether or not a cluster already exists.
      # If any other pods exist, it creates a file in the cockroach-data
      # directory to pass that information along to the primary container that
      # has to decide what command-line flags to use when starting CockroachDB.
      # This only matters when a pod's persistent volume is empty - if it has
      # data from a previous execution, that data will always be used.
initContainers:
- name: bootstrap
image: cockroachdb/cockroach-k8s-init:0.1
imagePullPolicy: IfNotPresent
args:
- "-on-start=/on-start.sh"
- "-service=cockroachdb"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: datadir
mountPath: "/cockroach/cockroach-data"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- cockroachdb
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
image: cockroachdb/cockroach:v1.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
name: grpc
- containerPort: 8080
name: http
volumeMounts:
- name: datadir
mountPath: /cockroach/cockroach-data
command:
- "/bin/bash"
- "-ecx"
- |
# The use of qualified `hostname -f` is crucial:
# Other nodes aren't able to look up the unqualified hostname.
CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
# We only want to initialize a new cluster (by omitting the join flag)
# if we're sure that we're the first node (i.e. index 0) and that
# there aren't any other nodes running as part of the cluster that
# this is supposed to be a part of (which indicates that a cluster
# already exists and we should make sure not to create a new one).
# It's fine to run without --join on a restart if there aren't any
# other nodes.
if [ ! "$(hostname)" == "cockroachdb-0" ] || \
[ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
then
# We don't join cockroachdb in order to avoid a node attempting
# to join itself, which currently doesn't work
# (https://github.com/cockroachdb/cockroach/issues/9625).
CRARGS+=("--join" "cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb")
fi
exec /cockroach/cockroach ${CRARGS[*]}
      # No pre-stop hook is required; a SIGTERM plus some time is all that's
      # needed for the graceful shutdown of a node.
terminationGracePeriodSeconds: 60
volumes:
- name: datadir
persistentVolumeClaim:
claimName: datadir
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
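# Illustrative note (not part of the upstream manifest): the
# volumeClaimTemplate above yields one PVC per pod, named
# datadir-cockroachdb-<ordinal>, retained across pod restarts and
# rescheduling; the cluster_exists_marker check in the init container
# depends on that persistence.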

View File

@ -0,0 +1,11 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: etcd-pdb
labels:
pdb: etcd
spec:
minAvailable: 2
selector:
matchLabels:
app: etcd

View File

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: etcd
labels:
app: etcd
spec:
ports:
- port: 2380
name: etcd-server
- port: 2379
name: etcd-client
clusterIP: None
selector:
app: etcd
publishNotReadyAddresses: true

View File

@ -0,0 +1,178 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: etcd
labels:
app: etcd
spec:
serviceName: etcd
replicas: 3
selector:
matchLabels:
app: etcd
template:
metadata:
name: etcd
labels:
app: etcd
spec:
containers:
- name: etcd
image: k8s.gcr.io/etcd:3.2.24
imagePullPolicy: Always
ports:
- containerPort: 2380
name: peer
- containerPort: 2379
name: client
resources:
requests:
cpu: 100m
memory: 512Mi
env:
- name: INITIAL_CLUSTER_SIZE
value: "3"
- name: SET_NAME
value: etcd
volumeMounts:
- name: datadir
mountPath: /var/run/etcd
lifecycle:
preStop:
exec:
command:
- "/bin/sh"
- "-ec"
- |
EPS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
done
HOSTNAME=$(hostname)
member_hash() {
etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
}
echo "Removing ${HOSTNAME} from etcd cluster"
ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
if [ $? -eq 0 ]; then
                    # Remove everything, otherwise the cluster will no longer scale up
rm -rf /var/run/etcd/*
fi
command:
- "/bin/sh"
- "-ec"
- |
HOSTNAME=$(hostname)
# store member id into PVC for later member replacement
collect_member() {
while ! etcdctl member list &>/dev/null; do sleep 1; done
etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
exit 0
}
eps() {
EPS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
done
echo ${EPS}
}
member_hash() {
etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
}
# re-joining after failure?
if [ -e /var/run/etcd/default.etcd ]; then
echo "Re-joining etcd member"
member_id=$(cat /var/run/etcd/member_id)
# re-join member
ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380
exec etcd --name ${HOSTNAME} \
--listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
--data-dir /var/run/etcd/default.etcd
fi
                # hostname is of the form etcd-<SET_ID>; extract the numeric ordinal
                SET_ID=${HOSTNAME:5:${#HOSTNAME}}
                # adding a new member to an existing cluster (assuming all initial pods are available)
if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
export ETCDCTL_ENDPOINT=$(eps)
# member already added?
MEMBER_HASH=$(member_hash)
if [ -n "${MEMBER_HASH}" ]; then
                # the member hash exists but etcd failed earlier;
                # as the datadir has not been created, we can remove the member
                # and retrieve a new hash
etcdctl member remove ${MEMBER_HASH}
fi
echo "Adding new member"
etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
if [ $? -ne 0 ]; then
echo "Exiting"
rm -f /var/run/etcd/new_member_envs
exit 1
fi
cat /var/run/etcd/new_member_envs
source /var/run/etcd/new_member_envs
collect_member &
exec etcd --name ${HOSTNAME} \
--listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
--data-dir /var/run/etcd/default.etcd \
--initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--initial-cluster ${ETCD_INITIAL_CLUSTER} \
--initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
fi
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
while true; do
echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
sleep 1s
done
done
PEERS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
done
collect_member &
# join member
exec etcd --name ${HOSTNAME} \
--initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster ${PEERS} \
--initial-cluster-state new \
--data-dir /var/run/etcd/default.etcd
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
# upstream recommended max is 700M
storage: 1Gi
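# Illustrative note (not part of the upstream manifest): the member id written
# to /var/run/etcd/member_id lives on the datadir PVC, so it survives pod
# restarts; the re-join branch of the startup script above relies on that to
# issue `etcdctl member update` instead of re-adding the member.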

View File

@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: etcd-test-server
spec:
replicas: 3
selector:
matchLabels:
app: test-server
template:
metadata:
labels:
app: test-server
spec:
containers:
- name: test-server
image: k8s.gcr.io/etcd-statefulset-e2e-test:0.0
imagePullPolicy: Always
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 2
periodSeconds: 2

View File

@ -0,0 +1,16 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: galera
labels:
app: mysql
spec:
ports:
- port: 3306
name: mysql
  # *.galera.default.svc.cluster.local
clusterIP: None
selector:
app: mysql
publishNotReadyAddresses: true

View File

@ -0,0 +1,87 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: "galera"
replicas: 3
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
initContainers:
- name: install
image: k8s.gcr.io/galera-install:0.1
imagePullPolicy: Always
args:
- "--work-dir=/work-dir"
volumeMounts:
- name: workdir
mountPath: "/work-dir"
- name: config
mountPath: "/etc/mysql"
- name: bootstrap
image: debian:jessie
command:
- "/work-dir/peer-finder"
args:
- -on-start="/work-dir/on-start.sh"
- "-service=galera"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
volumeMounts:
- name: workdir
mountPath: "/work-dir"
- name: config
mountPath: "/etc/mysql"
containers:
- name: mysql
image: k8s.gcr.io/mysql-galera:e2e
ports:
- containerPort: 3306
name: mysql
- containerPort: 4444
name: sst
- containerPort: 4567
name: replication
- containerPort: 4568
name: ist
args:
- --defaults-file=/etc/mysql/my-galera.cnf
- --user=root
readinessProbe:
        # TODO: If docker exec is buggy, just use k8s.gcr.io/mysql-healthz:1.0
exec:
command:
- sh
- -c
- "mysql -u root -e 'show databases;'"
initialDelaySeconds: 15
timeoutSeconds: 5
successThreshold: 2
volumeMounts:
- name: datadir
mountPath: /var/lib/
- name: config
mountPath: /etc/mysql
volumes:
- name: config
emptyDir: {}
- name: workdir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql
labels:
app: mysql
data:
master.cnf: |
[mysqld]
log-bin
slave.cnf: |
[mysqld]
super-read-only
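# Illustrative note (not part of the upstream manifest): the init-mysql
# container in the StatefulSet below mounts this ConfigMap and copies
# master.cnf on ordinal 0 (the primary) and slave.cnf on every other ordinal
# (the read-only replicas).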

View File

@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
name: mysql
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
clusterIP: None
selector:
app: mysql
---
apiVersion: v1
kind: Service
metadata:
name: mysql-read
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
selector:
app: mysql
type: LoadBalancer

View File

@ -0,0 +1,162 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: mysql
replicas: 3
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
initContainers:
- name: init-mysql
image: mysql:5.7
command:
- bash
- "-c"
- |
set -ex
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
echo [mysqld] > /mnt/conf.d/server-id.cnf
echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
if [[ $ordinal -eq 0 ]]; then
cp /mnt/config-map/master.cnf /mnt/conf.d/
else
cp /mnt/config-map/slave.cnf /mnt/conf.d/
fi
volumeMounts:
- name: conf
mountPath: /mnt/conf.d
- name: config-map
mountPath: /mnt/config-map
- name: clone-mysql
image: gcr.io/google-samples/xtrabackup:1.0
command:
- bash
- "-c"
- |
set -ex
[[ -d /var/lib/mysql/mysql ]] && exit 0
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
[[ $ordinal -eq 0 ]] && exit 0
ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
xtrabackup --prepare --target-dir=/var/lib/mysql
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
containers:
- name: mysql
image: mysql:5.7.15
env:
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "1"
ports:
- name: mysql
containerPort: 3306
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
resources:
requests:
cpu: 1
memory: 1Gi
livenessProbe:
exec:
command: ["mysqladmin", "ping"]
initialDelaySeconds: 30
timeoutSeconds: 5
readinessProbe:
exec:
command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
initialDelaySeconds: 5
timeoutSeconds: 1
- name: xtrabackup
image: gcr.io/google-samples/xtrabackup:1.0
ports:
- name: xtrabackup
containerPort: 3307
command:
- bash
- "-c"
- |
set -ex
cd /var/lib/mysql
if [[ -f xtrabackup_slave_info ]]; then
mv xtrabackup_slave_info change_master_to.sql.in
rm -f xtrabackup_binlog_info
elif [[ -f xtrabackup_binlog_info ]]; then
[[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
rm xtrabackup_binlog_info
echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
fi
if [[ -f change_master_to.sql.in ]]; then
echo "Waiting for mysqld to be ready (accepting connections)"
until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
echo "Initializing replication from clone position"
mv change_master_to.sql.in change_master_to.sql.orig
mysql -h 127.0.0.1 <<EOF
$(<change_master_to.sql.orig),
MASTER_HOST='mysql-0.mysql',
MASTER_USER='root',
MASTER_PASSWORD='',
MASTER_CONNECT_RETRY=10;
START SLAVE;
EOF
fi
exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
"xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
resources:
requests:
cpu: 100m
memory: 100Mi
volumes:
- name: conf
emptyDir: {}
- name: config-map
configMap:
name: mysql
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: default
resources:
requests:
storage: 10Gi
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: mysql-pdb
labels:
pdb: mysql
spec:
minAvailable: 2
selector:
matchLabels:
app: mysql
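# Illustrative note (not part of the upstream manifest): on scale-up, the
# clone-mysql init container of pod N streams a backup from pod N-1 over port
# 3307 (ncat + xbstream), so new replicas start from a clone of their
# predecessor rather than an empty datadir.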

View File

@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mysql-test-server
spec:
replicas: 3
selector:
matchLabels:
app: test-server
template:
metadata:
labels:
app: test-server
spec:
containers:
- name: test-server
image: k8s.gcr.io/mysql-e2e-test:0.1
imagePullPolicy: Always
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 2
periodSeconds: 2
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: tester-pdb
labels:
pdb: test-server
spec:
minAvailable: 1
selector:
matchLabels:
app: test-server
---
apiVersion: v1
kind: Service
metadata:
labels:
app: test-server
name: test-server
spec:
ports:
- port: 8080
selector:
app: test-server
type: LoadBalancer

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx

View File

@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
spec:
serviceName: "nginx"
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: {{.NginxImageNew}}
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
annotations:
volume.beta.kubernetes.io/storage-class: nginx-sc
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
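# Illustrative note (not part of the upstream manifest):
# volume.beta.kubernetes.io/storage-class is the legacy annotation form; on
# current clusters it corresponds to setting spec.storageClassName: nginx-sc
# in the claim template.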

View File

@ -0,0 +1,16 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: redis
labels:
app: redis
spec:
ports:
- port: 6379
name: peer
# *.redis.default.svc.cluster.local
clusterIP: None
selector:
app: redis
publishNotReadyAddresses: true

View File

@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: rd
spec:
serviceName: "redis"
replicas: 3
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
initContainers:
- name: install
image: k8s.gcr.io/e2e-test-images/pets/redis-installer:1.5
imagePullPolicy: Always
args:
- "--install-into=/opt"
- "--work-dir=/work-dir"
volumeMounts:
- name: opt
mountPath: "/opt"
- name: workdir
mountPath: "/work-dir"
- name: bootstrap
image: debian:jessie
command:
- "/work-dir/peer-finder"
args:
- -on-start="/work-dir/on-start.sh"
- "-service=redis"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
volumeMounts:
- name: opt
mountPath: "/opt"
- name: workdir
mountPath: "/work-dir"
containers:
- name: redis
image: debian:jessie
ports:
- containerPort: 6379
name: peer
command:
- /opt/redis/redis-server
args:
- /opt/redis/redis.conf
readinessProbe:
exec:
command:
- sh
- -c
- "/opt/redis/redis-cli -h $(hostname) ping"
initialDelaySeconds: 15
timeoutSeconds: 5
volumeMounts:
- name: datadir
mountPath: /data
- name: opt
mountPath: /opt
volumes:
- name: opt
emptyDir: {}
- name: workdir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

View File

@ -0,0 +1,18 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: zk
labels:
app: zk
spec:
ports:
- port: 2888
name: peer
- port: 3888
name: leader-election
# *.zk.default.svc.cluster.local
clusterIP: None
selector:
app: zk
publishNotReadyAddresses: true

View File

@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: zoo
spec:
serviceName: "zk"
replicas: 3
selector:
matchLabels:
app: zk
template:
metadata:
labels:
app: zk
spec:
initContainers:
- name: install
image: k8s.gcr.io/e2e-test-images/pets/zookeeper-installer:1.5
imagePullPolicy: Always
args:
- "--install-into=/opt"
- "--work-dir=/work-dir"
volumeMounts:
- name: opt
mountPath: "/opt/"
- name: workdir
mountPath: "/work-dir"
- name: bootstrap
image: java:openjdk-8-jre
command:
- "/work-dir/peer-finder"
args:
- -on-start="/work-dir/on-start.sh"
- "-service=zk"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
volumeMounts:
- name: opt
mountPath: "/opt"
- name: workdir
mountPath: "/work-dir"
- name: datadir
mountPath: "/tmp/zookeeper"
containers:
- name: zk
image: openjdk:8-jre
ports:
- containerPort: 2888
name: peer
- containerPort: 3888
name: leader-election
command:
- /opt/zookeeper/bin/zkServer.sh
args:
- start-foreground
readinessProbe:
exec:
command:
- sh
- -c
- "/opt/zookeeper/bin/zkCli.sh ls /"
initialDelaySeconds: 15
timeoutSeconds: 5
volumeMounts:
- name: datadir
mountPath: /tmp/zookeeper
- name: opt
mountPath: /opt
# Mount the work-dir just for debugging
- name: workdir
mountPath: /work-dir
volumes:
- name: opt
emptyDir: {}
- name: workdir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
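# Illustrative note (not part of the upstream manifest): the bootstrap init
# container runs peer-finder, which waits for the DNS records of all peers
# behind the headless "zk" service and then invokes on-start.sh to write the
# ensemble configuration before the main zk container starts.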

View File

@ -0,0 +1,19 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- saad-ali
- gnufied
- jingxu97
- jsafrane
- msau42
- xing-yang
reviewers:
- saad-ali
- gnufied
- jingxu97
- jsafrane
- msau42
- xing-yang
emeritus_approvers:
- davidz627
- rootfs

View File

@ -0,0 +1,50 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: hellos.hello.example.com
spec:
group: hello.example.com
names:
kind: Hello
listKind: HelloList
plural: hellos
singular: hello
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Hello is a specification for a Hello resource
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: HelloSpec is the spec for a Hello resource
properties:
fileContents:
type: string
fileName:
type: string
required:
- fileContents
- fileName
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -0,0 +1,57 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/2934
creationTimestamp: null
name: volumepopulators.populator.storage.k8s.io
spec:
group: populator.storage.k8s.io
names:
kind: VolumePopulator
listKind: VolumePopulatorList
plural: volumepopulators
singular: volumepopulator
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .sourceKind
name: SourceKind
type: string
name: v1beta1
schema:
openAPIV3Schema:
description: VolumePopulator represents the registration for a volume populator. VolumePopulators are cluster scoped.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
sourceKind:
description: Kind of the data source this populator supports
properties:
group:
type: string
kind:
type: string
required:
- group
- kind
type: object
required:
- sourceKind
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -0,0 +1,68 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: hello-account
namespace: hello
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hello-role
rules:
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, patch]
- apiGroups: [""]
resources: [persistentvolumeclaims]
verbs: [get, list, watch, patch, create, delete]
- apiGroups: [""]
resources: [pods]
verbs: [get, list, watch, create, delete]
- apiGroups: [storage.k8s.io]
resources: [storageclasses]
verbs: [get, list, watch]
- apiGroups: [hello.example.com]
resources: [hellos]
verbs: [get, list, watch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hello-binding
subjects:
- kind: ServiceAccount
name: hello-account
namespace: hello
roleRef:
kind: ClusterRole
name: hello-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-populator
namespace: hello
spec:
selector:
matchLabels:
app: hello
template:
metadata:
labels:
app: hello
spec:
serviceAccount: hello-account
containers:
- name: hello
image: k8s.gcr.io/sig-storage/hello-populator:v1.0.1
imagePullPolicy: IfNotPresent
args:
- --mode=controller
- --image-name=k8s.gcr.io/sig-storage/hello-populator:v1.0.1
- --http-endpoint=:8080
ports:
- containerPort: 8080
name: http-endpoint
protocol: TCP

View File

@ -0,0 +1,37 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: volume-data-source-validator
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: volume-data-source-validator
rules:
- apiGroups: [populator.storage.k8s.io]
resources: [volumepopulators]
verbs: [get, list, watch]
- apiGroups: [""]
resources: [persistentvolumeclaims]
verbs: [get, list, watch]
- apiGroups: [""]
resources: [events]
verbs: [list, watch, create, update, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: volume-data-source-validator
labels:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: volume-data-source-validator
namespace: kube-system
roleRef:
kind: ClusterRole
name: volume-data-source-validator
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: volume-data-source-validator
namespace: kube-system
spec:
serviceName: volume-data-source-validator
replicas: 1
selector:
matchLabels:
app: volume-data-source-validator
template:
metadata:
labels:
app: volume-data-source-validator
spec:
serviceAccount: volume-data-source-validator
containers:
- name: volume-data-source-validator
image: k8s.gcr.io/sig-storage/volume-data-source-validator:v1.0.0
args:
- "--v=5"
- "--leader-election=false"
imagePullPolicy: Always

View File

@ -0,0 +1,4 @@
# Replaced by individual roles for external-attacher, external-provisioner and external-snapshotter:
# - https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
# - https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
# - https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/rbac.yaml

View File

@ -0,0 +1,93 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.3.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI attacher.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# attacher, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
# replace with non-default namespace name
namespace: default
---
# Attacher must be able to work with PVs, CSINodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
  # Secret permission is optional.
  # Enable it if you need values from a secret.
  # For example, you have the key `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters.
  # See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# Attacher must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: default
name: external-attacher-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role-cfg
# replace with non-default namespace name
namespace: default
subjects:
- kind: ServiceAccount
name: csi-attacher
# replace with non-default namespace name
namespace: default
roleRef:
kind: Role
name: external-attacher-cfg
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,89 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI health monitor controller.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# health monitor controller, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-external-health-monitor-controller
# replace with non-default namespace name
namespace: default
---
# Health monitor controller must be able to work with PVs, PVCs, Nodes and Pods
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-health-monitor-controller-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-external-health-monitor-controller-role
subjects:
- kind: ServiceAccount
name: csi-external-health-monitor-controller
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
name: external-health-monitor-controller-runner
apiGroup: rbac.authorization.k8s.io
---
# Health monitor controller must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: default
name: external-health-monitor-controller-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-external-health-monitor-controller-role-cfg
# replace with non-default namespace name
namespace: default
subjects:
- kind: ServiceAccount
name: csi-external-health-monitor-controller
# replace with non-default namespace name
namespace: default
roleRef:
kind: Role
name: external-health-monitor-controller-cfg
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,129 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.0.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# provisioner, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
# replace with non-default namespace name
namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
# Access to volumeattachments is only needed when the CSI driver
# has the PUBLISH_UNPUBLISH_VOLUME controller capability.
# In that case, external-provisioner will watch volumeattachments
# to determine when it is safe to delete a volume.
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# Provisioner must be able to work with endpoints in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: default
name: external-provisioner-cfg
rules:
# Only one of the following rules for endpoints or leases is required based on
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
  # Permissions for CSIStorageCapacity are only needed when enabling the
  # publishing of storage capacity information.
- apiGroups: ["storage.k8s.io"]
resources: ["csistoragecapacities"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
# The GET permissions below are needed for walking up the ownership chain
# for CSIStorageCapacity. They are sufficient for deployment via
# StatefulSet (only needs to get Pod) and Deployment (needs to get
# Pod and then ReplicaSet to find the Deployment).
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role-cfg
# replace with non-default namespace name
namespace: default
subjects:
- kind: ServiceAccount
name: csi-provisioner
# replace with non-default namespace name
namespace: default
roleRef:
kind: Role
name: external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,94 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.3.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
# CSI resizer.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# resizer, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-resizer
# replace with non-default namespace name
namespace: default
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-resizer-runner
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role
subjects:
- kind: ServiceAccount
name: csi-resizer
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
name: external-resizer-runner
apiGroup: rbac.authorization.k8s.io
---
# Resizer must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: default
name: external-resizer-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role-cfg
# replace with non-default namespace name
namespace: default
subjects:
- kind: ServiceAccount
name: csi-resizer
# replace with non-default namespace name
namespace: default
roleRef:
kind: Role
name: external-resizer-cfg
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,88 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.0-rc1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
# for csi-driver-host-path master
# by ./update-hostpath.sh
#
# Together with the RBAC file for external-provisioner, this YAML file
# contains all RBAC objects that are necessary to run external CSI
# snapshotter.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - optionally rename the non-namespaced ClusterRole if there
# are conflicts with other deployments
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-snapshotter
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# rename if there are conflicts
name: external-snapshotter-runner
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
# Secret permission is optional.
# Enable it if your driver needs secret.
# For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass.
# See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
subjects:
- kind: ServiceAccount
name: csi-snapshotter
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
# change the name also here if the ClusterRole gets renamed
name: external-snapshotter-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: default # TODO: replace with the namespace you want for your sidecar
name: external-snapshotter-leaderelection
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-snapshotter-leaderelection
namespace: default # TODO: replace with the namespace you want for your sidecar
subjects:
- kind: ServiceAccount
name: csi-snapshotter
namespace: default # TODO: replace with the namespace you want for your sidecar
roleRef:
kind: Role
name: external-snapshotter-leaderelection
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,152 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-gce-pd-controller
spec:
serviceName: "csi-gce-pd"
replicas: 1
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
labels:
app: gcp-compute-persistent-disk-csi-driver
spec:
      # Host network must be used for interaction with Workload Identity in GKE
      # since it replaces the GCE Metadata Server with the GKE Metadata Server.
      # Remove this requirement once the issue is resolved, and before any
      # exposure of the metrics ports.
hostNetwork: true
serviceAccountName: csi-gce-pd-controller-sa
containers:
- name: csi-snapshotter
image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--metrics-address=:22014"
- "--leader-election"
- "--leader-election-namespace=$(PDCSI_NAMESPACE)"
- "--timeout=300s"
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--feature-gates=Topology=true"
- "--http-endpoint=:22011"
- "--leader-election-namespace=$(PDCSI_NAMESPACE)"
- "--timeout=250s"
- "--extra-create-metadata"
# - "--run-controller-service=false" # disable the controller service of the CSI driver
# - "--run-node-service=false" # disable the node service of the CSI driver
- "--leader-election"
- "--default-fstype=ext4"
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 22011
name: http-endpoint
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: http-endpoint
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--http-endpoint=:22012"
- "--leader-election"
- "--leader-election-namespace=$(PDCSI_NAMESPACE)"
- "--timeout=250s"
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 22012
name: http-endpoint
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: http-endpoint
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--http-endpoint=:22013"
- "--leader-election"
- "--leader-election-namespace=$(PDCSI_NAMESPACE)"
- "--handle-volume-inuse-error=false"
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 22013
name: http-endpoint
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: http-endpoint
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: gce-pd-driver
image: k8s.gcr.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
env:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/cloud-sa/cloud-sa.json"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cloud-sa-volume
readOnly: true
mountPath: "/etc/cloud-sa"
volumes:
- name: socket-dir
emptyDir: {}
- name: cloud-sa-volume
secret:
secretName: cloud-sa
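# Illustrative note (not part of the upstream manifest): each sidecar scopes
# leader election to the pod's own namespace by reading metadata.namespace
# through the downward API into PDCSI_NAMESPACE and passing it as
# --leader-election-namespace.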

View File

@ -0,0 +1,198 @@
##### Controller Service Account, Roles, Rolebindings
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-gce-pd-controller-sa
---
# xref: https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
roleRef:
kind: ClusterRole
name: csi-gce-pd-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# xref: https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
roleRef:
kind: ClusterRole
name: csi-gce-pd-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-resizer-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-resizer-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
roleRef:
kind: ClusterRole
name: csi-gce-pd-resizer-role
apiGroup: rbac.authorization.k8s.io
---
# xref: https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-snapshotter-binding
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
roleRef:
kind: ClusterRole
name: csi-gce-pd-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-leaderelection-role
namespace: gce-pd-csi-driver
labels:
k8s-app: gcp-compute-persistent-disk-csi-driver
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-leaderelection-binding
namespace: gce-pd-csi-driver
labels:
k8s-app: gcp-compute-persistent-disk-csi-driver
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
roleRef:
kind: Role
name: csi-gce-pd-leaderelection-role
apiGroup: rbac.authorization.k8s.io
---
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp-csi-controller-driver-registrar-role
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
roleRef:
kind: ClusterRole
name: e2e-test-privileged-psp
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,115 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-gce-pd-node
spec:
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
labels:
app: gcp-compute-persistent-disk-csi-driver
spec:
containers:
- name: csi-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock"
- "--http-endpoint=:22013"
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/pd.csi.storage.gke.io /registration/pd.csi.storage.gke.io-reg.sock"]
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 22013
name: http-endpoint
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz
port: http-endpoint
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: gce-pd-driver
securityContext:
privileged: true
image: k8s.gcr.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
- "--run-controller-service=false"
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /dev
        # The following mounts are required to trigger the host's udevadm from
        # the container
- name: udev-rules-etc
mountPath: /etc/udev
- name: udev-rules-lib
mountPath: /lib/udev
- name: udev-socket
mountPath: /run/udev
- name: sys
mountPath: /sys
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/
type: DirectoryOrCreate
- name: device-dir
hostPath:
path: /dev
type: Directory
      # The following mounts are required to trigger the host's udevadm from
      # the container
- name: udev-rules-etc
hostPath:
path: /etc/udev
type: Directory
- name: udev-rules-lib
hostPath:
path: /lib/udev
type: Directory
- name: udev-socket
hostPath:
path: /run/udev
type: Directory
- name: sys
hostPath:
path: /sys
type: Directory
      # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
      # See "special case": this tolerates everything, so the node component
      # gets scheduled on all nodes.
tolerations:
- operator: Exists
nodeSelector:
kubernetes.io/os: linux
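# Illustrative note (not part of the upstream manifest): the gce-pd-driver
# container must run privileged because it mounts /var/lib/kubelet with
# mountPropagation: Bidirectional; propagating mounts back to the host
# requires a privileged container.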

View File

@ -0,0 +1,4 @@
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.7.3/deploy/
Do not edit manually. Run ./update-hostpath.sh to refresh the content.

View File

@ -0,0 +1,17 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: hostpath.csi.k8s.io
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: hostpath.csi.k8s.io
app.kubernetes.io/component: csi-driver
spec:
# Supports persistent and ephemeral inline volumes.
volumeLifecycleModes:
- Persistent
- Ephemeral
# To determine at runtime which mode a volume uses, pod info and its
# "csi.storage.k8s.io/ephemeral" entry are needed.
podInfoOnMount: true
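
Because the driver advertises the Ephemeral mode and sets podInfoOnMount: true, a pod can request an inline ephemeral volume directly in its spec; the kubelet then passes pod info, including the "csi.storage.k8s.io/ephemeral" entry, to the driver on mount. A minimal sketch (pod name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: inline-ephemeral-example   # placeholder name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.6    # placeholder image
    volumeMounts:
    - name: scratch
      mountPath: /data
  volumes:
  - name: scratch
    # Inline CSI volume: its lifecycle is tied to the pod, and the driver
    # receives csi.storage.k8s.io/ephemeral=true in the pod info on mount.
    csi:
      driver: hostpath.csi.k8s.io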

View File

@ -0,0 +1,394 @@
# All of the individual sidecar RBAC roles get bound
# to this account.
kind: ServiceAccount
apiVersion: v1
metadata:
name: csi-hostpathplugin-sa
namespace: default
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: serviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: attacher-cluster-role
name: csi-hostpathplugin-attacher-cluster-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-attacher-runner
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: health-monitor-controller-cluster-role
name: csi-hostpathplugin-health-monitor-controller-cluster-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-health-monitor-controller-runner
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: provisioner-cluster-role
name: csi-hostpathplugin-provisioner-cluster-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-provisioner-runner
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: resizer-cluster-role
name: csi-hostpathplugin-resizer-cluster-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-resizer-runner
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: snapshotter-cluster-role
name: csi-hostpathplugin-snapshotter-cluster-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-snapshotter-runner
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: attacher-role
name: csi-hostpathplugin-attacher-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: external-attacher-cfg
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: health-monitor-controller-role
name: csi-hostpathplugin-health-monitor-controller-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: external-health-monitor-controller-cfg
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: provisioner-role
name: csi-hostpathplugin-provisioner-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: external-provisioner-cfg
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: resizer-role
name: csi-hostpathplugin-resizer-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: external-resizer-cfg
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: snapshotter-role
name: csi-hostpathplugin-snapshotter-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: external-snapshotter-leaderelection
subjects:
- kind: ServiceAccount
name: csi-hostpathplugin-sa
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpathplugin
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: plugin
spec:
serviceName: "csi-hostpathplugin"
  # One replica only:
  # the hostpath driver only works when everything runs
  # on a single node.
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: plugin
template:
metadata:
labels:
app.kubernetes.io/instance: hostpath.csi.k8s.io
app.kubernetes.io/part-of: csi-driver-host-path
app.kubernetes.io/name: csi-hostpathplugin
app.kubernetes.io/component: plugin
spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
securityContext:
privileged: true
ports:
- containerPort: 9898
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 2
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/pods
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /var/lib/kubelet/plugins
mountPropagation: Bidirectional
name: plugins-dir
- mountPath: /csi-data-dir
name: csi-data-dir
- mountPath: /dev
name: dev-dir
- name: csi-external-health-monitor-controller
image: k8s.gcr.io/sig-storage/csi-external-health-monitor-controller:v0.4.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
args:
- --v=5
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access the unix domain
            # socket created by the privileged CSI driver container.
privileged: true
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /csi-data-dir
name: csi-data-dir
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: k8s.gcr.io/sig-storage/livenessprobe:v2.4.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
args:
- --v=5
- --csi-address=/csi/csi.sock
securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access the unix domain
            # socket created by the privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
args:
- -v=5
- --csi-address=/csi/csi.sock
- --feature-gates=Topology=true
securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access the unix domain
            # socket created by the privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
args:
- -v=5
- -csi-address=/csi/csi.sock
securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access the unix domain
            # socket created by the privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- name: csi-snapshotter
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
args:
- -v=5
- --csi-address=/csi/csi.sock
securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access the unix domain
            # socket created by the privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins
type: Directory
name: plugins-dir
- hostPath:
        # 'path' is where PV data is persisted on the host.
        # Using /tmp is also possible, but those PVs will not be available
        # after plugin container recreation or a host reboot.
path: /var/lib/csi-hostpath-data/
type: DirectoryOrCreate
name: csi-data-dir
- hostPath:
path: /dev
type: Directory
name: dev-dir

Some files were not shown because too many files have changed in this diff.