vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

54
vendor/k8s.io/kubernetes/test/e2e/node/BUILD generated vendored Normal file

@@ -0,0 +1,54 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"apparmor.go",
"events.go",
"framework.go",
"kubelet.go",
"kubelet_perf.go",
"mount_propagation.go",
"pod_gc.go",
"pods.go",
"pre_stop.go",
"security_context.go",
"ssh.go",
],
importpath = "k8s.io/kubernetes/test/e2e/node",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
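The go_default_library target above is what the top-level e2e suite links against; as the var _ = SIGDescribe(...) pattern in the Go files below suggests, the suite only needs to import this package for the side effect of registering its specs. A minimal, hedged sketch of such a blank import (the importing package shown here is illustrative, not part of this commit):

package e2e

import (
	// Imported for side effects: package initialization registers the
	// [sig-node] specs defined in test/e2e/node with the Ginkgo suite.
	_ "k8s.io/kubernetes/test/e2e/node"
)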

9
vendor/k8s.io/kubernetes/test/e2e/node/OWNERS generated vendored Normal file

@@ -0,0 +1,9 @@
approvers:
- Random-Liu
- dchen1107
- derekwaynecarr
- tallclair
- vishh
- yujuhong
reviewers:
- sig-node-reviewers

49
vendor/k8s.io/kubernetes/test/e2e/node/apparmor.go generated vendored Normal file

@@ -0,0 +1,49 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = SIGDescribe("AppArmor", func() {
f := framework.NewDefaultFramework("apparmor")
Context("load AppArmor profiles", func() {
BeforeEach(func() {
common.SkipIfAppArmorNotSupported()
common.LoadAppArmorProfiles(f)
})
AfterEach(func() {
if !CurrentGinkgoTestDescription().Failed {
return
}
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
})
It("should enforce an AppArmor profile", func() {
common.CreateAppArmorTestPod(f, false, true)
})
It("can disable an AppArmor profile, using unconfined", func() {
common.CreateAppArmorTestPod(f, true, true)
})
})
})

130
vendor/k8s.io/kubernetes/test/e2e/node/events.go generated vendored Normal file

@@ -0,0 +1,130 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("Events", func() {
f := framework.NewDefaultFramework("events")
framework.ConformanceIt("should be sent by kubelets and the scheduler about pods scheduling and running ", func() {
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
By("creating the pod")
name := "send-events-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "p",
Image: framework.ServeHostnameImage,
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
},
}
By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, nil)
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create pod: %v", err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(len(pods.Items)).To(Equal(1))
By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod: %v", err)
}
fmt.Printf("%+v\n", podWithUid)
var events *v1.EventList
// Check for scheduler event about the pod.
By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.namespace": f.Namespace.Name,
"source": v1.DefaultSchedulerName,
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
if err != nil {
return false, err
}
if len(events.Items) > 0 {
fmt.Println("Saw scheduler event for our pod.")
return true, nil
}
return false, nil
}))
// Check for kubelet event about the pod.
By("checking for kubelet event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.kind": "Pod",
"involvedObject.namespace": f.Namespace.Name,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
if err != nil {
return false, err
}
if len(events.Items) > 0 {
fmt.Println("Saw kubelet event for our pod.")
return true, nil
}
return false, nil
}))
})
})

23
vendor/k8s.io/kubernetes/test/e2e/node/framework.go generated vendored Normal file

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import "k8s.io/kubernetes/test/e2e/framework"
func SIGDescribe(text string, body func()) bool {
return framework.KubeDescribe("[sig-node] "+text, body)
}
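For orientation, SIGDescribe above is the hook every other file in this package uses to tag its Ginkgo specs with the [sig-node] prefix, as apparmor.go and kubelet.go do. A minimal sketch of that usage with a hypothetical spec name (illustrative only, not part of the vendored sources):

package node

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical example: the spec below would be reported as "[sig-node] Example".
var _ = SIGDescribe("Example", func() {
	f := framework.NewDefaultFramework("example")

	It("runs in its own per-test namespace", func() {
		framework.Logf("namespace for this spec: %s", f.Namespace.Name)
	})
})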

456
vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go generated vendored Normal file

@@ -0,0 +1,456 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"path/filepath"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// Interval to framework.Poll /runningpods on a node
pollInterval = 1 * time.Second
// Interval to framework.Poll /stats/container on a node
containerStatsPollingInterval = 5 * time.Second
// Maximum number of nodes that we constrain the test to
maxNodesToCheck = 10
)
// getPodMatches returns a set of pod names on the given node that match the
// podNamePrefix and namespace.
func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
matches := sets.NewString()
framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
runningPods, err := framework.GetKubeletPods(c, nodeName)
if err != nil {
framework.Logf("Error checking running pods on %v: %v", nodeName, err)
return matches
}
for _, pod := range runningPods.Items {
if pod.Namespace == namespace && strings.HasPrefix(pod.Name, podNamePrefix) {
matches.Insert(pod.Name)
}
}
return matches
}
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
// it finds targetNumPods pods that match the given criteria (namespace and
// podNamePrefix). Note that we usually use label selector to filter pods that
// belong to the same RC. However, we use podNamePrefix with namespace here
// because pods returned from /runningpods do not contain the original label
// information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
return wait.Poll(pollInterval, timeout, func() (bool, error) {
matchCh := make(chan sets.String, len(nodeNames))
for _, item := range nodeNames.List() {
// Launch a goroutine per node to check the pods running on the nodes.
nodeName := item
go func() {
matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace)
}()
}
seen := sets.NewString()
for i := 0; i < len(nodeNames.List()); i++ {
seen = seen.Union(<-matchCh)
}
if seen.Len() == targetNumPods {
return true, nil
}
framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
return false, nil
})
}
// updateNodeLabels updates the labels of the nodes given by nodeNames.
// If a given label already exists, it is overwritten. If a label to remove doesn't exist,
// it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
const maxRetries = 5
for nodeName := range nodeNames {
var node *v1.Node
var err error
for i := 0; i < maxRetries; i++ {
node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting node %s: %v", nodeName, err)
continue
}
if toAdd != nil {
for k, v := range toAdd {
node.ObjectMeta.Labels[k] = v
}
}
if toRemove != nil {
for k := range toRemove {
delete(node.ObjectMeta.Labels, k)
}
}
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
framework.Logf("Error updating node %s: %v", nodeName, err)
} else {
break
}
}
Expect(err).NotTo(HaveOccurred())
}
}
// Restart the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 1` command in the
// pod's (only) container. This command changes the number of nfs server threads from
// (presumably) zero back to 1, and therefore allows nfs to open connections again.
func restartNfsServer(serverPod *v1.Pod) {
const startcmd = "/usr/sbin/rpc.nfsd 1"
ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
}
// Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the
// pod's (only) container. This command changes the number of nfs server threads to 0,
// thus closing all open nfs connections.
func stopNfsServer(serverPod *v1.Pod) {
const stopcmd = "/usr/sbin/rpc.nfsd 0"
ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
}
// Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container
// will execute the passed in shell cmd. Waits for the pod to start.
// Note: the nfs plugin is defined inline, no PV or PVC.
func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
By("create pod using nfs volume")
isPrivileged := true
cmdLine := []string{"-c", cmd}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pod-nfs-vol-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pod-nfs-vol",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: cmdLine,
VolumeMounts: []v1.VolumeMount{
{
Name: "nfs-vol",
MountPath: "/mnt",
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyNever, //don't restart pod
Volumes: []v1.Volume{
{
Name: "nfs-vol",
VolumeSource: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: nfsIP,
Path: "/exports",
ReadOnly: false,
},
},
},
},
},
}
rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = f.WaitForPodReady(rtnPod.Name) // running & ready
Expect(err).NotTo(HaveOccurred())
rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
Expect(err).NotTo(HaveOccurred())
return rtnPod
}
// Checks for a lingering nfs mount and/or uid directory on the pod's host. The host IP is used
// so that this test runs in GCE, where it appears that SSH cannot resolve the hostname.
// If expectClean is true then we expect the node to be cleaned up and thus commands like
// `ls <uid-dir>` should fail (since that dir was removed). If expectClean is false then we expect
// the node is not cleaned up, and thus cmds like `ls <uid-dir>` should succeed. We wait for the
// kubelet to be cleaned up, after which an error is reported.
func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
timeout := 5 * time.Minute
poll := 20 * time.Second
podDir := filepath.Join("/var/lib/kubelet/pods", string(pod.UID))
mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
// use ip rather than hostname in GCE
nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred())
condMsg := "deleted"
if !expectClean {
condMsg = "present"
}
// table of host tests to perform (order may matter so not using a map)
type testT struct {
feature string // feature to test
cmd string // remote command to execute on node
}
tests := []testT{
{
feature: "pod UID directory",
cmd: fmt.Sprintf("sudo ls %v", podDir),
},
{
feature: "pod nfs mount",
cmd: fmt.Sprintf("sudo mount | grep %v", mountDir),
},
}
for _, test := range tests {
framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
err = wait.Poll(poll, timeout, func() (bool, error) {
result, err := framework.NodeExec(nodeIP, test.cmd)
Expect(err).NotTo(HaveOccurred())
framework.LogSSHResult(result)
ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
if expectClean && ok { // keep trying
return false, nil
}
if !expectClean && !ok { // stop wait loop
return true, fmt.Errorf("%v is gone but expected to exist", test.feature)
}
return true, nil // done, host is as expected
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
}
if expectClean {
framework.Logf("Pod's host has been cleaned up")
} else {
framework.Logf("Pod's host has not been cleaned up (per expectation)")
}
}
var _ = SIGDescribe("kubelet", func() {
var (
c clientset.Interface
ns string
)
f := framework.NewDefaultFramework("kubelet")
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
SIGDescribe("Clean up pods on node", func() {
var (
numNodes int
nodeNames sets.String
nodeLabels map[string]string
resourceMonitor *framework.ResourceMonitor
)
type DeleteTest struct {
podsPerNode int
timeout time.Duration
}
deleteTests := []DeleteTest{
{podsPerNode: 10, timeout: 1 * time.Minute},
}
BeforeEach(func() {
// Use node labels to restrict the pods to be assigned only to the
// nodes we observe initially.
nodeLabels = make(map[string]string)
nodeLabels["kubelet_cleanup"] = "true"
nodes := framework.GetReadySchedulableNodesOrDie(c)
numNodes = len(nodes.Items)
Expect(numNodes).NotTo(BeZero())
nodeNames = sets.NewString()
// If there are a lot of nodes, we don't want to use all of them
// (if there are 1000 nodes in the cluster, starting 10 pods/node
// will take ~10 minutes today). There is also a deletion phase.
// Instead, we choose at most 10 nodes.
if numNodes > maxNodesToCheck {
numNodes = maxNodesToCheck
}
for i := 0; i < numNodes; i++ {
nodeNames.Insert(nodes.Items[i].Name)
}
updateNodeLabels(c, nodeNames, nodeLabels, nil)
// Start resourceMonitor only in small clusters.
if len(nodes.Items) <= maxNodesToCheck {
resourceMonitor = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingInterval)
resourceMonitor.Start()
}
})
AfterEach(func() {
if resourceMonitor != nil {
resourceMonitor.Stop()
}
// If we added labels to nodes in this test, remove them now.
updateNodeLabels(c, nodeNames, nil, nodeLabels)
})
for _, itArg := range deleteTests {
name := fmt.Sprintf(
"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
It(name, func() {
totalPods := itArg.podsPerNode * numNodes
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: rcName,
Namespace: f.Namespace.Name,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: totalPods,
NodeSelector: nodeLabels,
})).NotTo(HaveOccurred())
// Perform a sanity check so that we know all desired pods are
// running on the nodes according to kubelet. The timeout is set to
// only 30 seconds here because framework.RunRC already waited for all pods to
// transition to the running status.
Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
time.Second*30)).NotTo(HaveOccurred())
if resourceMonitor != nil {
resourceMonitor.LogLatest()
}
By("Deleting the RC")
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its
// cache) and returns a list of running pods. Some possible causes of
// failures are:
// - kubelet deadlock
// - a bug in graceful termination (if it is enabled)
// - docker slow to delete pods (or resource problems causing slowness)
start := time.Now()
Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
itArg.timeout)).NotTo(HaveOccurred())
framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
time.Since(start))
if resourceMonitor != nil {
resourceMonitor.LogCPUSummary()
}
})
}
})
// Test host cleanup when disrupting the volume environment.
SIGDescribe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
type hostCleanupTest struct {
itDescr string
podCmd string
}
// Disrupt the nfs-server pod after a client pod accesses the nfs volume.
// Note: the nfs-server is stopped NOT deleted. This is done to preserve its ip addr.
// If the nfs-server pod is deleted the client pod's mount can not be unmounted.
// If the nfs-server pod is deleted and re-created, due to having a different ip
// addr, the client pod's mount still cannot be unmounted.
Context("Host cleanup after disrupting NFS volume [NFS]", func() {
// issue #31272
var (
nfsServerPod *v1.Pod
nfsIP string
NFSconfig framework.VolumeTestConfig
pod *v1.Pod // client pod
)
// fill in test slice for this context
testTbl := []hostCleanupTest{
{
itDescr: "after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed.",
podCmd: "sleep 6000", // keep pod running
},
{
itDescr: "after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed.",
podCmd: "while true; do echo FeFieFoFum >>/mnt/SUCCESS; sleep 1; cat /mnt/SUCCESS; done",
},
}
BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
NFSconfig, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
})
AfterEach(func() {
err := framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
err = framework.DeletePodWithWait(f, c, nfsServerPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
})
// execute It blocks from above table of tests
for _, t := range testTbl {
It(t.itDescr, func() {
pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd)
By("Stop the NFS server")
stopNfsServer(nfsServerPod)
By("Delete the pod mounted to the NFS volume -- expect failure")
err := framework.DeletePodWithWait(f, c, pod)
Expect(err).To(HaveOccurred())
// pod object is now stale, but is intentionally not nil
By("Check if pod's host has been cleaned up -- expect not")
checkPodCleanup(c, pod, false)
By("Restart the nfs server")
restartNfsServer(nfsServerPod)
By("Verify that the deleted client pod is now cleaned up")
checkPodCleanup(c, pod, true)
})
}
})
})
})

285
vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go generated vendored Normal file

@@ -0,0 +1,285 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// Interval to poll /stats/container on a node
containerStatsPollingPeriod = 10 * time.Second
// The monitoring time for one test.
monitoringTime = 20 * time.Minute
// The periodic reporting period.
reportingPeriod = 5 * time.Minute
// Timeout for waiting for the image prepulling to complete.
imagePrePullingLongTimeout = time.Minute * 8
)
type resourceTest struct {
podsPerNode int
cpuLimits framework.ContainersCPUSummary
memLimits framework.ResourceUsagePerContainer
}
func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
for _, n := range nodeNames {
podList, err := framework.GetKubeletRunningPods(c, n)
if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v", n)
continue
}
framework.Logf("%d pods are running on node %v", len(podList.Items), n)
}
}
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
numNodes := nodeNames.Len()
totalPods := podsPerNode * numNodes
By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))
// TODO: Use a more realistic workload
Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: rcName,
Namespace: f.Namespace.Name,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: totalPods,
})).NotTo(HaveOccurred())
// Log once and flush the stats.
rm.LogLatest()
rm.Reset()
By("Start monitoring resource usage")
// Periodically dump the cpu summary until the deadline is met.
// Note that without calling framework.ResourceMonitor.Reset(), the stats
// would occupy increasingly more memory. This should be fine
// for the current test duration, but we should reclaim the
// entries if we plan to monitor longer (e.g., 8 hours).
deadline := time.Now().Add(monitoringTime)
for time.Now().Before(deadline) {
timeLeft := deadline.Sub(time.Now())
framework.Logf("Still running...%v left", timeLeft)
if timeLeft < reportingPeriod {
time.Sleep(timeLeft)
} else {
time.Sleep(reportingPeriod)
}
logPodsOnNodes(f.ClientSet, nodeNames.List())
}
By("Reporting overall resource usage")
logPodsOnNodes(f.ClientSet, nodeNames.List())
usageSummary, err := rm.GetLatest()
Expect(err).NotTo(HaveOccurred())
// TODO(random-liu): Remove the original log when we migrate to new perfdash
framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
// Log perf result
framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
cpuSummary := rm.GetCPUSummary()
framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
// Log perf result
framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
verifyCPULimits(expectedCPU, cpuSummary)
By("Deleting the RC")
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
if expected == nil {
return
}
var errList []string
for nodeName, nodeSummary := range actual {
var nodeErrs []string
for cName, expectedResult := range expected {
container, ok := nodeSummary[cName]
if !ok {
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing", cName))
continue
}
expectedValue := expectedResult.MemoryRSSInBytes
actualValue := container.MemoryRSSInBytes
if expectedValue != 0 && actualValue > expectedValue {
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: expected RSS memory (MB) < %d; got %d",
cName, expectedValue, actualValue))
}
}
if len(nodeErrs) > 0 {
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
if err != nil {
framework.Logf("Unable to get heap stats from %q", nodeName)
} else {
framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
}
}
}
if len(errList) > 0 {
framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
}
}
func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
if expected == nil {
return
}
var errList []string
for nodeName, perNodeSummary := range actual {
var nodeErrs []string
for cName, expectedResult := range expected {
perContainerSummary, ok := perNodeSummary[cName]
if !ok {
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing", cName))
continue
}
for p, expectedValue := range expectedResult {
actualValue, ok := perContainerSummary[p]
if !ok {
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing percentile %v", cName, p))
continue
}
if actualValue > expectedValue {
nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: expected %.0fth%% usage < %.3f; got %.3f",
cName, p*100, expectedValue, actualValue))
}
}
}
if len(nodeErrs) > 0 {
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
}
}
if len(errList) > 0 {
framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
}
}
// Slow by design (1 hour)
var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
var nodeNames sets.String
f := framework.NewDefaultFramework("kubelet-perf")
var om *framework.RuntimeOperationMonitor
var rm *framework.ResourceMonitor
BeforeEach(func() {
// Wait until the image prepull pod has completed so that it won't
// affect the runtime cpu usage. Fail the test if prepulling cannot
// finish in time.
if err := framework.WaitForPodsSuccess(f.ClientSet, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout)
}
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeNames = sets.NewString()
for _, node := range nodes.Items {
nodeNames.Insert(node.Name)
}
om = framework.NewRuntimeOperationMonitor(f.ClientSet)
rm = framework.NewResourceMonitor(f.ClientSet, framework.TargetContainers(), containerStatsPollingPeriod)
rm.Start()
})
AfterEach(func() {
rm.Stop()
result := om.GetLatestRuntimeOperationErrorRate()
framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
})
SIGDescribe("regular resource usage tracking", func() {
// We assume that the scheduler will make reasonable scheduling choices
// and assign ~N pods on the node.
// Although we want to track N pods per node, there are N + add-on pods
// in the cluster. The cluster add-on pods can be distributed unevenly
// among the nodes because they are created during the cluster
// initialization. This *noise* is obvious when N is small. We
// deliberately set higher resource usage limits to account for the
// noise.
//
// We set all resource limits generously because this test is mainly
// used to catch resource leaks in the soak cluster. For tracking
// kubelet/runtime resource usage, please see the node e2e benchmark
// dashboard. http://node-perf-dash.k8s.io/
//
// TODO(#36621): Deprecate this test once we have a node e2e soak
// cluster.
rTests := []resourceTest{
{
podsPerNode: 0,
cpuLimits: framework.ContainersCPUSummary{
stats.SystemContainerKubelet: {0.50: 0.10, 0.95: 0.20},
stats.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.20},
},
memLimits: framework.ResourceUsagePerContainer{
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
// The detail can be found at https://github.com/kubernetes/kubernetes/issues/28384#issuecomment-244158892
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
},
},
{
cpuLimits: framework.ContainersCPUSummary{
stats.SystemContainerKubelet: {0.50: 0.35, 0.95: 0.50},
stats.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.50},
},
podsPerNode: 100,
memLimits: framework.ResourceUsagePerContainer{
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
},
},
}
for _, testArg := range rTests {
itArg := testArg
podsPerNode := itArg.podsPerNode
name := fmt.Sprintf(
"resource tracking for %d pods per node", podsPerNode)
It(name, func() {
runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits)
})
}
})
SIGDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
density := []int{100}
for i := range density {
podsPerNode := density[i]
name := fmt.Sprintf(
"resource tracking for %d pods per node", podsPerNode)
It(name, func() {
runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil)
})
}
})
})

179
vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go generated vendored Normal file

@@ -0,0 +1,179 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func preparePod(name string, node *v1.Node, propagation v1.MountPropagationMode, hostDir string) *v1.Pod {
const containerName = "cntr"
bTrue := true
var oneSecond int64 = 1
// The pod prepares /mnt/test/<podname> and sleeps.
cmd := fmt.Sprintf("mkdir /mnt/test/%[1]s; sleep 3600", name)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
NodeName: node.Name,
Containers: []v1.Container{
{
Name: containerName,
Image: "busybox",
Command: []string{"sh", "-c", cmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "host",
MountPath: "/mnt/test",
MountPropagation: &propagation,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &bTrue,
},
},
},
Volumes: []v1.Volume{
{
Name: "host",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: hostDir,
},
},
},
},
// speed up termination of the pod
TerminationGracePeriodSeconds: &oneSecond,
},
}
return pod
}
var _ = SIGDescribe("Mount propagation [Feature:MountPropagation]", func() {
f := framework.NewDefaultFramework("mount-propagation")
It("should propagate mounts to the host", func() {
// This test runs two pods: master and slave with respective mount
// propagation on common /var/lib/kubelet/XXXX directory. Both mount a
// tmpfs to a subdirectory there. We check that these mounts are
// propagated to the right places.
// Pick a node where all pods will run.
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
node := &nodes.Items[0]
// Fail the test if the namespace is not set. We expect that the
// namespace is unique and we might delete user data if it's not.
if len(f.Namespace.Name) == 0 {
Expect(f.Namespace.Name).ToNot(Equal(""))
return
}
// hostDir is the directory that's shared via HostPath among all pods.
// Make sure it's random enough so we don't clash with another test
// running in parallel.
hostDir := "/var/lib/kubelet/" + f.Namespace.Name
defer func() {
cleanCmd := fmt.Sprintf("sudo rm -rf %q", hostDir)
framework.IssueSSHCommand(cleanCmd, framework.TestContext.Provider, node)
}()
podClient := f.PodClient()
master := podClient.CreateSync(preparePod("master", node, v1.MountPropagationBidirectional, hostDir))
slave := podClient.CreateSync(preparePod("slave", node, v1.MountPropagationHostToContainer, hostDir))
// Check that the pods see each other's directories. This just checks
// that they have the same HostPath, not the mount propagation.
podNames := []string{master.Name, slave.Name}
for _, podName := range podNames {
for _, dirName := range podNames {
cmd := fmt.Sprintf("test -d /mnt/test/%s", dirName)
f.ExecShellInPod(podName, cmd)
}
}
// Each pod mounts one tmpfs to /mnt/test/<podname> and puts a file there.
for _, podName := range podNames {
cmd := fmt.Sprintf("mount -t tmpfs e2e-mount-propagation-%[1]s /mnt/test/%[1]s; echo %[1]s > /mnt/test/%[1]s/file", podName)
f.ExecShellInPod(podName, cmd)
// unmount tmpfs when the test finishes
cmd = fmt.Sprintf("umount /mnt/test/%s", podName)
defer f.ExecShellInPod(podName, cmd)
}
// The host mounts one tmpfs to testdir/host and puts a file there so we
// can check mount propagation from the host to pods.
cmd := fmt.Sprintf("sudo mkdir %[1]q/host; sudo mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir)
err := framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
framework.ExpectNoError(err)
defer func() {
cmd := fmt.Sprintf("sudo umount %q/host", hostDir)
framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
}()
// Now check that mounts are propagated to the right containers.
// expectedMounts is map of pod name -> expected mounts visible in the
// pod.
expectedMounts := map[string]sets.String{
// Master sees only its own mount and not the slave's one.
"master": sets.NewString("master", "host"),
// Slave sees master's mount + itself.
"slave": sets.NewString("master", "slave", "host"),
}
dirNames := append(podNames, "host")
for podName, mounts := range expectedMounts {
for _, mountName := range dirNames {
cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName)
stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, cmd)
framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName)
shouldBeVisible := mounts.Has(mountName)
if shouldBeVisible {
framework.ExpectNoError(err, "%s: failed to run %q", msg, cmd)
Expect(stdout).To(Equal(mountName), msg)
} else {
// We *expect* cat to return error here
Expect(err).To(HaveOccurred(), msg)
}
}
}
// Check that the mounts are/are not propagated to the host.
// Host can see mount from master
cmd = fmt.Sprintf("test `cat %q/master/file` = master", hostDir)
err = framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
framework.ExpectNoError(err, "host should see mount from master")
// Host can't see mount from slave
cmd = fmt.Sprintf("test ! -e %q/slave/file", hostDir)
err = framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
framework.ExpectNoError(err, "host shouldn't see mount from slave")
})
})

99
vendor/k8s.io/kubernetes/test/e2e/node/pod_gc.go generated vendored Normal file

@@ -0,0 +1,99 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
)
// This test requires that --terminated-pod-gc-threshold=100 be set on the controller manager
//
// Slow by design (7 min)
var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() {
f := framework.NewDefaultFramework("pod-garbage-collector")
It("should handle the creation of 1000 pods", func() {
var count int
for count < 1000 {
pod, err := createTerminatingPod(f)
pod.ResourceVersion = ""
pod.Status.Phase = v1.PodFailed
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil {
framework.Failf("err failing pod: %v", err)
}
count++
if count%50 == 0 {
framework.Logf("count: %v", count)
}
}
framework.Logf("created: %v", count)
// The gc controller polls every 30s and fires off a goroutine per
// pod to terminate.
var err error
var pods *v1.PodList
timeout := 2 * time.Minute
gcThreshold := 100
By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
framework.Logf("Failed to list pod %v", err)
return false, nil
}
if len(pods.Items) != gcThreshold {
framework.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold)
return false, nil
}
return true, nil
})
if pollErr != nil {
framework.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err)
}
})
})
func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) {
uuid := uuid.NewUUID()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: string(uuid),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: string(uuid),
Image: "busybox",
},
},
SchedulerName: "please don't schedule my pods",
},
}
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
}
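As an aside, the polling loop above (and the ones in kubelet.go, events.go, and pods.go) all build on the same wait.Poll helper from apimachinery: run a condition function every interval until it returns true, returns an error, or the timeout elapses. A small, self-contained sketch of that pattern with illustrative durations (not taken from this commit):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Retry the condition every second for up to ten seconds; here it
	// simply reports success once three seconds have elapsed.
	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
		return time.Since(start) > 3*time.Second, nil
	})
	fmt.Println("poll finished, err:", err)
}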

241
vendor/k8s.io/kubernetes/test/e2e/node/pods.go generated vendored Normal file

@@ -0,0 +1,241 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"crypto/tls"
"fmt"
"net/http"
"regexp"
"strconv"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = SIGDescribe("Pods Extended", func() {
f := framework.NewDefaultFramework("pods")
framework.KubeDescribe("Delete Grace Period", func() {
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
// Flaky issue #36821.
framework.ConformanceIt("should be submitted and removed [Flaky]", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
},
},
},
}
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
By("submitting the pod to kubernetes")
podClient.Create(pod)
By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(1))
By("verifying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
framework.Failf("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
// start local proxy, so we can send graceful deletion over query string, rather than body parameter
cmd := framework.KubectlCmd("proxy", "-p", "0")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to start up proxy")
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
buf := make([]byte, 128)
var n int
n, err = stdout.Read(buf)
Expect(err).NotTo(HaveOccurred(), "failed to read from kubectl proxy stdout")
output := string(buf[:n])
proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
match := proxyRegexp.FindStringSubmatch(output)
Expect(len(match)).To(Equal(2))
port, err := strconv.Atoi(match[1])
Expect(err).NotTo(HaveOccurred(), "failed to convert port into string")
endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
req, err := http.NewRequest("DELETE", endpoint, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create http request")
By("deleting the pod gracefully")
rsp, err := client.Do(req)
Expect(err).NotTo(HaveOccurred(), "failed to use http client to send delete")
defer rsp.Body.Close()
By("verifying the kubelet observed the termination notice")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
}
for _, kubeletPod := range podList.Items {
if pod.Name != kubeletPod.Name {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
By("verifying pod deletion was observed")
deleted := false
timeout := false
var lastPod *v1.Pod
timer := time.After(1 * time.Minute)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
lastPod = event.Object.(*v1.Pod)
deleted = true
}
case <-timer:
timeout = true
}
}
if !deleted {
framework.Failf("Failed to observe pod deletion")
}
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
})
})
framework.KubeDescribe("Pods Set QOS Class", func() {
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
framework.ConformanceIt("should be submitted and removed ", func() {
By("creating the pod")
name := "pod-qos-class-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": name,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
},
},
},
},
}
By("submitting the pod to kubernetes")
podClient.Create(pod)
By("verifying QOS class is set on the pod")
pod, err := podClient.Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(pod.Status.QOSClass).To(Equal(v1.PodQOSGuaranteed))
})
})
})

172
vendor/k8s.io/kubernetes/test/e2e/node/pre_stop.go generated vendored Normal file

@@ -0,0 +1,172 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"encoding/json"
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
// partially cloned from webserver.go
type State struct {
Received map[string]int
}
func testPreStop(c clientset.Interface, ns string) {
// This is the server that will receive the preStop notification
podDescr := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "server",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "server",
Image: imageutils.GetE2EImage(imageutils.Nettest),
Ports: []v1.ContainerPort{{ContainerPort: 8080}},
},
},
},
}
By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
podDescr, err := c.CoreV1().Pods(ns).Create(podDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
// At the end of the test, clean up by removing the pod.
defer func() {
By("Deleting the server pod")
c.CoreV1().Pods(ns).Delete(podDescr.Name, nil)
}()
By("Waiting for pods to come up.")
err = framework.WaitForPodRunningInNamespace(c, podDescr)
framework.ExpectNoError(err, "waiting for server pod to start")
val := "{\"Source\": \"prestop\"}"
podOut, err := c.CoreV1().Pods(ns).Get(podDescr.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "getting pod info")
preStopDescr := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "tester",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "tester",
Image: "busybox",
Command: []string{"sleep", "600"},
Lifecycle: &v1.Lifecycle{
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{
"wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP),
},
},
},
},
},
},
},
}
By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
preStopDescr, err = c.CoreV1().Pods(ns).Create(preStopDescr)
framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
deletePreStop := true
// At the end of the test, clean up by removing the pod.
defer func() {
if deletePreStop {
By("Deleting the tester pod")
c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil)
}
}()
err = framework.WaitForPodRunningInNamespace(c, preStopDescr)
framework.ExpectNoError(err, "waiting for tester pod to start")
// Delete the pod with the preStop handler.
By("Deleting pre-stop pod")
if err := c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil); err == nil {
deletePreStop = false
}
framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name))
// Validate that the server received the web poke.
err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
var body []byte
body, err = c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(ns).
Resource("pods").
SubResource("proxy").
Name(podDescr.Name).
Suffix("read").
DoRaw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Error validating prestop: %v", err)
return true, err
}
By(fmt.Sprintf("Error validating prestop: %v", err))
} else {
framework.Logf("Saw: %s", string(body))
state := State{}
err := json.Unmarshal(body, &state)
if err != nil {
framework.Logf("Error parsing: %v", err)
return false, nil
}
if state.Received["prestop"] != 0 {
return true, nil
}
}
return false, nil
})
framework.ExpectNoError(err, "validating pre-stop.")
}
var _ = SIGDescribe("PreStop", func() {
f := framework.NewDefaultFramework("prestop")
/*
Testname: pods-prestop-handler-invoked
Description: Makes sure a pod's preStop handler is successfully
invoked immediately before a container is terminated.
*/
framework.ConformanceIt("should call prestop when killing a pod ", func() {
testPreStop(f.ClientSet, f.Namespace.Name)
})
})

234
vendor/k8s.io/kubernetes/test/e2e/node/security_context.go generated vendored Normal file

@@ -0,0 +1,234 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/* This test checks that SecurityContext parameters specified at the
* pod or the container level work as intended. These tests cannot be
* run when the 'SecurityContextDeny' admission controller is not used
* so they are skipped by default.
*/
package node
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
podName := "security-context-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
HostIPC: hostIPC,
HostPID: hostPID,
SecurityContext: &v1.PodSecurityContext{},
Containers: []v1.Container{
{
Name: "test-container",
Image: "busybox",
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
var _ = SIGDescribe("Security Context [Feature:SecurityContext]", func() {
f := framework.NewDefaultFramework("security-context")
It("should support pod.Spec.SecurityContext.SupplementalGroups", func() {
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"id", "-G"}
pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
groups := []string{"1234", "5678"}
f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
})
It("should support pod.Spec.SecurityContext.RunAsUser", func() {
pod := scTestPod(false, false)
userID := int64(1001)
pod.Spec.SecurityContext.RunAsUser = &userID
pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
fmt.Sprintf("%v", userID),
})
})
It("should support container.SecurityContext.RunAsUser", func() {
pod := scTestPod(false, false)
userID := int64(1001)
overrideUserID := int64(1002)
pod.Spec.SecurityContext.RunAsUser = &userID
pod.Spec.Containers[0].SecurityContext = new(v1.SecurityContext)
pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID
pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
fmt.Sprintf("%v", overrideUserID),
})
})
It("should support volume SELinux relabeling", func() {
testPodSELinuxLabeling(f, false, false)
})
It("should support volume SELinux relabeling when using hostIPC", func() {
testPodSELinuxLabeling(f, true, false)
})
It("should support volume SELinux relabeling when using hostPID", func() {
testPodSELinuxLabeling(f, false, true)
})
It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
pod.Annotations[v1.SeccompPodAnnotationKey] = "docker/default"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
It("should support seccomp alpha docker/default annotation [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
})
It("should support seccomp default which is unconfined [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
})
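// For reference on the seccomp tests above: the annotation keys resolve to
// plain strings on the pod metadata. Assuming the constants from
// k8s.io/api/core/v1, the "docker/default" container case above amounts to
// setting:
//
//	Annotations: map[string]string{
//		"container.seccomp.security.alpha.kubernetes.io/test-container": "docker/default",
//	}
//
// The grep of /proc/self/status reads the "Seccomp:" field, where 0 means
// seccomp is disabled and 2 means a filter is active.
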
func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) {
// Write and read a file on an empty_dir volume, using a pod
// that carries the MCS label s0:c0,c1
pod := scTestPod(hostIPC, hostPID)
volumeName := "test-volume"
mountPath := "/mounted_volume"
pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
Medium: v1.StorageMediumDefault,
},
},
},
}
pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c0,c1",
}
pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
pod, err := client.Create(pod)
framework.ExpectNoError(err, "Error creating pod %v", pod)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
testContent := "hello"
testFilePath := mountPath + "/TEST"
err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
Expect(err).To(BeNil())
content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
Expect(err).To(BeNil())
Expect(content).To(ContainSubstring(testContent))
foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Confirm that the file can be accessed from a second
// pod using host_path with the same MCS label
volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeVolumeDir, foundPod.UID, volumeName)
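// For illustration with hypothetical values: given --volume-dir=/var/lib/kubelet
// and a pod UID of "1234abcd", this resolves to
// /var/lib/kubelet/pods/1234abcd/volumes/kubernetes.io~empty-dir/test-volume.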
By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
pod = scTestPod(hostIPC, hostPID)
pod.Spec.NodeName = foundPod.Spec.NodeName
volumeMounts := []v1.VolumeMount{
{
Name: volumeName,
MountPath: mountPath,
},
}
volumes := []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: volumeHostPath,
},
},
},
}
pod.Spec.Containers[0].VolumeMounts = volumeMounts
pod.Spec.Volumes = volumes
pod.Spec.Containers[0].Command = []string{"cat", testFilePath}
pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c0,c1",
}
f.TestContainerOutput("Pod with same MCS label reading test file", pod, 0, []string{testContent})
// Confirm that a pod with the same spec but a different MCS
// label cannot access the volume
pod = scTestPod(hostIPC, hostPID)
pod.Spec.Volumes = volumes
pod.Spec.Containers[0].VolumeMounts = volumeMounts
pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c2,c3",
}
_, err = client.Create(pod)
framework.ExpectNoError(err, "Error creating pod %v", pod)
err = f.WaitForPodRunning(pod.Name)
framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)
content, err = f.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
Expect(content).NotTo(ContainSubstring(testContent))
}

110
vendor/k8s.io/kubernetes/test/e2e/node/ssh.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"strings"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const maxNodes = 100
var _ = SIGDescribe("SSH", func() {
f := framework.NewDefaultFramework("ssh")
BeforeEach(func() {
// When adding more providers here, also implement their functionality in util.go's framework.GetSigner(...).
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
// This test SSHes into nodes, which requires the $HOME/.ssh/id_rsa key to be present. Skip
// if the environment does not have the key (not all CI systems support this use case).
framework.SkipUnlessSSHKeyPresent()
})
It("should SSH to all nodes and run commands", func() {
// Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses")
hosts, err := framework.NodeSSHHosts(f.ClientSet)
if err != nil {
framework.Failf("Error getting node hostnames: %v", err)
}
testCases := []struct {
cmd string
checkStdout bool
expectedStdout string
expectedStderr string
expectedCode int
expectedError error
}{
// Keep this test first - this variant runs on all nodes.
{`echo "Hello from $(whoami)@$(hostname)"`, false, "", "", 0, nil},
{`echo "foo" | grep "bar"`, true, "", "", 1, nil},
{`echo "Out" && echo "Error" >&2 && exit 7`, true, "Out", "Error", 7, nil},
}
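// Notes on the cases above: the first should succeed on every node; the second
// exits 1 because grep finds no match for "bar"; the third exercises stdout,
// stderr, and a nonzero exit code (7) from a single command.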
for i, testCase := range testCases {
// Only run the first test case against at most maxNodes nodes. Run
// the rest against the first node only, since they're basically
// testing SSH semantics (and we don't need to do that against every
// host in the cluster).
nodes := len(hosts)
if i > 0 {
nodes = 1
} else if nodes > maxNodes {
nodes = maxNodes
}
testhosts := hosts[:nodes]
By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))
for _, host := range testhosts {
result, err := framework.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
if err != testCase.expectedError {
framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
}
if testCase.checkStdout && stdout != testCase.expectedStdout {
framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout)
}
if stderr != testCase.expectedStderr {
framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr)
}
if result.Code != testCase.expectedCode {
framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode)
}
// Show stdout, stderr for logging purposes (already trimmed above).
if len(stdout) > 0 {
framework.Logf("Got stdout from %s: %s", host, stdout)
}
if len(stderr) > 0 {
framework.Logf("Got stderr from %s: %s", host, stderr)
}
}
}
// Quickly test that SSH itself errors correctly.
By("SSH'ing to a nonexistent host")
if _, err = framework.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
framework.Failf("Expected error trying to SSH to nonexistent host.")
}
})
})
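// A minimal sketch of driving framework.SSH outside the table-driven test
// above, relying only on the result fields already exercised there (Stdout,
// Code). The "uptime" command and the helper name are illustrative.
func sshUptimeSketch(host string) {
	result, err := framework.SSH("uptime", host, framework.TestContext.Provider)
	if err != nil {
		framework.Failf("SSH to %s failed: %v", host, err)
	}
	// Trim the trailing newline before logging, mirroring the handling above.
	framework.Logf("uptime on %s: %q (exit code %d)", host, strings.TrimSpace(result.Stdout), result.Code)
}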