vendor updates
vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go (generated, vendored): 51 changed lines
@@ -94,42 +94,6 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
 	})
 }
 
-// updates labels of nodes given by nodeNames.
-// In case a given label already exists, it overwrites it. If label to remove doesn't exist
-// it silently ignores it.
-// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
-func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
-	const maxRetries = 5
-	for nodeName := range nodeNames {
-		var node *v1.Node
-		var err error
-		for i := 0; i < maxRetries; i++ {
-			node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-			if err != nil {
-				framework.Logf("Error getting node %s: %v", nodeName, err)
-				continue
-			}
-			if toAdd != nil {
-				for k, v := range toAdd {
-					node.ObjectMeta.Labels[k] = v
-				}
-			}
-			if toRemove != nil {
-				for k := range toRemove {
-					delete(node.ObjectMeta.Labels, k)
-				}
-			}
-			_, err = c.CoreV1().Nodes().Update(node)
-			if err != nil {
-				framework.Logf("Error updating node %s: %v", nodeName, err)
-			} else {
-				break
-			}
-		}
-		Expect(err).NotTo(HaveOccurred())
-	}
-}
-
 // Restart the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 1` command in the
 // pod's (only) container. This command changes the number of nfs server threads from
 // (presumably) zero back to 1, and therefore allows nfs to open connections again.
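The trailing context comments in this hunk describe how the test revives the NFS server: it runs `/usr/sbin/rpc.nfsd 1` inside the server pod's only container so nfsd goes from zero threads back to one and clients can reconnect. The helper that does this is not part of the diff; the sketch below only illustrates that approach, assuming the vendored framework.RunKubectlOrDie utility, and is not the vendored implementation.

package node

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// restartNFSServerSketch is a hypothetical helper, not the vendored code: it
// execs `/usr/sbin/rpc.nfsd 1` inside the NFS server pod to bring the nfsd
// thread count back up so NFS clients can open connections again.
func restartNFSServerSketch(serverPod *v1.Pod) {
	const startcmd = "/usr/sbin/rpc.nfsd 1"
	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
	framework.RunKubectlOrDie("exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
}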
@@ -317,7 +281,11 @@ var _ = SIGDescribe("kubelet", func() {
 		for i := 0; i < numNodes; i++ {
 			nodeNames.Insert(nodes.Items[i].Name)
 		}
-		updateNodeLabels(c, nodeNames, nodeLabels, nil)
+		for nodeName := range nodeNames {
+			for k, v := range nodeLabels {
+				framework.AddOrUpdateLabelOnNode(c, nodeName, k, v)
+			}
+		}
 
 		// Start resourceMonitor only in small clusters.
 		if len(nodes.Items) <= maxNodesToCheck {
@@ -331,7 +299,11 @@ var _ = SIGDescribe("kubelet", func() {
 			resourceMonitor.Stop()
 		}
 		// If we added labels to nodes in this test, remove them now.
-		updateNodeLabels(c, nodeNames, nil, nodeLabels)
+		for nodeName := range nodeNames {
+			for k := range nodeLabels {
+				framework.RemoveLabelOffNode(c, nodeName, k)
+			}
+		}
 	})
 
 	for _, itArg := range deleteTests {
@@ -400,7 +372,6 @@ var _ = SIGDescribe("kubelet", func() {
 		var (
 			nfsServerPod *v1.Pod
 			nfsIP        string
-			NFSconfig    framework.VolumeTestConfig
 			pod          *v1.Pod // client pod
 		)
 
@@ -418,7 +389,7 @@ var _ = SIGDescribe("kubelet", func() {
 
 		BeforeEach(func() {
 			framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-			NFSconfig, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
+			_, nfsServerPod, nfsIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
 		})
 
 		AfterEach(func() {
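Taken together, the kubelet.go hunks replace the locally defined, retry-based updateNodeLabels helper with the framework's own label utilities. A minimal self-contained sketch of that pattern follows, assuming framework.AddOrUpdateLabelOnNode and framework.RemoveLabelOffNode from the vendored e2e framework; the wrapper function names here are illustrative, since the diff simply inlines the loops in the test body.

package node

import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// applyNodeLabels adds every key/value in labels to each named node. Retries
// and update conflicts are handled inside the framework helper.
func applyNodeLabels(c clientset.Interface, nodeNames sets.String, labels map[string]string) {
	for nodeName := range nodeNames {
		for k, v := range labels {
			framework.AddOrUpdateLabelOnNode(c, nodeName, k, v)
		}
	}
}

// stripNodeLabels removes the same keys from each named node during cleanup.
func stripNodeLabels(c clientset.Interface, nodeNames sets.String, labels map[string]string) {
	for nodeName := range nodeNames {
		for k := range labels {
			framework.RemoveLabelOffNode(c, nodeName, k)
		}
	}
}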
vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go (generated, vendored): 2 changed lines
@@ -257,7 +257,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 			podsPerNode: 100,
 			memLimits: framework.ResourceUsagePerContainer{
 				stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
-				stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
+				stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 350 * 1024 * 1024},
 			},
 		},
 	}
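The kubelet_perf.go hunk raises the expected RSS ceiling for the runtime system container in the 100-pods-per-node case from 300 MiB to 350 MiB. A trivial standalone check of the byte values involved, with illustrative variable names:

package main

import "fmt"

func main() {
	const mib = 1024 * 1024
	oldLimit := 300 * mib // 314572800 bytes
	newLimit := 350 * mib // 367001600 bytes
	// The ceiling grows by 50 MiB (52428800 bytes).
	fmt.Printf("old=%d new=%d delta=%d\n", oldLimit, newLimit, newLimit-oldLimit)
}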
vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go (generated, vendored): 2 changed lines
@@ -74,7 +74,7 @@ func preparePod(name string, node *v1.Node, propagation v1.MountPropagationMode,
 	return pod
 }
 
-var _ = SIGDescribe("Mount propagation [Feature:MountPropagation]", func() {
+var _ = SIGDescribe("Mount propagation", func() {
 	f := framework.NewDefaultFramework("mount-propagation")
 
 	It("should propagate mounts to the host", func() {
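Dropping the [Feature:MountPropagation] tag matters because e2e jobs commonly skip any spec whose full text matches a [Feature:...] pattern; without the tag the spec is picked up by those default passes. A small self-contained illustration of that filtering effect; the skip regex shown is an assumption about a typical job configuration, not something taken from this commit.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A skip pattern of the kind commonly passed to the e2e runner via
	// --ginkgo.skip; the exact pattern any given CI job uses may differ.
	skip := regexp.MustCompile(`\[Feature:.+\]`)

	tagged := "Mount propagation [Feature:MountPropagation] should propagate mounts to the host"
	untagged := "Mount propagation should propagate mounts to the host"

	fmt.Println(skip.MatchString(tagged))   // true  -> spec skipped
	fmt.Println(skip.MatchString(untagged)) // false -> spec runs
}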