Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 18:43:34 +00:00)
Update to kube v1.17
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Commit 3af1e26d7c (parent 327fcd1b1b), committed by mergify[bot]
vendor/k8s.io/kubernetes/test/utils/runners.go (generated, vendored) — 396 lines changed
@@ -28,6 +28,7 @@ import (
 	apps "k8s.io/api/apps/v1"
 	batch "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -36,7 +37,9 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -104,28 +107,31 @@ type RunObjectConfig interface {
 	GetReplicas() int
 	GetLabelValue(string) (string, bool)
 	GetGroupResource() schema.GroupResource
+	GetGroupVersionResource() schema.GroupVersionResource
 }
 
 type RCConfig struct {
-	Affinity          *v1.Affinity
-	Client            clientset.Interface
-	ScalesGetter      scaleclient.ScalesGetter
-	Image             string
-	Command           []string
-	Name              string
-	Namespace         string
-	PollInterval      time.Duration
-	Timeout           time.Duration
-	PodStatusFile     *os.File
-	Replicas          int
-	CpuRequest        int64 // millicores
-	CpuLimit          int64 // millicores
-	MemRequest        int64 // bytes
-	MemLimit          int64 // bytes
-	GpuLimit          int64 // count
-	ReadinessProbe    *v1.Probe
-	DNSPolicy         *v1.DNSPolicy
-	PriorityClassName string
+	Affinity                      *v1.Affinity
+	Client                        clientset.Interface
+	ScalesGetter                  scaleclient.ScalesGetter
+	Image                         string
+	Command                       []string
+	Name                          string
+	Namespace                     string
+	PollInterval                  time.Duration
+	Timeout                       time.Duration
+	PodStatusFile                 *os.File
+	Replicas                      int
+	CpuRequest                    int64 // millicores
+	CpuLimit                      int64 // millicores
+	MemRequest                    int64 // bytes
+	MemLimit                      int64 // bytes
+	GpuLimit                      int64 // count
+	ReadinessProbe                *v1.Probe
+	DNSPolicy                     *v1.DNSPolicy
+	PriorityClassName             string
+	TerminationGracePeriodSeconds *int64
+	Lifecycle                     *v1.Lifecycle
 
 	// Env vars, set the same for every pod.
 	Env map[string]string
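The two fields appended to RCConfig above (TerminationGracePeriodSeconds and Lifecycle) are plain data knobs for the pod template; below is a minimal sketch of how a caller might populate them, assuming the vendored test/utils import path and an already-built clientset. The image, names, preStop hook and one-second grace are illustrative values, not defaults introduced by this change.

package rcconfigexample

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/utils"
)

// buildRCConfig fills a few pre-existing RCConfig fields plus the two new
// ones; the grace period and preStop hook here are example values only.
func buildRCConfig(c clientset.Interface) testutils.RCConfig {
	grace := int64(1)
	return testutils.RCConfig{
		Client:                        c,
		Name:                          "load-test-rc",
		Namespace:                     "default",
		Image:                         "k8s.gcr.io/pause:3.1",
		Replicas:                      3,
		TerminationGracePeriodSeconds: &grace,
		Lifecycle: &v1.Lifecycle{
			PreStop: &v1.Handler{
				Exec: &v1.ExecAction{Command: []string{"sleep", "1"}},
			},
		},
	}
}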
@@ -155,6 +161,9 @@ type RCConfig struct {
 	// Maximum allowable container failures. If exceeded, RunRC returns an error.
 	// Defaults to replicas*0.1 if unspecified.
 	MaxContainerFailures *int
+	// Maximum allowed pod deletions count. If exceeded, RunRC returns an error.
+	// Defaults to 0.
+	MaxAllowedPodDeletions int
 
 	// If set to false starting RC will print progress, otherwise only errors will be printed.
 	Silent bool
@@ -295,6 +304,10 @@ func (config *DeploymentConfig) GetGroupResource() schema.GroupResource {
 	return extensionsinternal.Resource("deployments")
 }
 
+func (config *DeploymentConfig) GetGroupVersionResource() schema.GroupVersionResource {
+	return extensionsinternal.SchemeGroupVersion.WithResource("deployments")
+}
+
 func (config *DeploymentConfig) create() error {
 	deployment := &apps.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
@@ -313,13 +326,15 @@ func (config *DeploymentConfig) create() error {
 					Annotations: config.Annotations,
 				},
 				Spec: v1.PodSpec{
-					Affinity: config.Affinity,
+					Affinity:                      config.Affinity,
+					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:    config.Name,
-							Image:   config.Image,
-							Command: config.Command,
-							Ports:   []v1.ContainerPort{{ContainerPort: 80}},
+							Name:      config.Name,
+							Image:     config.Image,
+							Command:   config.Command,
+							Ports:     []v1.ContainerPort{{ContainerPort: 80}},
+							Lifecycle: config.Lifecycle,
 						},
 					},
 				},
@@ -371,6 +386,10 @@ func (config *ReplicaSetConfig) GetGroupResource() schema.GroupResource {
 	return extensionsinternal.Resource("replicasets")
 }
 
+func (config *ReplicaSetConfig) GetGroupVersionResource() schema.GroupVersionResource {
+	return extensionsinternal.SchemeGroupVersion.WithResource("replicasets")
+}
+
 func (config *ReplicaSetConfig) create() error {
 	rs := &apps.ReplicaSet{
 		ObjectMeta: metav1.ObjectMeta{
@@ -389,13 +408,15 @@ func (config *ReplicaSetConfig) create() error {
 					Annotations: config.Annotations,
 				},
 				Spec: v1.PodSpec{
-					Affinity: config.Affinity,
+					Affinity:                      config.Affinity,
+					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:    config.Name,
-							Image:   config.Image,
-							Command: config.Command,
-							Ports:   []v1.ContainerPort{{ContainerPort: 80}},
+							Name:      config.Name,
+							Image:     config.Image,
+							Command:   config.Command,
+							Ports:     []v1.ContainerPort{{ContainerPort: 80}},
+							Lifecycle: config.Lifecycle,
 						},
 					},
 				},
@@ -443,6 +464,10 @@ func (config *JobConfig) GetGroupResource() schema.GroupResource {
 	return batchinternal.Resource("jobs")
 }
 
+func (config *JobConfig) GetGroupVersionResource() schema.GroupVersionResource {
+	return batchinternal.SchemeGroupVersion.WithResource("jobs")
+}
+
 func (config *JobConfig) create() error {
 	job := &batch.Job{
 		ObjectMeta: metav1.ObjectMeta{
@@ -457,12 +482,14 @@ func (config *JobConfig) create() error {
 					Annotations: config.Annotations,
 				},
 				Spec: v1.PodSpec{
-					Affinity: config.Affinity,
+					Affinity:                      config.Affinity,
+					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:    config.Name,
-							Image:   config.Image,
-							Command: config.Command,
+							Name:      config.Name,
+							Image:     config.Image,
+							Command:   config.Command,
+							Lifecycle: config.Lifecycle,
 						},
 					},
 					RestartPolicy: v1.RestartPolicyOnFailure,
@@ -519,6 +546,10 @@ func (config *RCConfig) GetGroupResource() schema.GroupResource {
 	return api.Resource("replicationcontrollers")
 }
 
+func (config *RCConfig) GetGroupVersionResource() schema.GroupVersionResource {
+	return api.SchemeGroupVersion.WithResource("replicationcontrollers")
+}
+
 func (config *RCConfig) GetClient() clientset.Interface {
 	return config.Client
 }
@@ -573,12 +604,13 @@ func (config *RCConfig) create() error {
 							Command:        config.Command,
 							Ports:          []v1.ContainerPort{{ContainerPort: 80}},
 							ReadinessProbe: config.ReadinessProbe,
+							Lifecycle:      config.Lifecycle,
 						},
 					},
 					DNSPolicy:    *config.DNSPolicy,
 					NodeSelector: config.NodeSelector,
 					Tolerations:  config.Tolerations,
-					TerminationGracePeriodSeconds: &one,
+					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(&one),
 					PriorityClassName: config.PriorityClassName,
 				},
 			},
@@ -655,6 +687,9 @@ func (config *RCConfig) applyTo(template *v1.PodTemplateSpec) {
 	if config.GpuLimit > 0 {
 		template.Spec.Containers[0].Resources.Limits["nvidia.com/gpu"] = *resource.NewQuantity(config.GpuLimit, resource.DecimalSI)
 	}
+	if config.Lifecycle != nil {
+		template.Spec.Containers[0].Lifecycle = config.Lifecycle
+	}
 	if len(config.Volumes) > 0 {
 		template.Spec.Volumes = config.Volumes
 	}
@@ -763,6 +798,7 @@ func (config *RCConfig) start() error {
 	oldPods := make([]*v1.Pod, 0)
 	oldRunning := 0
 	lastChange := time.Now()
+	podDeletionsCount := 0
 	for oldRunning != config.Replicas {
 		time.Sleep(interval)
 
@@ -793,9 +829,10 @@ func (config *RCConfig) start() error {
 
 		diff := Diff(oldPods, pods)
 		deletedPods := diff.DeletedPods()
-		if len(deletedPods) != 0 {
-			// There are some pods that have disappeared.
-			err := fmt.Errorf("%d pods disappeared for %s: %v", len(deletedPods), config.Name, strings.Join(deletedPods, ", "))
+		podDeletionsCount += len(deletedPods)
+		if podDeletionsCount > config.MaxAllowedPodDeletions {
+			// Number of pods which disappeared is over threshold
+			err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", "))
 			config.RCConfigLog(err.Error())
 			config.RCConfigLog(diff.String(sets.NewString()))
 			return err
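The deletion bookkeeping introduced in this hunk is easy to see in isolation; the following is a hedged sketch of the same accounting with a hypothetical checkDeletions helper, not a function from this file: pod disappearances accumulate across polling rounds, and an error is raised only once the running total exceeds the configured MaxAllowedPodDeletions budget.

package deletionbudget

import (
	"fmt"
	"strings"
)

// checkDeletions mirrors the logic added to RCConfig.start above: deletions
// accumulate in *total, and the error fires only when the budget is exceeded.
func checkDeletions(total *int, deleted []string, budget int, rcName string) error {
	*total += len(deleted)
	if *total > budget {
		return fmt.Errorf("%d pods disappeared for %s: %v", *total, rcName, strings.Join(deleted, ", "))
	}
	return nil
}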
@@ -908,12 +945,22 @@ type TestNodePreparer interface {
 }
 
 type PrepareNodeStrategy interface {
+	// Modify pre-created Node objects before the test starts.
 	PreparePatch(node *v1.Node) []byte
+	// Create or modify any objects that depend on the node before the test starts.
+	// Caller will re-try when http.StatusConflict error is returned.
+	PrepareDependentObjects(node *v1.Node, client clientset.Interface) error
+	// Clean up any node modifications after the test finishes.
 	CleanupNode(node *v1.Node) *v1.Node
+	// Clean up any objects that depend on the node after the test finishes.
+	// Caller will re-try when http.StatusConflict error is returned.
+	CleanupDependentObjects(nodeName string, client clientset.Interface) error
 }
 
 type TrivialNodePrepareStrategy struct{}
 
+var _ PrepareNodeStrategy = &TrivialNodePrepareStrategy{}
+
 func (*TrivialNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
 	return []byte{}
 }
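The var _ PrepareNodeStrategy = &TrivialNodePrepareStrategy{} lines added in this hunk and the next are compile-time interface assertions. A small generic sketch of the pattern follows, with made-up types (nothing here comes from runners.go):

package assertsketch

// stringer stands in for PrepareNodeStrategy; the blank-identifier assignment
// below stops compiling the moment *trivial no longer satisfies the interface,
// which is exactly what the assertions above buy for the node strategies.
type stringer interface{ String() string }

type trivial struct{}

func (*trivial) String() string { return "trivial" }

var _ stringer = &trivial{}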
@@ -923,11 +970,21 @@ func (*TrivialNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
 	return &nodeCopy
 }
 
+func (*TrivialNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
+	return nil
+}
+
+func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
+	return nil
+}
+
 type LabelNodePrepareStrategy struct {
 	labelKey   string
 	labelValue string
 }
 
+var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}
+
 func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy {
 	return &LabelNodePrepareStrategy{
 		labelKey: labelKey,
@@ -949,6 +1006,148 @@ func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
 	return nodeCopy
 }
 
+func (*LabelNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
+	return nil
+}
+
+func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
+	return nil
+}
+
+// NodeAllocatableStrategy fills node.status.allocatable and csiNode.spec.drivers[*].allocatable.
+// csiNode is created if it does not exist. On cleanup, any csiNode.spec.drivers[*].allocatable is
+// set to nil.
+type NodeAllocatableStrategy struct {
+	// Node.status.allocatable to fill to all nodes.
+	nodeAllocatable map[v1.ResourceName]string
+	// Map <driver_name> -> VolumeNodeResources to fill into csiNode.spec.drivers[<driver_name>].
+	csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
+	// List of in-tree volume plugins migrated to CSI.
+	migratedPlugins []string
+}
+
+var _ PrepareNodeStrategy = &NodeAllocatableStrategy{}
+
+func NewNodeAllocatableStrategy(nodeAllocatable map[v1.ResourceName]string, csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources, migratedPlugins []string) *NodeAllocatableStrategy {
+	return &NodeAllocatableStrategy{nodeAllocatable, csiNodeAllocatable, migratedPlugins}
+}
+
+func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
+	newNode := node.DeepCopy()
+	for name, value := range s.nodeAllocatable {
+		newNode.Status.Allocatable[name] = resource.MustParse(value)
+	}
+
+	oldJSON, err := json.Marshal(node)
+	if err != nil {
+		panic(err)
+	}
+	newJSON, err := json.Marshal(newNode)
+	if err != nil {
+		panic(err)
+	}
+
+	patch, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, v1.Node{})
+	if err != nil {
+		panic(err)
+	}
+	return patch
+}
+
+func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
+	nodeCopy := node.DeepCopy()
+	for name := range s.nodeAllocatable {
+		delete(nodeCopy.Status.Allocatable, name)
+	}
+	return nodeCopy
+}
+
+func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientset.Interface) error {
+	csiNode := &storagev1beta1.CSINode{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: nodeName,
+			Annotations: map[string]string{
+				v1.MigratedPluginsAnnotationKey: strings.Join(s.migratedPlugins, ","),
+			},
+		},
+		Spec: storagev1beta1.CSINodeSpec{
+			Drivers: []storagev1beta1.CSINodeDriver{},
+		},
+	}
+
+	for driver, allocatable := range s.csiNodeAllocatable {
+		d := storagev1beta1.CSINodeDriver{
+			Name:        driver,
+			Allocatable: allocatable,
+			NodeID:      nodeName,
+		}
+		csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
+	}
+
+	_, err := client.StorageV1beta1().CSINodes().Create(csiNode)
+	if apierrs.IsAlreadyExists(err) {
+		// Something created CSINode instance after we checked it did not exist.
+		// Make the caller to re-try PrepareDependentObjects by returning Conflict error
+		err = apierrs.NewConflict(storagev1beta1.Resource("csinodes"), nodeName, err)
+	}
+	return err
+}
+
+func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, client clientset.Interface) error {
+	for driverName, allocatable := range s.csiNodeAllocatable {
+		found := false
+		for i, driver := range csiNode.Spec.Drivers {
+			if driver.Name == driverName {
+				found = true
+				csiNode.Spec.Drivers[i].Allocatable = allocatable
+				break
+			}
+		}
+		if !found {
+			d := storagev1beta1.CSINodeDriver{
+				Name:        driverName,
+				Allocatable: allocatable,
+			}
+
+			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
+		}
+	}
+	csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.migratedPlugins, ",")
+
+	_, err := client.StorageV1beta1().CSINodes().Update(csiNode)
+	return err
+}
+
+func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
+	csiNode, err := client.StorageV1beta1().CSINodes().Get(node.Name, metav1.GetOptions{})
+	if err != nil {
+		if apierrs.IsNotFound(err) {
+			return s.createCSINode(node.Name, client)
+		}
+		return err
+	}
+	return s.updateCSINode(csiNode, client)
+}
+
+func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
+	csiNode, err := client.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{})
+	if err != nil {
+		if apierrs.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	for driverName := range s.csiNodeAllocatable {
+		for i, driver := range csiNode.Spec.Drivers {
+			if driver.Name == driverName {
+				csiNode.Spec.Drivers[i].Allocatable = nil
+			}
+		}
+	}
+	return s.updateCSINode(csiNode, client)
+}
+
 func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
 	var err error
 	patch := strategy.PreparePatch(node)
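One possible way to instantiate the NodeAllocatableStrategy added above, as a sketch only: it assumes the vendored test/utils import path, uses a hypothetical CSI driver name, and the allocatable quantities are purely illustrative rather than values suggested by the diff.

package allocsketch

import (
	v1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	testutils "k8s.io/kubernetes/test/utils"
)

// newExampleStrategy pads node allocatable and advertises a per-node volume
// limit for one CSI driver — the two things the strategy's PreparePatch and
// PrepareDependentObjects later push onto the Node and CSINode objects.
func newExampleStrategy() *testutils.NodeAllocatableStrategy {
	count := int32(10)
	return testutils.NewNodeAllocatableStrategy(
		map[v1.ResourceName]string{v1.ResourcePods: "110"},
		map[string]*storagev1beta1.VolumeNodeResources{
			"hostpath.csi.k8s.io": {Count: &count},
		},
		nil, // no in-tree plugins marked as migrated in this sketch
	)
}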
@@ -957,17 +1156,34 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 	}
 	for attempt := 0; attempt < retries; attempt++ {
 		if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil {
-			return nil
+			break
 		}
 		if !apierrs.IsConflict(err) {
 			return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
-	return fmt.Errorf("To many conflicts when applying patch %v to Node %v", string(patch), node.Name)
+	if err != nil {
+		return fmt.Errorf("Too many conflicts when applying patch %v to Node %v: %s", string(patch), node.Name, err)
+	}
+
+	for attempt := 0; attempt < retries; attempt++ {
+		if err = strategy.PrepareDependentObjects(node, client); err == nil {
+			break
+		}
+		if !apierrs.IsConflict(err) {
+			return fmt.Errorf("Error while preparing objects for node %s: %s", node.Name, err)
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	if err != nil {
+		return fmt.Errorf("Too many conflicts when creating objects for node %s: %s", node.Name, err)
+	}
+	return nil
 }
 
 func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
+	var err error
 	for attempt := 0; attempt < retries; attempt++ {
 		node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
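The patch and prepare loops rewritten above share one shape: retry a fixed number of times, sleep briefly between attempts, retry only on Conflict, and report the last error once the budget runs out. A stand-alone sketch of that shape follows; retryOnConflict is a hypothetical helper, not part of this file.

package retrysketch

import (
	"fmt"
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
)

// retryOnConflict runs op up to retries times. Conflict errors are retried
// after a short sleep; any other error aborts immediately, matching the
// behaviour of the loops in DoPrepareNode and DoCleanupNode above.
func retryOnConflict(retries int, op func() error) error {
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if !apierrs.IsConflict(err) {
			return err
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("too many conflicts: %v", err)
}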
@@ -978,14 +1194,31 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
 			return nil
 		}
 		if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil {
-			return nil
+			break
 		}
 		if !apierrs.IsConflict(err) {
 			return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
-	return fmt.Errorf("To many conflicts when trying to cleanup Node %v", nodeName)
+	if err != nil {
+		return fmt.Errorf("Too many conflicts when trying to cleanup Node %v: %s", nodeName, err)
+	}
+
+	for attempt := 0; attempt < retries; attempt++ {
+		err = strategy.CleanupDependentObjects(nodeName, client)
+		if err == nil {
+			break
+		}
+		if !apierrs.IsConflict(err) {
+			return fmt.Errorf("Error when cleaning up Node %v objects: %v", nodeName, err)
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	if err != nil {
+		return fmt.Errorf("Too many conflicts when trying to cleanup Node %v objects: %s", nodeName, err)
+	}
+	return nil
 }
 
 type TestPodCreateStrategy func(client clientset.Interface, namespace string, podCount int) error
@@ -1077,6 +1310,70 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe
 	return createError
 }
 
+func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int) error {
+	var createError error
+	lock := sync.Mutex{}
+	createPodFunc := func(i int) {
+		pvcName := fmt.Sprintf("pvc-%d", i)
+
+		// pv
+		pv := factory(i)
+		// bind to "pvc-$i"
+		pv.Spec.ClaimRef = &v1.ObjectReference{
+			Kind:       "PersistentVolumeClaim",
+			Namespace:  namespace,
+			Name:       pvcName,
+			APIVersion: "v1",
+		}
+		pv.Status.Phase = v1.VolumeBound
+		if err := CreatePersistentVolumeWithRetries(client, pv); err != nil {
+			lock.Lock()
+			defer lock.Unlock()
+			createError = fmt.Errorf("error creating PV: %s", err)
+			return
+		}
+
+		// pvc
+		pvc := claimTemplate.DeepCopy()
+		pvc.Name = pvcName
+		// bind to "pv-$i"
+		pvc.Spec.VolumeName = pv.Name
+		pvc.Status.Phase = v1.ClaimBound
+		if err := CreatePersistentVolumeClaimWithRetries(client, namespace, pvc); err != nil {
+			lock.Lock()
+			defer lock.Unlock()
+			createError = fmt.Errorf("error creating PVC: %s", err)
+			return
+		}
+
+		// pod
+		pod := podTemplate.DeepCopy()
+		pod.Spec.Volumes = []v1.Volume{
+			{
+				Name: "vol",
+				VolumeSource: v1.VolumeSource{
+					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+						ClaimName: pvcName,
+					},
+				},
+			},
+		}
+		if err := makeCreatePod(client, namespace, pod); err != nil {
+			lock.Lock()
+			defer lock.Unlock()
+			createError = err
+			return
+		}
+	}
+
+	if count < 30 {
+		workqueue.ParallelizeUntil(context.TODO(), count, count, createPodFunc)
+	} else {
+		workqueue.ParallelizeUntil(context.TODO(), 30, count, createPodFunc)
+	}
+	return createError
+}
+
 func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
 	rc := &v1.ReplicationController{
 		ObjectMeta: metav1.ObjectMeta{
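One way the new CreatePodWithPersistentVolume helper and its volumeFactory callback could be driven is sketched below, under assumptions the diff does not dictate: hostPath-backed PVs, 1Gi sizes, and a pause-image pod template.

package pvsketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/utils"
)

// createPVPods builds one unique hostPath PV per index (pv-$i); the helper
// then clones claimTemplate into pvc-$i, pre-binds the pair, and attaches the
// claim to a copy of the pod template. All names, paths and sizes are
// illustrative.
func createPVPods(c clientset.Interface, namespace string, count int) error {
	factory := func(i int) *v1.PersistentVolume {
		return &v1.PersistentVolume{
			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("pv-%d", i)},
			Spec: v1.PersistentVolumeSpec{
				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
				Capacity:    v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
				PersistentVolumeSource: v1.PersistentVolumeSource{
					HostPath: &v1.HostPathVolumeSource{Path: fmt.Sprintf("/tmp/pv-%d", i)},
				},
			},
		}
	}
	claim := &v1.PersistentVolumeClaim{
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
			},
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pv-user-"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "pause", Image: "k8s.gcr.io/pause:3.1"}},
		},
	}
	return testutils.CreatePodWithPersistentVolume(c, namespace, claim, factory, pod, count)
}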
@@ -1105,6 +1402,14 @@ func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
 	}
 }
 
+// volumeFactory creates an unique PersistentVolume for given integer.
+type volumeFactory func(uniqueID int) *v1.PersistentVolume
+
+func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
+	return func(client clientset.Interface, namespace string, podCount int) error {
+		return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount)
+	}
+}
 func NewSimpleCreatePodStrategy() TestPodCreateStrategy {
 	basePod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -1249,6 +1554,13 @@ func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) {
 	template.Spec.Containers[0].VolumeMounts = mounts
 }
 
+func (config *RCConfig) getTerminationGracePeriodSeconds(defaultGrace *int64) *int64 {
+	if config.TerminationGracePeriodSeconds == nil || *config.TerminationGracePeriodSeconds < 0 {
+		return defaultGrace
+	}
+	return config.TerminationGracePeriodSeconds
+}
+
 func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name string) {
 	template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
 		v1.VolumeMount{
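The defaulting rule in getTerminationGracePeriodSeconds above is worth seeing in isolation; a tiny stand-alone sketch follows (the function and variable names are made up). In the hunks earlier in this diff, the Deployment, ReplicaSet and Job templates pass a nil default, while RCConfig.create passes its existing one-second value, preserving the old behaviour when nothing is configured.

package gracesketch

// terminationGrace mirrors the rule added above: a nil or negative configured
// value falls back to the supplied default (which may itself be nil);
// otherwise the configured pointer wins.
func terminationGrace(configured, defaultGrace *int64) *int64 {
	if configured == nil || *configured < 0 {
		return defaultGrace
	}
	return configured
}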
@@ -1382,7 +1694,7 @@ func (config *DaemonConfig) Run() error {
 		return running == len(nodes.Items), nil
 	})
 	if err != nil {
-		config.LogFunc("Timed out while waiting for DaemonsSet %v/%v to be running.", config.Namespace, config.Name)
+		config.LogFunc("Timed out while waiting for DaemonSet %v/%v to be running.", config.Namespace, config.Name)
 	} else {
 		config.LogFunc("Created Daemon %v/%v", config.Namespace, config.Name)
 	}