Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor files
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD (generated, vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["downwardapi.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/downwardapi",
    deps = [
        "//pkg/api/v1/resource:go_default_library",
        "//pkg/fieldpath:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["downwardapi_test.go"],
    importpath = "k8s.io/kubernetes/pkg/volume/downwardapi",
    library = ":go_default_library",
    deps = [
        "//pkg/fieldpath:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/empty_dir:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
reviewers:
- ivan4th
- rata
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go (generated, vendored, new file, 303 lines)
@@ -0,0 +1,303 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package downwardapi

import (
    "fmt"
    "path"
    "sort"
    "strings"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/kubernetes/pkg/api/v1/resource"
    "k8s.io/kubernetes/pkg/fieldpath"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"

    "github.com/golang/glog"
)

// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
    return []volume.VolumePlugin{&downwardAPIPlugin{}}
}

const (
    downwardAPIPluginName = "kubernetes.io/downward-api"
)

// downwardAPIPlugin implements the VolumePlugin interface.
type downwardAPIPlugin struct {
    host volume.VolumeHost
}

var _ volume.VolumePlugin = &downwardAPIPlugin{}

func wrappedVolumeSpec() volume.Spec {
    return volume.Spec{
        Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
    }
}

func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error {
    plugin.host = host
    return nil
}

func (plugin *downwardAPIPlugin) GetPluginName() string {
    return downwardAPIPluginName
}

func (plugin *downwardAPIPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
    volumeSource, _ := getVolumeSource(spec)
    if volumeSource == nil {
        return "", fmt.Errorf("Spec does not reference a DownwardAPI volume type")
    }

    // Return the user-defined volume name, since this is an ephemeral volume type
    return spec.Name(), nil
}

func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
    return spec.Volume != nil && spec.Volume.DownwardAPI != nil
}

func (plugin *downwardAPIPlugin) RequiresRemount() bool {
    return true
}

func (plugin *downwardAPIPlugin) SupportsMountOption() bool {
    return false
}

func (plugin *downwardAPIPlugin) SupportsBulkVolumeVerification() bool {
    return false
}

func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
    v := &downwardAPIVolume{
        volName: spec.Name(),
        items: spec.Volume.DownwardAPI.Items,
        pod: pod,
        podUID: pod.UID,
        plugin: plugin,
    }
    return &downwardAPIVolumeMounter{
        downwardAPIVolume: v,
        source: *spec.Volume.DownwardAPI,
        opts: &opts,
    }, nil
}

func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
    return &downwardAPIVolumeUnmounter{
        &downwardAPIVolume{
            volName: volName,
            podUID: podUID,
            plugin: plugin,
        },
    }, nil
}

func (plugin *downwardAPIPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
    downwardAPIVolume := &v1.Volume{
        Name: volumeName,
        VolumeSource: v1.VolumeSource{
            DownwardAPI: &v1.DownwardAPIVolumeSource{},
        },
    }
    return volume.NewSpecFromVolume(downwardAPIVolume), nil
}

// downwardAPIVolume retrieves downward API data and places it into the volume on the host.
type downwardAPIVolume struct {
    volName string
    items []v1.DownwardAPIVolumeFile
    pod *v1.Pod
    podUID types.UID // TODO: remove this redundancy as soon as the NewUnmounter func has a *v1.Pod and not only types.UID
    plugin *downwardAPIPlugin
    volume.MetricsNil
}

// downwardAPIVolumeMounter fetches downward API info from the pod
// and dumps it into files
type downwardAPIVolumeMounter struct {
    *downwardAPIVolume
    source v1.DownwardAPIVolumeSource
    opts *volume.VolumeOptions
}

// downwardAPIVolumeMounter implements the volume.Mounter interface
var _ volume.Mounter = &downwardAPIVolumeMounter{}

// downward API volumes are always ReadOnly and Managed
func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
    return volume.Attributes{
        ReadOnly: true,
        Managed: true,
        SupportsSELinux: true,
    }
}

// CanMount checks prior to mount operations that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *downwardAPIVolumeMounter) CanMount() error {
    return nil
}

// SetUp puts the volume plugin in place.
// This function is not idempotent by design. We want the data to be refreshed periodically.
// The internal sync interval of kubelet will drive the refresh of data.
// TODO: Add volume-specific ticker and refresh loop
func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
    return b.SetUpAt(b.GetPath(), fsGroup)
}

func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
    glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
    // Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
    wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
    if err != nil {
        glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
        return err
    }
    if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
        glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
        return err
    }

    data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode)
    if err != nil {
        glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
        return err
    }

    writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
    writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
    if err != nil {
        glog.Errorf("Error creating atomic writer: %v", err)
        return err
    }

    err = writer.Write(data)
    if err != nil {
        glog.Errorf("Error writing payload to dir: %v", err)
        return err
    }

    err = volume.SetVolumeOwnership(b, fsGroup)
    if err != nil {
        glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
        return err
    }

    return nil
}

// CollectData collects the requested downwardAPI fields into a data map.
// The map's key is the requested name of the file to dump;
// the map's value is the (sorted) content of the field to be dumped into the file.
//
// Note: this function is exported so that it can be called from the projection volume driver
func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.VolumeHost, defaultMode *int32) (map[string]volumeutil.FileProjection, error) {
    if defaultMode == nil {
        return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
    }

    errlist := []error{}
    data := make(map[string]volumeutil.FileProjection)
    for _, fileInfo := range items {
        var fileProjection volumeutil.FileProjection
        fPath := path.Clean(fileInfo.Path)
        if fileInfo.Mode != nil {
            fileProjection.Mode = *fileInfo.Mode
        } else {
            fileProjection.Mode = *defaultMode
        }
        if fileInfo.FieldRef != nil {
            // TODO: unify with Kubelet.podFieldSelectorRuntimeValue
            if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil {
                glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
                errlist = append(errlist, err)
            } else {
                fileProjection.Data = []byte(sortLines(values))
            }
        } else if fileInfo.ResourceFieldRef != nil {
            containerName := fileInfo.ResourceFieldRef.ContainerName
            nodeAllocatable, err := host.GetNodeAllocatable()
            if err != nil {
                errlist = append(errlist, err)
            } else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil {
                glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
                errlist = append(errlist, err)
            } else {
                fileProjection.Data = []byte(sortLines(values))
            }
        }

        data[fPath] = fileProjection
    }
    return data, utilerrors.NewAggregate(errlist)
}

// sortLines sorts the strings generated from map-based data
// (annotations and labels)
func sortLines(values string) string {
    splitted := strings.Split(values, "\n")
    sort.Strings(splitted)
    return strings.Join(splitted, "\n")
}

func (d *downwardAPIVolume) GetPath() string {
    return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}

// downwardAPIVolumeUnmounter handles cleaning up downwardAPI volumes
type downwardAPIVolumeUnmounter struct {
    *downwardAPIVolume
}

// downwardAPIVolumeUnmounter implements the volume.Unmounter interface
var _ volume.Unmounter = &downwardAPIVolumeUnmounter{}

func (c *downwardAPIVolumeUnmounter) TearDown() error {
    return c.TearDownAt(c.GetPath())
}

func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
    return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}

func (b *downwardAPIVolumeMounter) getMetaDir() string {
    return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}

func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) {
    var readOnly bool
    var volumeSource *v1.DownwardAPIVolumeSource

    if spec.Volume != nil && spec.Volume.DownwardAPI != nil {
        volumeSource = spec.Volume.DownwardAPI
        readOnly = spec.ReadOnly
    }

    return volumeSource, readOnly
}
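For orientation, the sketch below is a standalone approximation of what CollectData produces for a single FieldRef item such as metadata.labels: each label is rendered one key="value" pair per line, the lines are sorted, and the result becomes the file payload handed to the atomic writer. It deliberately does not import the vendored packages; the helpers formatLabels and sortLinesLocal are illustrative stand-ins for fieldpath.FormatMap and the package's sortLines, not the real functions.

package main

import (
    "fmt"
    "sort"
    "strings"
)

// formatLabels mimics how a label/annotation map is rendered as
// one `key="value"` pair per line (the vendored code uses
// fieldpath.FormatMap for this; the name here is made up).
func formatLabels(m map[string]string) string {
    lines := make([]string, 0, len(m))
    for k, v := range m {
        lines = append(lines, fmt.Sprintf("%s=%q", k, v))
    }
    return strings.Join(lines, "\n")
}

// sortLinesLocal mirrors the package's sortLines helper: split on
// newlines, sort, and rejoin so the file content is deterministic.
func sortLinesLocal(values string) string {
    split := strings.Split(values, "\n")
    sort.Strings(split)
    return strings.Join(split, "\n")
}

func main() {
    labels := map[string]string{"key2": "value2", "key1": "value1"}
    payload := sortLinesLocal(formatLabels(labels))
    // A downward API volume would write this payload to the file
    // requested in DownwardAPIVolumeFile.Path (e.g. "labels").
    fmt.Println(payload)
    // Prints:
    // key1="value1"
    // key2="value2"
}

Under these assumptions, the cleaned DownwardAPIVolumeFile.Path (here "labels") would be the map key returned by CollectData, and the payload above would be the FileProjection.Data handed to the atomic writer.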
vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi_test.go (generated, vendored, new file, 399 lines)
@@ -0,0 +1,399 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package downwardapi

import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
    utiltesting "k8s.io/client-go/util/testing"
    "k8s.io/kubernetes/pkg/fieldpath"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/empty_dir"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

const (
    downwardAPIDir = "..data"
    testPodUID = types.UID("test_pod_uid")
    testNamespace = "test_metadata_namespace"
    testName = "test_metadata_name"
)

func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
    tempDir, err := utiltesting.MkTmpdir("downwardApi_volume_test.")
    if err != nil {
        t.Fatalf("can't make a temp rootdir: %v", err)
    }
    return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
}

func TestCanSupport(t *testing.T) {
    pluginMgr := volume.VolumePluginMgr{}
    tmpDir, host := newTestHost(t, nil)
    defer os.RemoveAll(tmpDir)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)

    plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if plugin.GetPluginName() != downwardAPIPluginName {
        t.Errorf("Wrong name: %s", plugin.GetPluginName())
    }
    if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{DownwardAPI: &v1.DownwardAPIVolumeSource{}}}}) {
        t.Errorf("Expected true")
    }
    if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
        t.Errorf("Expected false")
    }
}

func TestDownwardAPI(t *testing.T) {
    labels1 := map[string]string{
        "key1": "value1",
        "key2": "value2",
    }
    labels2 := map[string]string{
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
    }
    annotations := map[string]string{
        "a1": "value1",
        "a2": "value2",
    }
    testCases := []struct {
        name string
        files map[string]string
        modes map[string]int32
        podLabels map[string]string
        podAnnotations map[string]string
        steps []testStep
    }{
        {
            name: "test_labels",
            files: map[string]string{"labels": "metadata.labels"},
            podLabels: labels1,
            steps: []testStep{
                // for steps that involve files, stepName is also
                // used as the name of the file to verify
                verifyMapInFile{stepName{"labels"}, labels1},
            },
        },
        {
            name: "test_annotations",
            files: map[string]string{"annotations": "metadata.annotations"},
            podAnnotations: annotations,
            steps: []testStep{
                verifyMapInFile{stepName{"annotations"}, annotations},
            },
        },
        {
            name: "test_name",
            files: map[string]string{"name_file_name": "metadata.name"},
            steps: []testStep{
                verifyLinesInFile{stepName{"name_file_name"}, testName},
            },
        },
        {
            name: "test_namespace",
            files: map[string]string{"namespace_file_name": "metadata.namespace"},
            steps: []testStep{
                verifyLinesInFile{stepName{"namespace_file_name"}, testNamespace},
            },
        },
        {
            name: "test_write_twice_no_update",
            files: map[string]string{"labels": "metadata.labels"},
            podLabels: labels1,
            steps: []testStep{
                reSetUp{stepName{"resetup"}, false, nil},
                verifyMapInFile{stepName{"labels"}, labels1},
            },
        },
        {
            name: "test_write_twice_with_update",
            files: map[string]string{"labels": "metadata.labels"},
            podLabels: labels1,
            steps: []testStep{
                reSetUp{stepName{"resetup"}, true, labels2},
                verifyMapInFile{stepName{"labels"}, labels2},
            },
        },
        {
            name: "test_write_with_unix_path",
            files: map[string]string{
                "these/are/my/labels": "metadata.labels",
                "these/are/your/annotations": "metadata.annotations",
            },
            podLabels: labels1,
            podAnnotations: annotations,
            steps: []testStep{
                verifyMapInFile{stepName{"these/are/my/labels"}, labels1},
                verifyMapInFile{stepName{"these/are/your/annotations"}, annotations},
            },
        },
        {
            name: "test_write_with_two_consecutive_slashes_in_the_path",
            files: map[string]string{"this//labels": "metadata.labels"},
            podLabels: labels1,
            steps: []testStep{
                verifyMapInFile{stepName{"this/labels"}, labels1},
            },
        },
        {
            name: "test_default_mode",
            files: map[string]string{"name_file_name": "metadata.name"},
            steps: []testStep{
                verifyMode{stepName{"name_file_name"}, 0644},
            },
        },
        {
            name: "test_item_mode",
            files: map[string]string{"name_file_name": "metadata.name"},
            modes: map[string]int32{"name_file_name": 0400},
            steps: []testStep{
                verifyMode{stepName{"name_file_name"}, 0400},
            },
        },
    }
    for _, testCase := range testCases {
        test := newDownwardAPITest(t, testCase.name, testCase.files, testCase.podLabels, testCase.podAnnotations, testCase.modes)
        for _, step := range testCase.steps {
            test.t.Logf("Test case: %q Step: %q", testCase.name, step.getName())
            step.run(test)
        }
        test.tearDown()
    }
}

type downwardAPITest struct {
    t *testing.T
    name string
    plugin volume.VolumePlugin
    pod *v1.Pod
    mounter volume.Mounter
    volumePath string
    rootDir string
}

func newDownwardAPITest(t *testing.T, name string, volumeFiles, podLabels, podAnnotations map[string]string, modes map[string]int32) *downwardAPITest {
    defaultMode := int32(0644)
    var files []v1.DownwardAPIVolumeFile
    for path, fieldPath := range volumeFiles {
        file := v1.DownwardAPIVolumeFile{
            Path: path,
            FieldRef: &v1.ObjectFieldSelector{
                FieldPath: fieldPath,
            },
        }
        if mode, found := modes[path]; found {
            file.Mode = &mode
        }
        files = append(files, file)
    }
    podMeta := metav1.ObjectMeta{
        Name: testName,
        Namespace: testNamespace,
        Labels: podLabels,
        Annotations: podAnnotations,
    }
    clientset := fake.NewSimpleClientset(&v1.Pod{ObjectMeta: podMeta})

    pluginMgr := volume.VolumePluginMgr{}
    rootDir, host := newTestHost(t, clientset)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
    plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }

    volumeSpec := &v1.Volume{
        Name: name,
        VolumeSource: v1.VolumeSource{
            DownwardAPI: &v1.DownwardAPIVolumeSource{
                DefaultMode: &defaultMode,
                Items: files,
            },
        },
    }
    podMeta.UID = testPodUID
    pod := &v1.Pod{ObjectMeta: podMeta}
    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Fatalf("Got a nil Mounter")
    }

    volumePath := mounter.GetPath()

    err = mounter.SetUp(nil)
    if err != nil {
        t.Errorf("Failed to setup volume: %v", err)
    }

    // downwardAPI volume should create its own empty wrapper path
    podWrapperMetadataDir := fmt.Sprintf("%v/pods/%v/plugins/kubernetes.io~empty-dir/wrapped_%v", rootDir, testPodUID, name)

    if _, err := os.Stat(podWrapperMetadataDir); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, empty-dir wrapper path was not created: %s", podWrapperMetadataDir)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }

    return &downwardAPITest{
        t: t,
        plugin: plugin,
        pod: pod,
        mounter: mounter,
        volumePath: volumePath,
        rootDir: rootDir,
    }
}

func (test *downwardAPITest) tearDown() {
    unmounter, err := test.plugin.NewUnmounter(test.name, testPodUID)
    if err != nil {
        test.t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        test.t.Fatalf("Got a nil Unmounter")
    }

    if err := unmounter.TearDown(); err != nil {
        test.t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(test.volumePath); err == nil {
        test.t.Errorf("TearDown() failed, volume path still exists: %s", test.volumePath)
    } else if !os.IsNotExist(err) {
        test.t.Errorf("TearDown() failed: %v", err)
    }
    os.RemoveAll(test.rootDir)
}

// testStep represents a named step of downwardAPITest.
// For steps that deal with files, the step name also serves
// as the name of the file that's used by the step.
type testStep interface {
    getName() string
    run(*downwardAPITest)
}

type stepName struct {
    name string
}

func (step stepName) getName() string { return step.name }

func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected string) {
    data, err := ioutil.ReadFile(path.Join(volumePath, filename))
    if err != nil {
        t.Errorf(err.Error())
        return
    }
    actualStr := sortLines(string(data))
    expectedStr := sortLines(expected)
    if actualStr != expectedStr {
        t.Errorf("Found `%s`, expected `%s`", actualStr, expectedStr)
    }
}

type verifyLinesInFile struct {
    stepName
    expected string
}

func (step verifyLinesInFile) run(test *downwardAPITest) {
    doVerifyLinesInFile(test.t, test.volumePath, step.name, step.expected)
}

type verifyMapInFile struct {
    stepName
    expected map[string]string
}

func (step verifyMapInFile) run(test *downwardAPITest) {
    doVerifyLinesInFile(test.t, test.volumePath, step.name, fieldpath.FormatMap(step.expected))
}

type verifyMode struct {
    stepName
    expectedMode int32
}

func (step verifyMode) run(test *downwardAPITest) {
    fileInfo, err := os.Stat(path.Join(test.volumePath, step.name))
    if err != nil {
        test.t.Errorf(err.Error())
        return
    }

    actualMode := fileInfo.Mode()
    expectedMode := os.FileMode(step.expectedMode)
    if actualMode != expectedMode {
        test.t.Errorf("Found mode `%v` expected %v", actualMode, expectedMode)
    }
}

type reSetUp struct {
    stepName
    linkShouldChange bool
    newLabels map[string]string
}

func (step reSetUp) run(test *downwardAPITest) {
    if step.newLabels != nil {
        test.pod.ObjectMeta.Labels = step.newLabels
    }

    currentTarget, err := os.Readlink(path.Join(test.volumePath, downwardAPIDir))
    if err != nil {
        test.t.Errorf("labels file should be a link... %s\n", err.Error())
    }

    // now re-run SetUp
    if err = test.mounter.SetUp(nil); err != nil {
        test.t.Errorf("Failed to re-setup volume: %v", err)
    }

    // get the target of the link
    currentTarget2, err := os.Readlink(path.Join(test.volumePath, downwardAPIDir))
    if err != nil {
        test.t.Errorf("..data should be a link... %s\n", err.Error())
    }

    switch {
    case step.linkShouldChange && currentTarget2 == currentTarget:
        test.t.Errorf("Got an update between the two SetUps... Target link should NOT be the same\n")
    case !step.linkShouldChange && currentTarget2 != currentTarget:
        test.t.Errorf("No update between the two SetUps... Target link should be the same %s %s\n",
            currentTarget, currentTarget2)
    }
}
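The reSetUp step above leans on the atomic-writer layout: each write lands in a fresh versioned directory and a ..data symlink at the volume root is retargeted at it, so comparing the os.Readlink targets before and after a second SetUp reveals whether the payload was republished. The standalone sketch below imitates that symlink swap under a temporary directory; the publish helper and the ..2021 / ..2022 directory names are illustrative assumptions, not the names or timestamps the real AtomicWriter uses.

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

// publish imitates the atomic-writer pattern: write into a new versioned
// directory, then atomically retarget the "..data" symlink at it.
func publish(root, version string) error {
    if err := os.MkdirAll(filepath.Join(root, version), 0755); err != nil {
        return err
    }
    tmp := filepath.Join(root, "..data_tmp")
    if err := os.Symlink(version, tmp); err != nil {
        return err
    }
    // Rename over the old link so readers always see a complete payload.
    return os.Rename(tmp, filepath.Join(root, "..data"))
}

func main() {
    root, err := os.MkdirTemp("", "downwardapi_sketch")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(root)

    // First "SetUp": payload published under ..2021, so ..data -> ..2021.
    if err := publish(root, "..2021"); err != nil {
        log.Fatal(err)
    }
    before, _ := os.Readlink(filepath.Join(root, "..data"))

    // Second "SetUp" with changed data: ..data is swapped to ..2022.
    if err := publish(root, "..2022"); err != nil {
        log.Fatal(err)
    }
    after, _ := os.Readlink(filepath.Join(root, "..data"))

    fmt.Printf("link changed: %v (%s -> %s)\n", before != after, before, after)
}

This is why test_write_twice_with_update expects a different link target after the second SetUp, while test_write_twice_no_update expects the same target.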