vendor cleanup: remove unused, non-Go, and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions

@@ -1,128 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"atomic_writer.go",
"attach_limit.go",
"device_util.go",
"doc.go",
"error.go",
"finalizer.go",
"io_util.go",
"metrics.go",
"nested_volumes.go",
"resize_util.go",
"util.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"device_util_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"device_util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"device_util_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/util",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"nested_volumes_test.go",
"resize_util_test.go",
"util_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"atomic_writer_test.go",
"device_util_linux_test.go",
],
"//conditions:default": [],
}),
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/install:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/util/fs:all-srcs",
"//pkg/volume/util/nestedpendingoperations:all-srcs",
"//pkg/volume/util/operationexecutor:all-srcs",
"//pkg/volume/util/recyclerclient:all-srcs",
"//pkg/volume/util/types:all-srcs",
"//pkg/volume/util/volumepathhandler:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

@@ -1,7 +0,0 @@
approvers:
- saad-ali
reviewers:
- saad-ali
- rootfs
- jingxu97
- screeley44

@@ -1,454 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
maxFileNameLength = 255
maxPathLength = 4096
)
// AtomicWriter handles atomically projecting content for a set of files into
// a target directory.
//
// Note:
//
// 1. AtomicWriter reserves the set of pathnames starting with `..`.
// 2. AtomicWriter offers no concurrency guarantees and must be synchronized
// by the caller.
//
// The visible files in this volume are symlinks to files in the writer's data
// directory. Actual files are stored in a hidden timestamped directory which
// is symlinked to by the data directory. The timestamped directory and
// data directory symlink are created in the writer's target dir.  This scheme
// allows the files to be atomically updated by changing the target of the
// data directory symlink.
//
// Consumers of the target directory can monitor the ..data symlink using
// inotify or fanotify to receive events when the content in the volume is
// updated.
type AtomicWriter struct {
targetDir string
logContext string
}
type FileProjection struct {
Data []byte
Mode int32
}
// NewAtomicWriter creates a new AtomicWriter configured to write to the given
// target directory, or returns an error if the target directory does not exist.
func NewAtomicWriter(targetDir string, logContext string) (*AtomicWriter, error) {
_, err := os.Stat(targetDir)
if os.IsNotExist(err) {
return nil, err
}
return &AtomicWriter{targetDir: targetDir, logContext: logContext}, nil
}
const (
dataDirName = "..data"
newDataDirName = "..data_tmp"
)
// Write does an atomic projection of the given payload into the writer's target
// directory. Input paths must not begin with '..'.
//
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns an error
// 2.  The current timestamped directory is detected by reading the data directory
// symlink
// 3. The old version of the volume is walked to determine whether any
// portion of the payload was deleted and is still present on disk.
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update is required.
// 5.  A new timestamped dir is created
// 6. The payload is written to the new timestamped directory
// 7.  Symlinks and directory for new user-visible files are created (if needed).
//
// For example, consider the files:
// <target-dir>/podName
// <target-dir>/user/labels
// <target-dir>/k8s/annotations
//
// The user visible files are symbolic links into the internal data directory:
// <target-dir>/podName -> ..data/podName
// <target-dir>/user -> ..data/user
// <target-dir>/k8s -> ..data/k8s
//
// The data directory itself is a link to a timestamped directory with
// the real data:
// <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
// 8.  A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory
// 9.  The new data directory symlink is renamed to the data directory; rename is atomic
// 10. Old paths are removed from the user-visible portion of the target directory
// 11.  The previous timestamped directory is removed, if it exists
func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// (1)
cleanPayload, err := validatePayload(payload)
if err != nil {
glog.Errorf("%s: invalid payload: %v", w.logContext, err)
return err
}
// (2)
dataDirPath := path.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil {
if !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs)
// empty oldTsDir indicates that it didn't exist
oldTsDir = ""
}
oldTsPath := path.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.String
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
// (3)
pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
}
// (4)
if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
}
// (5)
tsDir, err := w.newTimestampDir()
if err != nil {
glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
return err
}
tsDirName := filepath.Base(tsDir)
// (6)
if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
return err
} else {
glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
}
// (7)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
// (8)
newDataDirPath := path.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
os.RemoveAll(tsDir)
glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
return err
}
// (9)
if runtime.GOOS == "windows" {
os.Remove(dataDirPath)
err = os.Symlink(tsDirName, dataDirPath)
os.Remove(newDataDirPath)
} else {
err = os.Rename(newDataDirPath, dataDirPath)
}
if err != nil {
os.Remove(newDataDirPath)
os.RemoveAll(tsDir)
glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
return err
}
// (10)
if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
return err
}
// (11)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(oldTsPath); err != nil {
glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
return err
}
}
return nil
}
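For context, a minimal, illustrative sketch of how a caller drives the AtomicWriter removed here, assuming only the exported names shown above (the import path is the one declared in the BUILD file); the target directory and payload contents are invented for illustration:

package main

import (
    "log"

    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
    // The target directory must already exist; NewAtomicWriter only stats it.
    // The path here is purely illustrative.
    writer, err := volumeutil.NewAtomicWriter("/var/lib/kubelet/demo-volume", "demo")
    if err != nil {
        log.Fatalf("creating writer: %v", err)
    }
    // Each Write projects the complete desired state; files absent from a
    // later payload are pruned from the user-visible directory (steps 3 and 10).
    payload := map[string]volumeutil.FileProjection{
        "podName":         {Mode: 0644, Data: []byte("mypod")},
        "user/labels":     {Mode: 0644, Data: []byte("app=demo")},
        "k8s/annotations": {Mode: 0644, Data: []byte("checksum/config: abc")},
    }
    if err := writer.Write(payload); err != nil {
        log.Fatalf("projecting payload: %v", err)
    }
}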
// validatePayload returns an error if any path in the payload is invalid; otherwise it returns a copy of the payload with the paths cleaned.
func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) {
cleanPayload := make(map[string]FileProjection)
for k, content := range payload {
if err := validatePath(k); err != nil {
return nil, err
}
cleanPayload[filepath.Clean(k)] = content
}
return cleanPayload, nil
}
// validatePath validates a single path, returning an error if the path is
// invalid. Paths may not:
//
// 1. be absolute
// 2. contain '..' as an element
// 3. start with '..'
// 4. contain filenames larger than 255 characters
// 5. be longer than 4096 characters
func validatePath(targetPath string) error {
// TODO: somehow unify this with the similar api validation,
// validateVolumeSourcePath; the error semantics are just different enough
// from this that it was time-prohibitive trying to find the right
// refactoring to re-use.
if targetPath == "" {
return fmt.Errorf("invalid path: must not be empty: %q", targetPath)
}
if path.IsAbs(targetPath) {
return fmt.Errorf("invalid path: must be relative path: %s", targetPath)
}
if len(targetPath) > maxPathLength {
return fmt.Errorf("invalid path: must be less than or equal to %d characters", maxPathLength)
}
items := strings.Split(targetPath, string(os.PathSeparator))
for _, item := range items {
if item == ".." {
return fmt.Errorf("invalid path: must not contain '..': %s", targetPath)
}
if len(item) > maxFileNameLength {
return fmt.Errorf("invalid path: filenames must be less than or equal to %d characters", maxFileNameLength)
}
}
if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
return fmt.Errorf("invalid path: must not start with '..': %s", targetPath)
}
return nil
}
// shouldWritePayload returns whether the payload should be written to disk.
func shouldWritePayload(payload map[string]FileProjection, oldTsDir string) (bool, error) {
for userVisiblePath, fileProjection := range payload {
shouldWrite, err := shouldWriteFile(path.Join(oldTsDir, userVisiblePath), fileProjection.Data)
if err != nil {
return false, err
}
if shouldWrite {
return true, nil
}
}
return false, nil
}
// shouldWriteFile returns whether a new version of a file should be written to disk.
func shouldWriteFile(path string, content []byte) (bool, error) {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
return true, nil
}
contentOnFs, err := ioutil.ReadFile(path)
if err != nil {
return false, err
}
return (bytes.Compare(content, contentOnFs) != 0), nil
}
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
paths := sets.NewString()
visitor := func(path string, info os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, oldTsDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if relativePath == "" {
return nil
}
paths.Insert(relativePath)
return nil
}
err := filepath.Walk(oldTsDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())
newPaths := sets.NewString()
for file := range payload {
// add all subpaths for the payload to the set of new paths
// to avoid attempting to remove non-empty dirs
for subPath := file; subPath != ""; {
newPaths.Insert(subPath)
subPath, _ = filepath.Split(subPath)
subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
}
}
glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
result := paths.Difference(newPaths)
glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
return result, nil
}
// newTimestampDir creates a new timestamp directory
func (w *AtomicWriter) newTimestampDir() (string, error) {
tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
return "", err
}
// 0755 permissions are needed to allow 'group' and 'other' to recurse the
// directory tree. Do a chmod here to ensure that permissions are set correctly
// regardless of the process' umask.
err = os.Chmod(tsDir, 0755)
if err != nil {
glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err)
return "", err
}
return tsDir, nil
}
// writePayloadToDir writes the given payload to the given directory. The
// directory must exist.
func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir string) error {
for userVisiblePath, fileProjection := range payload {
content := fileProjection.Data
mode := os.FileMode(fileProjection.Mode)
fullPath := path.Join(dir, userVisiblePath)
baseDir, _ := filepath.Split(fullPath)
err := os.MkdirAll(baseDir, os.ModePerm)
if err != nil {
glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
return err
}
err = ioutil.WriteFile(fullPath, content, mode)
if err != nil {
glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
// Chmod is needed because ioutil.WriteFile() ends up calling
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
err = os.Chmod(fullPath, mode)
if err != nil {
glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
}
}
return nil
}
// createUserVisibleFiles creates the relative symlinks for all the
// files configured in the payload. If the directory in a file path does not
// exist, it is created.
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks are created:
// bar -> ..data/bar
// foo -> ..data/foo
// baz -> ..data/baz
func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) error {
for userVisiblePath := range payload {
slashpos := strings.Index(userVisiblePath, string(os.PathSeparator))
if slashpos == -1 {
slashpos = len(userVisiblePath)
}
linkname := userVisiblePath[:slashpos]
_, err := os.Readlink(path.Join(w.targetDir, linkname))
if err != nil && os.IsNotExist(err) {
// The link into the data directory for this path doesn't exist; create it
visibleFile := path.Join(w.targetDir, linkname)
dataDirFile := path.Join(dataDirName, linkname)
err = os.Symlink(dataDirFile, visibleFile)
if err != nil {
return err
}
}
}
return nil
}
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {
// only remove symlinks from the volume root directory (i.e. items that don't contain '/')
if strings.Contains(p, ps) {
continue
}
if err := os.Remove(path.Join(w.targetDir, p)); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
lasterr = err
}
}
return lasterr
}

@@ -1,986 +0,0 @@
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/base64"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"testing"
"k8s.io/apimachinery/pkg/util/sets"
utiltesting "k8s.io/client-go/util/testing"
)
func TestNewAtomicWriter(t *testing.T) {
targetDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Fatalf("unexpected error creating tmp dir: %v", err)
}
defer os.RemoveAll(targetDir)
_, err = NewAtomicWriter(targetDir, "-test-")
if err != nil {
t.Fatalf("unexpected error creating writer for existing target dir: %v", err)
}
nonExistentDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Fatalf("unexpected error creating tmp dir: %v", err)
}
err = os.Remove(nonExistentDir)
if err != nil {
t.Fatalf("unexpected error ensuring dir %v does not exist: %v", nonExistentDir, err)
}
_, err = NewAtomicWriter(nonExistentDir, "-test-")
if err == nil {
t.Fatalf("unexpected success creating writer for nonexistent target dir: %v", err)
}
}
func TestValidatePath(t *testing.T) {
maxPath := strings.Repeat("a", maxPathLength+1)
maxFile := strings.Repeat("a", maxFileNameLength+1)
cases := []struct {
name string
path string
valid bool
}{
{
name: "valid 1",
path: "i/am/well/behaved.txt",
valid: true,
},
{
name: "valid 2",
path: "keepyourheaddownandfollowtherules.txt",
valid: true,
},
{
name: "max path length",
path: maxPath,
valid: false,
},
{
name: "max file length",
path: maxFile,
valid: false,
},
{
name: "absolute failure",
path: "/dev/null",
valid: false,
},
{
name: "reserved path",
path: "..sneaky.txt",
valid: false,
},
{
name: "contains doubledot 1",
path: "hello/there/../../../../../../etc/passwd",
valid: false,
},
{
name: "contains doubledot 2",
path: "hello/../etc/somethingbad",
valid: false,
},
{
name: "empty",
path: "",
valid: false,
},
}
for _, tc := range cases {
err := validatePath(tc.path)
if tc.valid && err != nil {
t.Errorf("%v: unexpected failure: %v", tc.name, err)
continue
}
if !tc.valid && err == nil {
t.Errorf("%v: unexpected success", tc.name)
}
}
}
func TestPathsToRemove(t *testing.T) {
cases := []struct {
name string
payload1 map[string]FileProjection
payload2 map[string]FileProjection
expected sets.String
}{
{
name: "simple",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"bar.txt": {Mode: 0644, Data: []byte("bar")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
},
expected: sets.NewString("bar.txt"),
},
{
name: "simple 2",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zip/bar.txt": {Mode: 0644, Data: []byte("zip/b}ar")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
},
expected: sets.NewString("zip/bar.txt", "zip"),
},
{
name: "subdirs 1",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zip/zap/bar.txt": {Mode: 0644, Data: []byte("zip/bar")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
},
expected: sets.NewString("zip/zap/bar.txt", "zip", "zip/zap"),
},
{
name: "subdirs 2",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zip/1/2/3/4/bar.txt": {Mode: 0644, Data: []byte("zip/b}ar")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
},
expected: sets.NewString("zip/1/2/3/4/bar.txt", "zip", "zip/1", "zip/1/2", "zip/1/2/3", "zip/1/2/3/4"),
},
{
name: "subdirs 3",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zip/1/2/3/4/bar.txt": {Mode: 0644, Data: []byte("zip/b}ar")},
"zap/a/b/c/bar.txt": {Mode: 0644, Data: []byte("zap/bar")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
},
expected: sets.NewString("zip/1/2/3/4/bar.txt", "zip", "zip/1", "zip/1/2", "zip/1/2/3", "zip/1/2/3/4", "zap", "zap/a", "zap/a/b", "zap/a/b/c", "zap/a/b/c/bar.txt"),
},
{
name: "subdirs 4",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zap/1/2/3/4/bar.txt": {Mode: 0644, Data: []byte("zip/bar")},
"zap/1/2/c/bar.txt": {Mode: 0644, Data: []byte("zap/bar")},
"zap/1/2/magic.txt": {Mode: 0644, Data: []byte("indigo")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zap/1/2/magic.txt": {Mode: 0644, Data: []byte("indigo")},
},
expected: sets.NewString("zap/1/2/3/4/bar.txt", "zap/1/2/3", "zap/1/2/3/4", "zap/1/2/3/4/bar.txt", "zap/1/2/c", "zap/1/2/c/bar.txt"),
},
{
name: "subdirs 5",
payload1: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zap/1/2/3/4/bar.txt": {Mode: 0644, Data: []byte("zip/bar")},
"zap/1/2/c/bar.txt": {Mode: 0644, Data: []byte("zap/bar")},
},
payload2: map[string]FileProjection{
"foo.txt": {Mode: 0644, Data: []byte("foo")},
"zap/1/2/magic.txt": {Mode: 0644, Data: []byte("indigo")},
},
expected: sets.NewString("zap/1/2/3/4/bar.txt", "zap/1/2/3", "zap/1/2/3/4", "zap/1/2/3/4/bar.txt", "zap/1/2/c", "zap/1/2/c/bar.txt"),
},
}
for _, tc := range cases {
targetDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
continue
}
defer os.RemoveAll(targetDir)
writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
err = writer.Write(tc.payload1)
if err != nil {
t.Errorf("%v: unexpected error writing: %v", tc.name, err)
continue
}
dataDirPath := path.Join(targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil && os.IsNotExist(err) {
t.Errorf("Data symlink does not exist: %v", dataDirPath)
continue
} else if err != nil {
t.Errorf("Unable to read symlink %v: %v", dataDirPath, err)
continue
}
actual, err := writer.pathsToRemove(tc.payload2, path.Join(targetDir, oldTsDir))
if err != nil {
t.Errorf("%v: unexpected error determining paths to remove: %v", tc.name, err)
continue
}
if e, a := tc.expected, actual; !e.Equal(a) {
t.Errorf("%v: unexpected paths to remove:\nexpected: %v\n got: %v", tc.name, e, a)
}
}
}
func TestWriteOnce(t *testing.T) {
// $1 if you can tell me what this binary is
encodedMysteryBinary := `f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAeABAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAEAAOAAB
AAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAAAAfQAAAAAAAAB9AAAAAAAAAAAA
IAAAAAAAsDyZDwU=`
mysteryBinaryBytes := make([]byte, base64.StdEncoding.DecodedLen(len(encodedMysteryBinary)))
numBytes, err := base64.StdEncoding.Decode(mysteryBinaryBytes, []byte(encodedMysteryBinary))
if err != nil {
t.Fatalf("Unexpected error decoding binary payload: %v", err)
}
if numBytes != 125 {
t.Fatalf("Unexpected decoded binary size: expected 125, got %v", numBytes)
}
cases := []struct {
name string
payload map[string]FileProjection
success bool
}{
{
name: "invalid payload 1",
payload: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo")},
"..bar": {Mode: 0644, Data: []byte("bar")},
"binary.bin": {Mode: 0644, Data: mysteryBinaryBytes},
},
success: false,
},
{
name: "invalid payload 2",
payload: map[string]FileProjection{
"foo/../bar": {Mode: 0644, Data: []byte("foo")},
},
success: false,
},
{
name: "basic 1",
payload: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
},
success: true,
},
{
name: "basic 2",
payload: map[string]FileProjection{
"binary.bin": {Mode: 0644, Data: mysteryBinaryBytes},
".binary.bin": {Mode: 0644, Data: mysteryBinaryBytes},
},
success: true,
},
{
name: "basic mode 1",
payload: map[string]FileProjection{
"foo": {Mode: 0777, Data: []byte("foo")},
"bar": {Mode: 0400, Data: []byte("bar")},
},
success: true,
},
{
name: "dotfiles",
payload: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
".dotfile": {Mode: 0644, Data: []byte("dotfile")},
".dotfile.file": {Mode: 0644, Data: []byte("dotfile.file")},
},
success: true,
},
{
name: "dotfiles mode",
payload: map[string]FileProjection{
"foo": {Mode: 0407, Data: []byte("foo")},
"bar": {Mode: 0440, Data: []byte("bar")},
".dotfile": {Mode: 0777, Data: []byte("dotfile")},
".dotfile.file": {Mode: 0666, Data: []byte("dotfile.file")},
},
success: true,
},
{
name: "subdirectories 1",
payload: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
},
success: true,
},
{
name: "subdirectories mode 1",
payload: map[string]FileProjection{
"foo/bar.txt": {Mode: 0400, Data: []byte("foo/bar")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
},
success: true,
},
{
name: "subdirectories 2",
payload: map[string]FileProjection{
"foo//bar.txt": {Mode: 0644, Data: []byte("foo//bar")},
"bar///bar/zab.txt": {Mode: 0644, Data: []byte("bar/../bar/zab.txt")},
},
success: true,
},
{
name: "subdirectories 3",
payload: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar")},
"bar/zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt")},
},
success: true,
},
{
name: "kitchen sink",
payload: map[string]FileProjection{
"foo.log": {Mode: 0644, Data: []byte("foo")},
"bar.zap": {Mode: 0644, Data: []byte("bar")},
".dotfile": {Mode: 0644, Data: []byte("dotfile")},
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar")},
"bar/zib/zab.txt": {Mode: 0400, Data: []byte("bar/zib/zab.txt")},
"1/2/3/4/5/6/7/8/9/10/.dotfile.lib": {Mode: 0777, Data: []byte("1-2-3-dotfile")},
},
success: true,
},
}
for _, tc := range cases {
targetDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
continue
}
defer os.RemoveAll(targetDir)
writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
err = writer.Write(tc.payload)
if err != nil && tc.success {
t.Errorf("%v: unexpected error writing payload: %v", tc.name, err)
continue
} else if err == nil && !tc.success {
t.Errorf("%v: unexpected success", tc.name)
continue
} else if err != nil {
continue
}
checkVolumeContents(targetDir, tc.name, tc.payload, t)
}
}
func TestUpdate(t *testing.T) {
cases := []struct {
name string
first map[string]FileProjection
next map[string]FileProjection
shouldWrite bool
}{
{
name: "update",
first: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo2")},
"bar": {Mode: 0640, Data: []byte("bar2")},
},
shouldWrite: true,
},
{
name: "no update",
first: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
},
shouldWrite: false,
},
{
name: "no update 2",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
shouldWrite: false,
},
{
name: "add 1",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
"blu/zip.txt": {Mode: 0644, Data: []byte("zip")},
},
shouldWrite: true,
},
{
name: "add 2",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
"blu/two/2/3/4/5/zip.txt": {Mode: 0644, Data: []byte("zip")},
},
shouldWrite: true,
},
{
name: "add 3",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
"bar/2/3/4/5/zip.txt": {Mode: 0644, Data: []byte("zip")},
},
shouldWrite: true,
},
{
name: "delete 1",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
},
shouldWrite: true,
},
{
name: "delete 2",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/1/2/3/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
},
shouldWrite: true,
},
{
name: "delete 3",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/1/2/sip.txt": {Mode: 0644, Data: []byte("sip")},
"bar/1/2/3/zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/1/2/sip.txt": {Mode: 0644, Data: []byte("sip")},
},
shouldWrite: true,
},
{
name: "delete 4",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/1/2/sip.txt": {Mode: 0644, Data: []byte("sip")},
"bar/1/2/3/4/5/6zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/1/2/sip.txt": {Mode: 0644, Data: []byte("sip")},
},
shouldWrite: true,
},
{
name: "delete all",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
"bar/1/2/sip.txt": {Mode: 0644, Data: []byte("sip")},
"bar/1/2/3/4/5/6zab.txt": {Mode: 0644, Data: []byte("bar")},
},
next: map[string]FileProjection{},
shouldWrite: true,
},
{
name: "add and delete 1",
first: map[string]FileProjection{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo")},
},
next: map[string]FileProjection{
"bar/baz.txt": {Mode: 0644, Data: []byte("baz")},
},
shouldWrite: true,
},
}
for _, tc := range cases {
targetDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
continue
}
defer os.RemoveAll(targetDir)
writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
err = writer.Write(tc.first)
if err != nil {
t.Errorf("%v: unexpected error writing: %v", tc.name, err)
continue
}
checkVolumeContents(targetDir, tc.name, tc.first, t)
if !tc.shouldWrite {
continue
}
err = writer.Write(tc.next)
if err != nil {
if tc.shouldWrite {
t.Errorf("%v: unexpected error writing: %v", tc.name, err)
continue
}
} else if !tc.shouldWrite {
t.Errorf("%v: unexpected success", tc.name)
continue
}
checkVolumeContents(targetDir, tc.name, tc.next, t)
}
}
func TestMultipleUpdates(t *testing.T) {
cases := []struct {
name string
payloads []map[string]FileProjection
}{
{
name: "update 1",
payloads: []map[string]FileProjection{
{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
},
{
"foo": {Mode: 0400, Data: []byte("foo2")},
"bar": {Mode: 0400, Data: []byte("bar2")},
},
{
"foo": {Mode: 0600, Data: []byte("foo3")},
"bar": {Mode: 0600, Data: []byte("bar3")},
},
},
},
{
name: "update 2",
payloads: []map[string]FileProjection{
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
},
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar2")},
"bar/zab.txt": {Mode: 0400, Data: []byte("bar/zab.txt2")},
},
},
},
{
name: "clear sentinel",
payloads: []map[string]FileProjection{
{
"foo": {Mode: 0644, Data: []byte("foo")},
"bar": {Mode: 0644, Data: []byte("bar")},
},
{
"foo": {Mode: 0644, Data: []byte("foo2")},
"bar": {Mode: 0644, Data: []byte("bar2")},
},
{
"foo": {Mode: 0644, Data: []byte("foo3")},
"bar": {Mode: 0644, Data: []byte("bar3")},
},
{
"foo": {Mode: 0644, Data: []byte("foo4")},
"bar": {Mode: 0644, Data: []byte("bar4")},
},
},
},
{
name: "subdirectories 2",
payloads: []map[string]FileProjection{
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar")},
"bar/zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt")},
},
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar2")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt2")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar2")},
"bar/zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt2")},
},
},
},
{
name: "add 1",
payloads: []map[string]FileProjection{
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar//zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar")},
"bar/zib////zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt")},
},
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar2")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt2")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar2")},
"bar/zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt2")},
"add/new/keys.txt": {Mode: 0644, Data: []byte("addNewKeys")},
},
},
},
{
name: "add 2",
payloads: []map[string]FileProjection{
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar2")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt2")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar2")},
"bar/zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt2")},
"add/new/keys.txt": {Mode: 0644, Data: []byte("addNewKeys")},
},
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar2")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt2")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar2")},
"bar/zib/zab.txt": {Mode: 0644, Data: []byte("bar/zib/zab.txt2")},
"add/new/keys.txt": {Mode: 0644, Data: []byte("addNewKeys")},
"add/new/keys2.txt": {Mode: 0644, Data: []byte("addNewKeys2")},
"add/new/keys3.txt": {Mode: 0644, Data: []byte("addNewKeys3")},
},
},
},
{
name: "remove 1",
payloads: []map[string]FileProjection{
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
"bar//zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt")},
"foo/blaz/bar.txt": {Mode: 0644, Data: []byte("foo/blaz/bar")},
"zip/zap/zup/fop.txt": {Mode: 0644, Data: []byte("zip/zap/zup/fop.txt")},
},
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar2")},
"bar/zab.txt": {Mode: 0644, Data: []byte("bar/zab.txt2")},
},
{
"foo/bar.txt": {Mode: 0644, Data: []byte("foo/bar")},
},
},
},
}
for _, tc := range cases {
targetDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
continue
}
defer os.RemoveAll(targetDir)
writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
for _, payload := range tc.payloads {
writer.Write(payload)
checkVolumeContents(targetDir, tc.name, payload, t)
}
}
}
func checkVolumeContents(targetDir, tcName string, payload map[string]FileProjection, t *testing.T) {
dataDirPath := path.Join(targetDir, dataDirName)
// use filepath.Walk to reconstruct the payload, then deep equal
observedPayload := make(map[string]FileProjection)
visitor := func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
relativePath := strings.TrimPrefix(path, dataDirPath)
relativePath = strings.TrimPrefix(relativePath, "/")
if strings.HasPrefix(relativePath, "..") {
return nil
}
content, err := ioutil.ReadFile(path)
if err != nil {
return err
}
fileInfo, err := os.Stat(path)
if err != nil {
return err
}
mode := int32(fileInfo.Mode())
observedPayload[relativePath] = FileProjection{Data: content, Mode: mode}
return nil
}
d, err := ioutil.ReadDir(targetDir)
if err != nil {
t.Errorf("Unable to read dir %v: %v", targetDir, err)
return
}
for _, info := range d {
if strings.HasPrefix(info.Name(), "..") {
continue
}
if info.Mode()&os.ModeSymlink != 0 {
p := path.Join(targetDir, info.Name())
actual, err := os.Readlink(p)
if err != nil {
t.Errorf("Unable to read symlink %v: %v", p, err)
continue
}
if err := filepath.Walk(path.Join(targetDir, actual), visitor); err != nil {
t.Errorf("%v: unexpected error walking directory: %v", tcName, err)
}
}
}
cleanPathPayload := make(map[string]FileProjection, len(payload))
for k, v := range payload {
cleanPathPayload[filepath.Clean(k)] = v
}
if !reflect.DeepEqual(cleanPathPayload, observedPayload) {
t.Errorf("%v: payload and observed payload do not match.", tcName)
}
}
func TestValidatePayload(t *testing.T) {
maxPath := strings.Repeat("a", maxPathLength+1)
cases := []struct {
name string
payload map[string]FileProjection
expected sets.String
valid bool
}{
{
name: "valid payload",
payload: map[string]FileProjection{
"foo": {},
"bar": {},
},
valid: true,
expected: sets.NewString("foo", "bar"),
},
{
name: "payload with path length > 4096 is invalid",
payload: map[string]FileProjection{
maxPath: {},
},
valid: false,
},
{
name: "payload with absolute path is invalid",
payload: map[string]FileProjection{
"/dev/null": {},
},
valid: false,
},
{
name: "payload with reserved path is invalid",
payload: map[string]FileProjection{
"..sneaky.txt": {},
},
valid: false,
},
{
name: "payload with doubledot path is invalid",
payload: map[string]FileProjection{
"foo/../etc/password": {},
},
valid: false,
},
{
name: "payload with empty path is invalid",
payload: map[string]FileProjection{
"": {},
},
valid: false,
},
{
name: "payload with unclean path should be cleaned",
payload: map[string]FileProjection{
"foo////bar": {},
},
valid: true,
expected: sets.NewString("foo/bar"),
},
}
getPayloadPaths := func(payload map[string]FileProjection) sets.String {
paths := sets.NewString()
for path := range payload {
paths.Insert(path)
}
return paths
}
for _, tc := range cases {
real, err := validatePayload(tc.payload)
if !tc.valid && err == nil {
t.Errorf("%v: unexpected success", tc.name)
}
if tc.valid {
if err != nil {
t.Errorf("%v: unexpected failure: %v", tc.name, err)
continue
}
realPaths := getPayloadPaths(real)
if !realPaths.Equal(tc.expected) {
t.Errorf("%v: unexpected payload paths: %v is not equal to %v", tc.name, realPaths, tc.expected)
}
}
}
}
func TestCreateUserVisibleFiles(t *testing.T) {
cases := []struct {
name string
payload map[string]FileProjection
expected map[string]string
}{
{
name: "simple path",
payload: map[string]FileProjection{
"foo": {},
"bar": {},
},
expected: map[string]string{
"foo": "..data/foo",
"bar": "..data/bar",
},
},
{
name: "simple nested path",
payload: map[string]FileProjection{
"foo/bar": {},
"foo/bar/txt": {},
"bar/txt": {},
},
expected: map[string]string{
"foo": "..data/foo",
"bar": "..data/bar",
},
},
{
name: "unclean nested path",
payload: map[string]FileProjection{
"./bar": {},
"foo///bar": {},
},
expected: map[string]string{
"bar": "..data/bar",
"foo": "..data/foo",
},
},
}
for _, tc := range cases {
targetDir, err := utiltesting.MkTmpdir("atomic-write")
if err != nil {
t.Errorf("%v: unexpected error creating tmp dir: %v", tc.name, err)
continue
}
defer os.RemoveAll(targetDir)
dataDirPath := path.Join(targetDir, dataDirName)
err = os.MkdirAll(dataDirPath, 0755)
if err != nil {
t.Fatalf("%v: unexpected error creating data path: %v", tc.name, err)
}
writer := &AtomicWriter{targetDir: targetDir, logContext: "-test-"}
payload, err := validatePayload(tc.payload)
if err != nil {
t.Fatalf("%v: unexpected error validating payload: %v", tc.name, err)
}
err = writer.createUserVisibleFiles(payload)
if err != nil {
t.Fatalf("%v: unexpected error creating visible files: %v", tc.name, err)
}
for subpath, expectedDest := range tc.expected {
visiblePath := path.Join(targetDir, subpath)
destination, err := os.Readlink(visiblePath)
if err != nil && os.IsNotExist(err) {
t.Fatalf("%v: visible symlink does not exist: %v", tc.name, visiblePath)
} else if err != nil {
t.Fatalf("%v: unable to read symlink %v: %v", tc.name, dataDirPath, err)
}
if expectedDest != destination {
t.Fatalf("%v: symlink destination %q not same with expected data dir %q", tc.name, destination, expectedDest)
}
}
}
}

@@ -1,29 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// This file is a common placeholder for volume limit utility constants
// shared between the volume package and the scheduler.
const (
// EBSVolumeLimitKey is the resource name that stores the volume limit for AWS EBS
EBSVolumeLimitKey = "attachable-volumes-aws-ebs"
// AzureVolumeLimitKey is the resource name that stores the volume limit for Azure Disk
AzureVolumeLimitKey = "attachable-volumes-azure-disk"
// GCEVolumeLimitKey is the resource name that stores the volume limit for GCE PD
GCEVolumeLimitKey = "attachable-volumes-gce-pd"
)
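As an illustration, these keys appear as resource names on a node's capacity/allocatable; the sketch below shows how a consumer might read the EBS limit. The helper name and the hard-coded limit of 25 are invented for illustration, not part of this change:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// ebsAttachLimit is an illustrative helper: it reads the attachable-volume
// limit a node publishes under EBSVolumeLimitKey, if present.
func ebsAttachLimit(node *v1.Node) (int64, bool) {
    q, ok := node.Status.Allocatable[v1.ResourceName("attachable-volumes-aws-ebs")]
    if !ok {
        return 0, false
    }
    return q.Value(), true
}

func main() {
    node := &v1.Node{}
    node.Status.Allocatable = v1.ResourceList{
        "attachable-volumes-aws-ebs": resource.MustParse("25"),
    }
    if limit, ok := ebsAttachLimit(node); ok {
        fmt.Println("EBS attach limit:", limit)
    }
}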

@@ -1,32 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// DeviceUtil is a util for common device methods
type DeviceUtil interface {
FindMultipathDeviceForDevice(disk string) string
FindSlaveDevicesOnMultipath(disk string) []string
}
type deviceHandler struct {
get_io IoUtil
}
// NewDeviceHandler creates a new DeviceUtil implementation
func NewDeviceHandler(io IoUtil) DeviceUtil {
return &deviceHandler{get_io: io}
}

@@ -1,82 +0,0 @@
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"path"
"strings"
)
// FindMultipathDeviceForDevice, given a device name like /dev/sdx, finds the device-mapper parent (e.g. /dev/dm-1), or "" if there is none
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
io := handler.get_io
disk, err := findDeviceForPath(device, io)
if err != nil {
return ""
}
sysPath := "/sys/block/"
if dirs, err := io.ReadDir(sysPath); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "dm-") {
if _, err1 := io.Lstat(sysPath + name + "/slaves/" + disk); err1 == nil {
return "/dev/" + name
}
}
}
}
return ""
}
// findDeviceForPath finds the underlying disk for a linked path such as /dev/disk/by-path/XXXX or /dev/mapper/XXXX.
// It returns sdX, hdX, etc.; if /dev/sdX is passed in, then sdX is returned.
func findDeviceForPath(path string, io IoUtil) (string, error) {
devicePath, err := io.EvalSymlinks(path)
if err != nil {
return "", err
}
// if path /dev/hdX split into "", "dev", "hdX" then we will
// return just the last part
parts := strings.Split(devicePath, "/")
if len(parts) == 3 && strings.HasPrefix(parts[1], "dev") {
return parts[2], nil
}
return "", errors.New("Illegal path for device " + devicePath)
}
// FindSlaveDevicesOnMultipath, given a device-mapper name like /dev/dm-1, finds all devices
// that are managed by that device-mapper device (dm-1).
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
var devices []string
io := handler.get_io
// Split path /dev/dm-1 into "", "dev", "dm-1"
parts := strings.Split(dm, "/")
if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") {
return devices
}
disk := parts[2]
slavesPath := path.Join("/sys/block/", disk, "/slaves/")
if files, err := io.ReadDir(slavesPath); err == nil {
for _, f := range files {
devices = append(devices, path.Join("/dev/", f.Name()))
}
}
return devices
}
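To illustrate, a sketch of how a caller might use this device helper. It assumes the NewIOHandler constructor from io_util.go (listed in the BUILD file above but not shown in this diff), and the by-path device name is invented:

package main

import (
    "fmt"

    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
    // NewIOHandler (from io_util.go) wraps the real os/filepath calls behind IoUtil.
    deviceUtil := volumeutil.NewDeviceHandler(volumeutil.NewIOHandler())

    // Resolve a by-path symlink to its multipath parent (e.g. /dev/dm-1), then
    // list the slave devices (/dev/sdX) that the device-mapper device manages.
    dm := deviceUtil.FindMultipathDeviceForDevice("/dev/disk/by-path/ip-127.0.0.1:3260-iscsi-iqn.example-lun-0")
    if dm == "" {
        fmt.Println("no multipath parent found")
        return
    }
    for _, slave := range deviceUtil.FindSlaveDevicesOnMultipath(dm) {
        fmt.Println("slave device:", slave)
    }
}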

@@ -1,160 +0,0 @@
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"os"
"reflect"
"testing"
"time"
)
type mockOsIOHandler struct{}
func (handler *mockOsIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/sys/block/dm-1/slaves":
f1 := &fakeFileInfo{
name: "sda",
}
f2 := &fakeFileInfo{
name: "sdb",
}
return []os.FileInfo{f1, f2}, nil
case "/sys/block/":
f1 := &fakeFileInfo{
name: "sda",
}
f2 := &fakeFileInfo{
name: "dm-1",
}
return []os.FileInfo{f1, f2}, nil
}
return nil, nil
}
func (handler *mockOsIOHandler) Lstat(name string) (os.FileInfo, error) {
links := map[string]string{
"/sys/block/dm-1/slaves/sda": "sda",
"/dev/sda": "sda",
}
if dev, ok := links[name]; ok {
return &fakeFileInfo{name: dev}, nil
}
return nil, errors.New("Not Implemented for Mock")
}
func (handler *mockOsIOHandler) EvalSymlinks(path string) (string, error) {
links := map[string]string{
"/returns/a/dev": "/dev/sde",
"/returns/non/dev": "/sys/block",
"/dev/disk/by-path/127.0.0.1:3260-eui.02004567A425678D-lun-0": "/dev/sda",
"/dev/disk/by-path/127.0.0.3:3260-eui.03004567A425678D-lun-0": "/dev/sdb",
"/dev/dm-2": "/dev/dm-2",
"/dev/dm-3": "/dev/dm-3",
"/dev/sdc": "/dev/sdc",
"/dev/sde": "/dev/sde",
}
return links[path], nil
}
func (handler *mockOsIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return errors.New("Not Implemented for Mock")
}
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
func TestFindMultipathDeviceForDevice(t *testing.T) {
mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
dev := mockDeviceUtil.FindMultipathDeviceForDevice("/dev/disk/by-path/127.0.0.1:3260-eui.02004567A425678D-lun-0")
if dev != "/dev/dm-1" {
t.Fatalf("mpio device not found dm-1 expected got [%s]", dev)
}
dev = mockDeviceUtil.FindMultipathDeviceForDevice("/dev/disk/by-path/empty")
if dev != "" {
t.Fatalf("mpio device not found '' expected got [%s]", dev)
}
}
func TestFindDeviceForPath(t *testing.T) {
io := &mockOsIOHandler{}
disk, err := findDeviceForPath("/dev/sde", io)
if err != nil {
t.Fatalf("error finding device for path /dev/sde:%v", err)
}
if disk != "sde" {
t.Fatalf("disk [%s] didn't match expected sde", disk)
}
disk, err = findDeviceForPath("/returns/a/dev", io)
if err != nil {
t.Fatalf("error finding device for path /returns/a/dev:%v", err)
}
if disk != "sde" {
t.Fatalf("disk [%s] didn't match expected sde", disk)
}
_, err = findDeviceForPath("/returns/non/dev", io)
if err == nil {
t.Fatalf("link is to incorrect dev")
}
_, err = findDeviceForPath("/path/doesnt/exist", &osIOHandler{})
if err == nil {
t.Fatalf("path shouldn't exist but still doesn't give an error")
}
}
func TestFindSlaveDevicesOnMultipath(t *testing.T) {
mockDeviceUtil := NewDeviceHandler(&mockOsIOHandler{})
devices := mockDeviceUtil.FindSlaveDevicesOnMultipath("/dev/dm-1")
if !reflect.DeepEqual(devices, []string{"/dev/sda", "/dev/sdb"}) {
t.Fatalf("failed to find devices managed by mpio device. /dev/sda, /dev/sdb expected got [%s]", devices)
}
dev := mockDeviceUtil.FindSlaveDevicesOnMultipath("/dev/sdc")
if len(dev) != 0 {
t.Fatalf("mpio device not found '' expected got [%s]", dev)
}
}

@@ -1,30 +0,0 @@
// +build !linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// FindMultipathDeviceForDevice returns "" on unsupported platforms
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
return ""
}
// FindSlaveDevicesOnMultipath returns an empty slice on unsupported platforms
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(disk string) []string {
out := []string{}
return out
}

@@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Contains utility code for use by volume plugins.
package util // import "k8s.io/kubernetes/pkg/volume/util"

@@ -1,41 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
k8stypes "k8s.io/apimachinery/pkg/types"
)
// DanglingAttachError is returned on attach when the volume is found attached to a node
// other than the one we expected.
type DanglingAttachError struct {
msg string
CurrentNode k8stypes.NodeName
DevicePath string
}
func (err *DanglingAttachError) Error() string {
return err.msg
}
func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error {
return &DanglingAttachError{
msg: msg,
CurrentNode: node,
DevicePath: devicePath,
}
}
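As an illustration, a sketch of how an attach implementation might surface this error and how a caller can detect it; the attach stub, node name, and device path are invented for illustration:

package main

import (
    "fmt"

    k8stypes "k8s.io/apimachinery/pkg/types"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// attach stands in for a volume plugin's Attach call.
func attach() error {
    return volumeutil.NewDanglingError(
        "volume is already attached to another node",
        k8stypes.NodeName("node-b"),
        "/dev/xvdf",
    )
}

func main() {
    err := attach()
    // Callers type-assert to recover the node and device path for cleanup.
    if dangling, ok := err.(*volumeutil.DanglingAttachError); ok {
        fmt.Printf("dangling attach on node %s at %s: %v\n",
            dangling.CurrentNode, dangling.DevicePath, dangling)
    }
}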

@@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
const (
// PVCProtectionFinalizer is the name of the finalizer on PVCs that are in use by a running pod.
PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
// PVProtectionFinalizer is the name of the finalizer on PVs that are bound to a PVC.
PVProtectionFinalizer = "kubernetes.io/pv-protection"
)
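For context, a small sketch of checking for the PVC protection finalizer on a claim object; the helper is invented for illustration and simply mirrors PVCProtectionFinalizer above:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// hasPVCProtectionFinalizer reports whether the claim still carries the
// kubernetes.io/pvc-protection finalizer.
func hasPVCProtectionFinalizer(pvc *v1.PersistentVolumeClaim) bool {
    for _, f := range pvc.ObjectMeta.Finalizers {
        if f == "kubernetes.io/pvc-protection" {
            return true
        }
    }
    return false
}

func main() {
    pvc := &v1.PersistentVolumeClaim{}
    pvc.ObjectMeta.Finalizers = []string{"kubernetes.io/pvc-protection"}
    fmt.Println("protected:", hasPVCProtectionFinalizer(pvc))
}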

@@ -1,96 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = select({
"@io_bazel_rules_go//go/platform:android": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"fs.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"fs.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"fs_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/util/fs",
visibility = ["//visibility:public"],
deps = select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//vendor/golang.org/x/sys/windows:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,98 +0,0 @@
// +build linux darwin
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"bytes"
"fmt"
"os/exec"
"strings"
"golang.org/x/sys/unix"
"k8s.io/apimachinery/pkg/api/resource"
)
// FsInfo returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error)
// for the filesystem that path resides upon.
func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
statfs := &unix.Statfs_t{}
err := unix.Statfs(path, statfs)
if err != nil {
return 0, 0, 0, 0, 0, 0, err
}
// Available is blocks available * fragment size
available := int64(statfs.Bavail) * int64(statfs.Bsize)
// Capacity is total block count * fragment size
capacity := int64(statfs.Blocks) * int64(statfs.Bsize)
// Usage is block being used * fragment size (aka block size).
usage := (int64(statfs.Blocks) - int64(statfs.Bfree)) * int64(statfs.Bsize)
inodes := int64(statfs.Files)
inodesFree := int64(statfs.Ffree)
inodesUsed := inodes - inodesFree
return available, capacity, usage, inodes, inodesFree, inodesUsed, nil
}
// DiskUsage gets disk usage of specified path.
func DiskUsage(path string) (*resource.Quantity, error) {
// Uses the same niceness level as cadvisor.fs does when running du
// Uses -B 1 to always scale to a blocksize of 1 byte
out, err := exec.Command("nice", "-n", "19", "du", "-s", "-B", "1", path).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("failed command 'du' ($ nice -n 19 du -s -B 1) on path %s with error %v", path, err)
}
used, err := resource.ParseQuantity(strings.Fields(string(out))[0])
if err != nil {
return nil, fmt.Errorf("failed to parse 'du' output %s due to error %v", out, err)
}
used.Format = resource.BinarySI
return &used, nil
}
// Find uses the equivalent of the command `find <path> -xdev -printf '.' | wc -c` to count files and directories.
// While this is not an exact measure of inodes used, it is a very good approximation.
func Find(path string) (int64, error) {
if path == "" {
return 0, fmt.Errorf("invalid directory")
}
var counter byteCounter
var stderr bytes.Buffer
findCmd := exec.Command("find", path, "-xdev", "-printf", ".")
findCmd.Stdout, findCmd.Stderr = &counter, &stderr
if err := findCmd.Start(); err != nil {
return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
}
if err := findCmd.Wait(); err != nil {
return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stderr.String(), err)
}
return counter.bytesWritten, nil
}
// Simple io.Writer implementation that counts how many bytes were written.
type byteCounter struct{ bytesWritten int64 }
func (b *byteCounter) Write(p []byte) (int, error) {
b.bytesWritten += int64(len(p))
return len(p), nil
}
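// A minimal usage sketch (printFsStats is illustrative, not part of this file),
// assuming path points at a valid mount: it prints the FsInfo fields in the
// documented return order and then the du-based usage.
func printFsStats(path string) error {
	available, capacity, usage, inodes, inodesFree, inodesUsed, err := FsInfo(path)
	if err != nil {
		return err
	}
	fmt.Printf("avail=%d capacity=%d used=%d inodes=%d inodesFree=%d inodesUsed=%d\n",
		available, capacity, usage, inodes, inodesFree, inodesUsed)
	used, err := DiskUsage(path)
	if err != nil {
		return err
	}
	fmt.Printf("du reports %s used\n", used.String())
	return nil
}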

View File

@ -1,39 +0,0 @@
// +build !linux,!darwin,!windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
)
// FsInfo is unsupported on this build; it returns zero values and an error.
func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
return 0, 0, 0, 0, 0, 0, fmt.Errorf("FsInfo not supported for this build.")
}
// DiskUsage gets disk usage of specified path.
func DiskUsage(path string) (*resource.Quantity, error) {
return nil, fmt.Errorf("Du not supported for this build.")
}
func Find(path string) (int64, error) {
return 0, fmt.Errorf("Find not supported for this build.")
}

View File

@ -1,77 +0,0 @@
// +build windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"fmt"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
"k8s.io/apimachinery/pkg/api/resource"
)
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procGetDiskFreeSpaceEx = modkernel32.NewProc("GetDiskFreeSpaceExW")
)
// FsInfo returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error)
// for the filesystem that path resides upon. Inode counts are always zero on Windows.
func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
var freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes int64
var err error
ret, _, err := syscall.Syscall6(
procGetDiskFreeSpaceEx.Addr(),
4,
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))),
uintptr(unsafe.Pointer(&freeBytesAvailable)),
uintptr(unsafe.Pointer(&totalNumberOfBytes)),
uintptr(unsafe.Pointer(&totalNumberOfFreeBytes)),
0,
0,
)
if ret == 0 {
return 0, 0, 0, 0, 0, 0, err
}
return freeBytesAvailable, totalNumberOfBytes, totalNumberOfBytes - freeBytesAvailable, 0, 0, 0, nil
}
// DiskUsage gets disk usage of specified path.
func DiskUsage(path string) (*resource.Quantity, error) {
_, _, usage, _, _, _, err := FsInfo(path)
if err != nil {
return nil, err
}
used, err := resource.ParseQuantity(fmt.Sprintf("%d", usage))
if err != nil {
return nil, fmt.Errorf("failed to parse fs usage %d due to %v", usage, err)
}
used.Format = resource.BinarySI
return &used, nil
}
// Find always returns zero since inode counting is not supported on Windows.
func Find(path string) (int64, error) {
return 0, nil
}

View File

@ -1,47 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"path/filepath"
)
// IoUtil is a mockable util for common IO operations
type IoUtil interface {
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
}
type osIOHandler struct{}
// NewIOHandler creates a new IoUtil implementation backed by the standard library.
func NewIOHandler() IoUtil {
return &osIOHandler{}
}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
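// A short sketch (resolveEntries is illustrative, not part of this file) of how
// the interface is typically consumed: production code passes NewIOHandler(),
// while tests can substitute a fake that implements IoUtil.
func resolveEntries(io IoUtil, dir string) ([]string, error) {
	entries, err := io.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var resolved []string
	for _, entry := range entries {
		// Resolve each entry's symlink target relative to dir.
		target, err := io.EvalSymlinks(filepath.Join(dir, entry.Name()))
		if err != nil {
			return nil, err
		}
		resolved = append(resolved, target)
	}
	return resolved, nil
}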

View File

@ -1,64 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
var storageOperationMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "storage_operation_duration_seconds",
Help: "Storage operation duration",
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50},
},
[]string{"volume_plugin", "operation_name"},
)
var storageOperationErrorMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "storage_operation_errors_total",
Help: "Storage operation errors",
},
[]string{"volume_plugin", "operation_name"},
)
func init() {
registerMetrics()
}
func registerMetrics() {
prometheus.MustRegister(storageOperationMetric)
prometheus.MustRegister(storageOperationErrorMetric)
}
// OperationCompleteHook returns a hook to call when an operation is completed
func OperationCompleteHook(plugin, operationName string) func(*error) {
requestTime := time.Now()
opComplete := func(err *error) {
timeTaken := time.Since(requestTime).Seconds()
// Create metric with operation name and plugin name
if *err != nil {
storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc()
} else {
storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken)
}
}
return opComplete
}
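// A hedged sketch of the intended calling pattern (attachWithMetrics and the
// operation name "volume_attach" are illustrative): the returned closure is
// deferred with a pointer to the named error result, so the duration histogram
// or error counter is recorded when the surrounding function returns.
func attachWithMetrics(pluginName string, attach func() error) (err error) {
	opComplete := OperationCompleteHook(pluginName, "volume_attach")
	defer opComplete(&err)
	err = attach()
	return err
}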

View File

@ -1,99 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"k8s.io/api/core/v1"
"os"
"path"
"path/filepath"
"sort"
"strings"
)
// getNestedMountpoints returns a list of mountpoint directories that should be created
// for the volume indicated by name.
// note: the returned list is relative to baseDir
func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
var retval []string
checkContainer := func(container *v1.Container) error {
var allMountPoints []string // all mount points in this container
var myMountPoints []string // mount points that match name
for _, vol := range container.VolumeMounts {
cleaned := filepath.Clean(vol.MountPath)
allMountPoints = append(allMountPoints, cleaned)
if vol.Name == name {
myMountPoints = append(myMountPoints, cleaned)
}
}
sort.Strings(allMountPoints)
parentPrefix := ".." + string(os.PathSeparator)
// Examine each place where this volume is mounted
for _, myMountPoint := range myMountPoints {
if strings.HasPrefix(myMountPoint, parentPrefix) {
// Don't let a container trick us into creating directories outside of its rootfs
return fmt.Errorf("Invalid container mount point %v", myMountPoint)
}
myMPSlash := myMountPoint + string(os.PathSeparator)
// The previously found nested mountpoint (or "" if none found yet)
prevNestedMP := ""
// examine each mount point to see if it's nested beneath this volume
// (but skip any that are double-nested beneath this volume)
// For example, if this volume is mounted as /dir and other volumes are mounted
// as /dir/nested and /dir/nested/other, only create /dir/nested.
for _, mp := range allMountPoints {
if !strings.HasPrefix(mp, myMPSlash) {
continue // skip -- not nested beneath myMountPoint
}
if prevNestedMP != "" && strings.HasPrefix(mp, prevNestedMP) {
continue // skip -- double nested beneath myMountPoint
}
// since this mount point is nested, remember it so that we can check that following ones aren't nested beneath this one
prevNestedMP = mp + string(os.PathSeparator)
retval = append(retval, mp[len(myMPSlash):])
}
}
return nil
}
for _, container := range pod.Spec.InitContainers {
if err := checkContainer(&container); err != nil {
return nil, err
}
}
for _, container := range pod.Spec.Containers {
if err := checkContainer(&container); err != nil {
return nil, err
}
}
return retval, nil
}
// MakeNestedMountpoints creates mount points in baseDir for volumes mounted beneath name
func MakeNestedMountpoints(name, baseDir string, pod v1.Pod) error {
dirs, err := getNestedMountpoints(name, baseDir, pod)
if err != nil {
return err
}
for _, dir := range dirs {
err := os.MkdirAll(path.Join(baseDir, dir), 0755)
if err != nil {
return fmt.Errorf("Unable to create nested volume mountpoints: %v", err)
}
}
return nil
}

View File

@ -1,233 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
type testCases struct {
name string
err bool
expected sets.String
volname string
pod v1.Pod
}
func TestGetNestedMountpoints(t *testing.T) {
var (
testNamespace = "test_namespace"
testPodUID = types.UID("test_pod_uid")
)
tc := []testCases{
{
name: "Simple Pod",
err: false,
expected: sets.NewString(),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir", Name: "vol1"},
},
},
},
},
},
},
{
name: "Simple Nested Pod",
err: false,
expected: sets.NewString("nested"),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir", Name: "vol1"},
{MountPath: "/dir/nested", Name: "vol2"},
},
},
},
},
},
},
{
name: "Unsorted Nested Pod",
err: false,
expected: sets.NewString("nested", "nested2"),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir/nested/double", Name: "vol3"},
{MountPath: "/ignore", Name: "vol4"},
{MountPath: "/dir/nested", Name: "vol2"},
{MountPath: "/ignore2", Name: "vol5"},
{MountPath: "/dir", Name: "vol1"},
{MountPath: "/dir/nested2", Name: "vol3"},
},
},
},
},
},
},
{
name: "Multiple vol1 mounts Pod",
err: false,
expected: sets.NewString("nested", "nested2"),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir", Name: "vol1"},
{MountPath: "/dir/nested", Name: "vol2"},
{MountPath: "/ignore", Name: "vol4"},
{MountPath: "/other", Name: "vol1"},
{MountPath: "/other/nested2", Name: "vol3"},
},
},
},
},
},
},
{
name: "Big Pod",
err: false,
volname: "vol1",
expected: sets.NewString("sub1/sub2/sub3", "sub1/sub2/sub4", "sub1/sub2/sub6", "sub"),
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/mnt", Name: "vol1"},
{MountPath: "/ignore", Name: "vol2"},
{MountPath: "/mnt/sub1/sub2/sub3", Name: "vol3"},
{MountPath: "/mnt/sub1/sub2/sub4", Name: "vol4"},
{MountPath: "/mnt/sub1/sub2/sub4/skip", Name: "vol5"},
{MountPath: "/mnt/sub1/sub2/sub4/skip2", Name: "vol5a"},
{MountPath: "/mnt/sub1/sub2/sub6", Name: "vol6"},
{MountPath: "/mnt7", Name: "vol7"},
},
},
},
InitContainers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/mnt/dir", Name: "vol1"},
{MountPath: "/mnt/dir_ignore", Name: "vol8"},
{MountPath: "/ignore", Name: "vol9"},
{MountPath: "/mnt/dir/sub", Name: "vol11"},
},
},
},
},
},
},
{
name: "Naughty Pod",
err: true,
expected: nil,
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "foo/../../dir", Name: "vol1"},
{MountPath: "foo/../../dir/skip", Name: "vol10"},
},
},
},
},
},
},
}
for _, test := range tc {
dir, err := ioutil.TempDir("", "TestMakeNestedMountpoints.")
if err != nil {
t.Errorf("Unexpected error trying to create temp directory: %v", err)
return
}
defer os.RemoveAll(dir)
rootdir := path.Join(dir, "vol")
err = os.Mkdir(rootdir, 0755)
if err != nil {
t.Errorf("Unexpected error trying to create temp root directory: %v", err)
return
}
dirs, err := getNestedMountpoints(test.volname, rootdir, test.pod)
if test.err {
if err == nil {
t.Errorf("%v: expected error, got nil", test.name)
}
continue
} else {
if err != nil {
t.Errorf("%v: expected no error, got %v", test.name, err)
continue
}
}
actual := sets.NewString(dirs...)
if !test.expected.Equal(actual) {
t.Errorf("%v: unexpected nested directories created:\nexpected: %v\n got: %v", test.name, test.expected, actual)
}
}
}

View File

@ -1,44 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["nestedpendingoperations.go"],
importpath = "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations",
deps = [
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["nestedpendingoperations_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,2 +0,0 @@
approvers:
- saad-ali

View File

@ -1,322 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package nestedpendingoperations is a modified implementation of
pkg/util/goroutinemap. It implements a data structure for managing go routines
by volume/pod name. It prevents the creation of new go routines if an existing
go routine for the volume already exists. It also allows multiple operations to
execute in parallel for the same volume as long as they are operating on
different pods.
*/
package nestedpendingoperations
import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
// EmptyUniquePodName is a UniquePodName for empty string.
EmptyUniquePodName types.UniquePodName = types.UniquePodName("")
// EmptyUniqueVolumeName is a UniqueVolumeName for empty string
EmptyUniqueVolumeName v1.UniqueVolumeName = v1.UniqueVolumeName("")
)
// NestedPendingOperations defines the supported set of operations.
type NestedPendingOperations interface {
// Run adds the concatenation of volumeName and podName to the list of
// running operations and spawns a new go routine to execute operationFunc.
// If an operation with the same volumeName and same or empty podName
// exists, an AlreadyExists or ExponentialBackoff error is returned.
// This enables multiple operations to execute in parallel for the same
// volumeName as long as they have different podName.
// Once the operation is complete, the go routine is terminated and the
// concatenation of volumeName and podName is removed from the list of
// executing operations allowing a new operation to be started with the
// volumeName without error.
Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, generatedOperations types.GeneratedOperations) error
// Wait blocks until all operations are completed. This is typically
// necessary during tests - the test should wait until all operations finish
// and evaluate results after that.
Wait()
// IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false
IsOperationPending(volumeName v1.UniqueVolumeName, podName types.UniquePodName) bool
}
// NewNestedPendingOperations returns a new instance of NestedPendingOperations.
func NewNestedPendingOperations(exponentialBackOffOnError bool) NestedPendingOperations {
g := &nestedPendingOperations{
operations: []operation{},
exponentialBackOffOnError: exponentialBackOffOnError,
}
g.cond = sync.NewCond(&g.lock)
return g
}
type nestedPendingOperations struct {
operations []operation
exponentialBackOffOnError bool
cond *sync.Cond
lock sync.RWMutex
}
type operation struct {
volumeName v1.UniqueVolumeName
podName types.UniquePodName
operationPending bool
expBackoff exponentialbackoff.ExponentialBackoff
}
func (grm *nestedPendingOperations) Run(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName,
generatedOperations types.GeneratedOperations) error {
grm.lock.Lock()
defer grm.lock.Unlock()
opExists, previousOpIndex := grm.isOperationExists(volumeName, podName)
if opExists {
previousOp := grm.operations[previousOpIndex]
// Operation already exists
if previousOp.operationPending {
// Operation is pending
operationName := getOperationName(volumeName, podName)
return NewAlreadyExistsError(operationName)
}
operationName := getOperationName(volumeName, podName)
if err := previousOp.expBackoff.SafeToRetry(operationName); err != nil {
return err
}
// Update existing operation to mark as pending.
grm.operations[previousOpIndex].operationPending = true
grm.operations[previousOpIndex].volumeName = volumeName
grm.operations[previousOpIndex].podName = podName
} else {
// Create a new operation
grm.operations = append(grm.operations,
operation{
operationPending: true,
volumeName: volumeName,
podName: podName,
expBackoff: exponentialbackoff.ExponentialBackoff{},
})
}
go func() (eventErr, detailedErr error) {
// Handle unhandled panics (very unlikely)
defer k8sRuntime.HandleCrash()
// Handle completion of, and any error from, operationFunc()
defer grm.operationComplete(volumeName, podName, &detailedErr)
if generatedOperations.CompleteFunc != nil {
defer generatedOperations.CompleteFunc(&detailedErr)
}
if generatedOperations.EventRecorderFunc != nil {
defer generatedOperations.EventRecorderFunc(&eventErr)
}
// Handle panic, if any, from operationFunc()
defer k8sRuntime.RecoverFromPanic(&detailedErr)
return generatedOperations.OperationFunc()
}()
return nil
}
func (grm *nestedPendingOperations) IsOperationPending(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) bool {
grm.lock.RLock()
defer grm.lock.RUnlock()
exist, previousOpIndex := grm.isOperationExists(volumeName, podName)
if exist && grm.operations[previousOpIndex].operationPending {
return true
}
return false
}
// isOperationExists is an internal function; the caller must acquire and release the lock.
func (grm *nestedPendingOperations) isOperationExists(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) (bool, int) {
// If volumeName is empty, operation can be executed concurrently
if volumeName == EmptyUniqueVolumeName {
return false, -1
}
for previousOpIndex, previousOp := range grm.operations {
if previousOp.volumeName != volumeName {
// No match, keep searching
continue
}
if previousOp.podName != EmptyUniquePodName &&
podName != EmptyUniquePodName &&
previousOp.podName != podName {
// No match, keep searching
continue
}
// Match
return true, previousOpIndex
}
return false, -1
}
func (grm *nestedPendingOperations) getOperation(
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) (uint, error) {
// Assumes lock has been acquired by caller.
for i, op := range grm.operations {
if op.volumeName == volumeName &&
op.podName == podName {
return uint(i), nil
}
}
logOperationName := getOperationName(volumeName, podName)
return 0, fmt.Errorf("Operation %q not found", logOperationName)
}
func (grm *nestedPendingOperations) deleteOperation(
// Assumes lock has been acquired by caller.
volumeName v1.UniqueVolumeName,
podName types.UniquePodName) {
opIndex := -1
for i, op := range grm.operations {
if op.volumeName == volumeName &&
op.podName == podName {
opIndex = i
break
}
}
// Delete index without preserving order
grm.operations[opIndex] = grm.operations[len(grm.operations)-1]
grm.operations = grm.operations[:len(grm.operations)-1]
}
func (grm *nestedPendingOperations) operationComplete(
volumeName v1.UniqueVolumeName, podName types.UniquePodName, err *error) {
// Deferred calls are executed in last-in, first-out order. The lock is
// acquired when operationComplete begins and released when the method
// finishes; after the lock is released, cond is signaled to wake any
// waiting goroutine.
defer grm.cond.Signal()
grm.lock.Lock()
defer grm.lock.Unlock()
if *err == nil || !grm.exponentialBackOffOnError {
// Operation completed without error, or exponentialBackOffOnError disabled
grm.deleteOperation(volumeName, podName)
if *err != nil {
// Log error
logOperationName := getOperationName(volumeName, podName)
glog.Errorf("operation %s failed with: %v",
logOperationName,
*err)
}
return
}
// Operation completed with error and exponentialBackOffOnError Enabled
existingOpIndex, getOpErr := grm.getOperation(volumeName, podName)
if getOpErr != nil {
// Failed to find existing operation
logOperationName := getOperationName(volumeName, podName)
glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.",
logOperationName,
*err)
return
}
grm.operations[existingOpIndex].expBackoff.Update(err)
grm.operations[existingOpIndex].operationPending = false
// Log error
operationName :=
getOperationName(volumeName, podName)
glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff.
GenerateNoRetriesPermittedMsg(operationName))
}
func (grm *nestedPendingOperations) Wait() {
grm.lock.Lock()
defer grm.lock.Unlock()
for len(grm.operations) > 0 {
grm.cond.Wait()
}
}
func getOperationName(
volumeName v1.UniqueVolumeName, podName types.UniquePodName) string {
podNameStr := ""
if podName != EmptyUniquePodName {
podNameStr = fmt.Sprintf(" (%q)", podName)
}
return fmt.Sprintf("%q%s",
volumeName,
podNameStr)
}
// NewAlreadyExistsError returns a new instance of AlreadyExists error.
func NewAlreadyExistsError(operationName string) error {
return alreadyExistsError{operationName}
}
// IsAlreadyExists returns true if an error returned from
// NestedPendingOperations indicates a new operation can not be started because
// an operation with the same operation name is already executing.
func IsAlreadyExists(err error) bool {
switch err.(type) {
case alreadyExistsError:
return true
default:
return false
}
}
// alreadyExistsError is the error returned by NestedPendingOperations when a
// new operation can not be started because an operation with the same operation
// name is already executing.
type alreadyExistsError struct {
operationName string
}
var _ error = alreadyExistsError{}
func (err alreadyExistsError) Error() string {
return fmt.Sprintf(
"Failed to create operation with name %q. An operation with that name is already executing.",
err.operationName)
}

View File

@ -1,570 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nestedpendingoperations
import (
"fmt"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
// testTimeout is a timeout for goroutines to finish. This _should_ be just a
// "context switch" and it should take several ms, however, Clayton says "We
// have had flakes due to tests that assumed that 15s is long enough to sleep".
testTimeout time.Duration = 1 * time.Minute
// initialOperationWaitTimeShort is the initial amount of time the test will
// wait for an operation to complete (each successive failure results in
// exponential backoff).
initialOperationWaitTimeShort time.Duration = 20 * time.Millisecond
// initialOperationWaitTimeLong is the initial amount of time the test will
// wait for an operation to complete (each successive failure results in
// exponential backoff).
initialOperationWaitTimeLong time.Duration = 500 * time.Millisecond
)
func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation := func() (error, error) { return nil, nil }
// Act
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation})
// Assert
if err != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err)
}
}
func Test_NewGoRoutineMap_Positive_TwoOps(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volume1Name := v1.UniqueVolumeName("volume1-name")
volume2Name := v1.UniqueVolumeName("volume2-name")
operation := func() (error, error) { return nil, nil }
// Act
err1 := grm.Run(volume1Name, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation})
err2 := grm.Run(volume2Name, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation})
// Assert
if err1 != nil {
t.Fatalf("NewGoRoutine %q failed. Expected: <no error> Actual: <%v>", volume1Name, err1)
}
if err2 != nil {
t.Fatalf("NewGoRoutine %q failed. Expected: <no error> Actual: <%v>", volume2Name, err2)
}
}
func Test_NewGoRoutineMap_Positive_TwoSubOps(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1PodName := types.UniquePodName("operation1-podname")
operation2PodName := types.UniquePodName("operation2-podname")
operation := func() (error, error) { return nil, nil }
// Act
err1 := grm.Run(volumeName, operation1PodName, types.GeneratedOperations{OperationFunc: operation})
err2 := grm.Run(volumeName, operation2PodName, types.GeneratedOperations{OperationFunc: operation})
// Assert
if err1 != nil {
t.Fatalf("NewGoRoutine %q failed. Expected: <no error> Actual: <%v>", operation1PodName, err1)
}
if err2 != nil {
t.Fatalf("NewGoRoutine %q failed. Expected: <no error> Actual: <%v>", operation2PodName, err2)
}
}
func Test_NewGoRoutineMap_Positive_SingleOpWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation := func() (error, error) { return nil, nil }
// Act
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation})
// Assert
if err != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err)
}
}
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateCallbackFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
<-operation1DoneCh // Force operation1 to complete
// Act
err2 := retryWithExponentialBackOff(
time.Duration(initialOperationWaitTimeShort),
func() (bool, error) {
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
if err != nil {
t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err)
return false, nil
}
return true, nil
},
)
// Assert
if err2 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err2)
}
}
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateCallbackFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
<-operation1DoneCh // Force operation1 to complete
// Act
err2 := retryWithExponentialBackOff(
time.Duration(initialOperationWaitTimeShort),
func() (bool, error) {
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
if err != nil {
t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err)
return false, nil
}
return true, nil
},
)
// Assert
if err2 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err2)
}
}
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1 := generatePanicFunc()
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
// Act
err2 := retryWithExponentialBackOff(
time.Duration(initialOperationWaitTimeShort),
func() (bool, error) {
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
if err != nil {
t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err)
return false, nil
}
return true, nil
},
)
// Assert
if err2 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err2)
}
}
func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1 := generatePanicFunc()
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
// Act
err2 := retryWithExponentialBackOff(
time.Duration(initialOperationWaitTimeLong), // Longer duration to accommodate for backoff
func() (bool, error) {
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
if err != nil {
t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err)
return false, nil
}
return true, nil
},
)
// Assert
if err2 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err2)
}
}
func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
// Act
err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
// Assert
if err2 == nil {
t.Fatalf("NewGoRoutine did not fail. Expected: <Failed to create operation with name \"%s\". An operation with that name already exists.> Actual: <no error>", volumeName)
}
if !IsAlreadyExists(err2) {
t.Fatalf("NewGoRoutine did not return alreadyExistsError, got: %v", err2)
}
}
func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes2(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operationPodName := types.UniquePodName("operation-podname")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
// Act
err2 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation2})
// Assert
if err2 == nil {
t.Fatalf("NewGoRoutine did not fail. Expected: <Failed to create operation with name \"%s\". An operation with that name already exists.> Actual: <no error>", volumeName)
}
if !IsAlreadyExists(err2) {
t.Fatalf("NewGoRoutine did not return alreadyExistsError, got: %v", err2)
}
}
func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operationPodName := types.UniquePodName("operation-podname")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
// Act
err2 := grm.Run(volumeName, operationPodName, types.GeneratedOperations{OperationFunc: operation2})
// Assert
if err2 == nil {
t.Fatalf("NewGoRoutine did not fail. Expected: <Failed to create operation with name \"%s\". An operation with that name already exists.> Actual: <no error>", volumeName)
}
if !IsAlreadyExists(err2) {
t.Fatalf("NewGoRoutine did not return alreadyExistsError, got: %v", err2)
}
}
func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletesWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
// Act
err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
// Assert
if err2 == nil {
t.Fatalf("NewGoRoutine did not fail. Expected: <Failed to create operation with name \"%s\". An operation with that name already exists.> Actual: <no error>", volumeName)
}
if !IsAlreadyExists(err2) {
t.Fatalf("NewGoRoutine did not return alreadyExistsError, got: %v", err2)
}
}
func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
operation3 := generateNoopFunc()
// Act
err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
// Assert
if err2 == nil {
t.Fatalf("NewGoRoutine did not fail. Expected: <Failed to create operation with name \"%s\". An operation with that name already exists.> Actual: <no error>", volumeName)
}
if !IsAlreadyExists(err2) {
t.Fatalf("NewGoRoutine did not return alreadyExistsError, got: %v", err2)
}
// Act
operation1DoneCh <- true // Force operation1 to complete
err3 := retryWithExponentialBackOff(
time.Duration(initialOperationWaitTimeShort),
func() (bool, error) {
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation3})
if err != nil {
t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err)
return false, nil
}
return true, nil
},
)
// Assert
if err3 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err3)
}
}
func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *testing.T) {
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err1 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err1 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
}
operation2 := generateNoopFunc()
operation3 := generateNoopFunc()
// Act
err2 := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation2})
// Assert
if err2 == nil {
t.Fatalf("NewGoRoutine did not fail. Expected: <Failed to create operation with name \"%s\". An operation with that name already exists.> Actual: <no error>", volumeName)
}
if !IsAlreadyExists(err2) {
t.Fatalf("NewGoRoutine did not return alreadyExistsError, got: %v", err2)
}
// Act
operation1DoneCh <- true // Force operation1 to complete
err3 := retryWithExponentialBackOff(
time.Duration(initialOperationWaitTimeShort),
func() (bool, error) {
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation3})
if err != nil {
t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err)
return false, nil
}
return true, nil
},
)
// Assert
if err3 != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err3)
}
}
func Test_NewGoRoutineMap_Positive_WaitEmpty(t *testing.T) {
// Test that Wait() on an empty GoRoutineMap always succeeds without blocking
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
// Act
waitDoneCh := make(chan interface{}, 1)
go func() {
grm.Wait()
waitDoneCh <- true
}()
// Assert
err := waitChannelWithTimeout(waitDoneCh, testTimeout)
if err != nil {
t.Errorf("Error waiting for GoRoutineMap.Wait: %v", err)
}
}
func Test_NewGoRoutineMap_Positive_WaitEmptyWithExpBackoff(t *testing.T) {
// Test that Wait() on an empty GoRoutineMap always succeeds without blocking
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
// Act
waitDoneCh := make(chan interface{}, 1)
go func() {
grm.Wait()
waitDoneCh <- true
}()
// Assert
err := waitChannelWithTimeout(waitDoneCh, testTimeout)
if err != nil {
t.Errorf("Error waiting for GoRoutineMap.Wait: %v", err)
}
}
func Test_NewGoRoutineMap_Positive_Wait(t *testing.T) {
// Test that Wait() really blocks until the last operation succeeds
// Arrange
grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err)
}
// Act
waitDoneCh := make(chan interface{}, 1)
go func() {
grm.Wait()
waitDoneCh <- true
}()
// Finish the operation
operation1DoneCh <- true
// Assert
err = waitChannelWithTimeout(waitDoneCh, testTimeout)
if err != nil {
t.Fatalf("Error waiting for GoRoutineMap.Wait: %v", err)
}
}
func Test_NewGoRoutineMap_Positive_WaitWithExpBackoff(t *testing.T) {
// Test that Wait() really blocks until the last operation succeeds
// Arrange
grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */)
volumeName := v1.UniqueVolumeName("volume-name")
operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
operation1 := generateWaitFunc(operation1DoneCh)
err := grm.Run(volumeName, "" /* operationSubName */, types.GeneratedOperations{OperationFunc: operation1})
if err != nil {
t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err)
}
// Act
waitDoneCh := make(chan interface{}, 1)
go func() {
grm.Wait()
waitDoneCh <- true
}()
// Finish the operation
operation1DoneCh <- true
// Assert
err = waitChannelWithTimeout(waitDoneCh, testTimeout)
if err != nil {
t.Fatalf("Error waiting for GoRoutineMap.Wait: %v", err)
}
}
func generateCallbackFunc(done chan<- interface{}) func() (error, error) {
return func() (error, error) {
done <- true
return nil, nil
}
}
func generateWaitFunc(done <-chan interface{}) func() (error, error) {
return func() (error, error) {
<-done
return nil, nil
}
}
func generatePanicFunc() func() (error, error) {
return func() (error, error) {
panic("testing panic")
}
}
func generateNoopFunc() func() (error, error) {
return func() (error, error) { return nil, nil }
}
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
backoff := wait.Backoff{
Duration: initialDuration,
Factor: 3,
Jitter: 0,
Steps: 4,
}
return wait.ExponentialBackoff(backoff, fn)
}
func waitChannelWithTimeout(ch <-chan interface{}, timeout time.Duration) error {
timer := time.NewTimer(timeout)
defer timer.Stop()
select {
case <-ch:
// Success!
return nil
case <-timer.C:
return fmt.Errorf("timeout after %v", timeout)
}
}

View File

@ -1,65 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"operation_executor.go",
"operation_generator.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/util/operationexecutor",
deps = [
"//pkg/controller/volume/expand/cache:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/resizefs:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/nestedpendingoperations:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["operation_executor_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller/volume/expand/cache:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,2 +0,0 @@
approvers:
- saad-ali

View File

@ -1,942 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package operationexecutor implements interfaces that enable execution of
// attach, detach, mount, and unmount operations with a
// NestedPendingOperations instance so that no more than one operation is ever
// triggered on the same volume for the same pod.
package operationexecutor
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
// OperationExecutor defines a set of operations for attaching, detaching,
// mounting, or unmounting a volume that are executed with a NewNestedPendingOperations which
// prevents more than one operation from being triggered on the same volume.
//
// These operations should be idempotent (for example, AttachVolume should
// still succeed if the volume is already attached to the node, etc.). However,
// they depend on the volume plugins to implement this behavior.
//
// Once an operation completes successfully, the actualStateOfWorld is updated
// to indicate the volume is attached/detached/mounted/unmounted.
//
// If the OperationExecutor fails to start the operation because, for example,
// an operation with the same UniqueVolumeName is already pending, a non-nil
// error is returned.
//
// Once the operation is started, since it is executed asynchronously,
// errors are simply logged and the goroutine is terminated without updating
// actualStateOfWorld (callers are responsible for retrying as needed).
//
// Some of these operations may result in calls to the API server; callers are
// responsible for rate limiting on errors.
type OperationExecutor interface {
// AttachVolume attaches the volume to the node specified in volumeToAttach.
// It then updates the actual state of the world to reflect that.
AttachVolume(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// VerifyVolumesAreAttachedPerNode verifies the given list of volumes to see whether they are still attached to the node.
// If any volume is not attached right now, it will update the actual state of the world to reflect that.
// Note that this operation could be operated concurrently with other attach/detach operations.
// In theory (but very unlikely in practice), a race condition among these operations might mark a volume as detached
// even if it is attached. But the reconciler can correct this in a short period of time.
VerifyVolumesAreAttachedPerNode(AttachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// VerifyVolumesAreAttached verifies volumes being used across the entire cluster and checks whether they are still attached to their nodes.
// If any volume is not attached right now, it will update the actual state of the world to reflect that.
VerifyVolumesAreAttached(volumesToVerify map[types.NodeName][]AttachedVolume, actualStateOfWorld ActualStateOfWorldAttacherUpdater)
// DetachVolume detaches the volume from the node specified in
// volumeToDetach, and updates the actual state of the world to reflect
// that. If verifySafeToDetach is set, a call is made to fetch the node
// object, which is used to verify that the volume does not appear in the node's
// Status.VolumesInUse list (the operation fails with an error if it does).
DetachVolume(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// If a volume has 'Filesystem' volumeMode, MountVolume mounts the
// volume to the pod specified in volumeToMount.
// Specifically it will:
// * Wait for the device to finish attaching (for attachable volumes only).
// * Mount device to global mount path (for attachable volumes only).
// * Update actual state of world to reflect volume is globally mounted (for
// attachable volumes only).
// * Mount the volume to the pod specific path.
// * Update actual state of world to reflect volume is mounted to the pod
// path.
// The parameter "isRemount" is informational and used to adjust logging
// verbosity. An initial mount is more log-worthy than a remount, for
// example.
//
// For 'Block' volumeMode, this method creates a symbolic link to
// the volume from both the pod specified in volumeToMount and global map path.
// Specifically it will:
// * Wait for the device to finish attaching (for attachable volumes only).
// * Update actual state of world to reflect volume is globally mounted/mapped.
// * Map volume to global map path using symbolic link.
// * Map the volume to the pod device map path using symbolic link.
// * Update actual state of world to reflect volume is mounted/mapped to the pod path.
MountVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error
// If a volume has 'Filesystem' volumeMode, UnmountVolume unmounts the
// volume from the pod specified in volumeToUnmount and updates the actual
// state of the world to reflect that.
//
// For 'Block' volumeMode, this method unmaps symbolic link to the volume
// from both the pod device map path in volumeToUnmount and global map path.
// And then, updates the actual state of the world to reflect that.
UnmountVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) error
// If a volume has 'Filesystem' volumeMode, UnmountDevice unmounts the
// volume's global mount path from the device (for attachable volumes only),
// freeing it for detach. It then updates the actual state of the world to
// reflect that.
//
// For 'Block' volumeMode, this method checks the number of symbolic links under
// the global map path. If the number of references is zero, it removes the global
// map path directory and frees the volume for detach.
// It then updates the actual state of the world to reflect that.
UnmountDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error
// VerifyControllerAttachedVolume checks whether the specified volume is present
// in the specified node's Status.VolumesAttached list. It uses kubeClient
// to fetch the node object.
// If the volume is found, the actual state of the world is updated to mark
// the volume as attached.
// If the volume plugin does not implement the Attacher interface, the volume
// is assumed to be attached and the actual state of the world is updated accordingly.
// If the volume is not found or there is an error (fetching the node
// object, for example) then an error is returned which triggers exponential
// back off on retries.
VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// IsOperationPending returns true if an operation for the given volumeName and podName is pending,
// otherwise it returns false
IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool
// ExpandVolume grows the size available to the PVC
ExpandVolume(*expandcache.PVCWithResizeRequest, expandcache.VolumeResizeMap) error
// ExpandVolumeFSWithoutUnmounting will resize volume's file system to expected size without unmounting the volume.
ExpandVolumeFSWithoutUnmounting(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error
// ReconstructVolumeOperation constructs a new volumeSpec from the given mount path using the plugin and returns it
ReconstructVolumeOperation(volumeMode v1.PersistentVolumeMode, plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error)
// CheckVolumeExistenceOperation checks volume existence
CheckVolumeExistenceOperation(volumeSpec *volume.Spec, mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error)
}
// NewOperationExecutor returns a new instance of OperationExecutor.
func NewOperationExecutor(
operationGenerator OperationGenerator) OperationExecutor {
return &operationExecutor{
pendingOperations: nestedpendingoperations.NewNestedPendingOperations(
true /* exponentialBackOffOnError */),
operationGenerator: operationGenerator,
}
}
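// Illustrative usage sketch (not part of this file): a caller such as the
// attach/detach controller or the kubelet reconciler would typically wire the
// executor up roughly as below, assuming an OperationGenerator "og" and an
// actual-state-of-the-world updater "asw" (both hypothetical names here) have
// already been constructed elsewhere:
//
//	oe := NewOperationExecutor(og)
//	if !oe.IsOperationPending(volumeToAttach.VolumeName, nestedpendingoperations.EmptyUniquePodName) {
//		_ = oe.AttachVolume(volumeToAttach, asw)
//	}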
// ActualStateOfWorldMounterUpdater defines a set of operations updating the actual
// state of the world cache after successful mount/unmount.
type ActualStateOfWorldMounterUpdater interface {
// Marks the specified volume as mounted to the specified pod
MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string, volumeSpec *volume.Spec) error
// Marks the specified volume as unmounted from the specified pod
MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error
// Marks the specified volume as having been globally mounted.
MarkDeviceAsMounted(volumeName v1.UniqueVolumeName, devicePath, deviceMountPath string) error
// Marks the specified volume as having its global mount unmounted.
MarkDeviceAsUnmounted(volumeName v1.UniqueVolumeName) error
// Marks that the specified volume's file system resize request has finished.
MarkVolumeAsResized(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error
}
// ActualStateOfWorldAttacherUpdater defines a set of operations updating the
// actual state of the world cache after successful attach/detach/mount/unmount.
type ActualStateOfWorldAttacherUpdater interface {
// Marks the specified volume as attached to the specified node. If the
// volume name is supplied, that volume name will be used. If not, the
// volume name is computed using the result from querying the plugin.
//
// TODO: in the future, we should be able to remove the volumeName
// argument to this method -- since it is used only for attachable
// volumes. See issue 29695.
MarkVolumeAsAttached(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
// Marks the specified volume as detached from the specified node
MarkVolumeAsDetached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// Marks desire to detach the specified volume (remove the volume from the node's
// volumesToReportAsAttached list)
RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error
// Unmarks the desire to detach for the specified volume (add the volume back to
// the node's volumesToReportAsAttached list)
AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
}
// VolumeLogger defines a set of operations for generating volume-related logging and error msgs
type VolumeLogger interface {
// Creates a detailed msg that can be used in logs
// The msg format follows the pattern "<prefixMsg> <volume details> <suffixMsg>",
// where each implementation provides the volume details
GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string)
// Creates a detailed error that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <volume details>: <err> ",
GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error)
// Creates a simple msg that is user friendly and a detailed msg that can be used in logs
// The msg format follows the pattern "<prefixMsg> <volume details> <suffixMsg>",
// where each implementation provides the volume details
GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string)
// Creates a simple error that is user friendly and a detailed error that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <volume details>: <err> ",
GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error)
}
// Generates an error string with the format ": <err>" if err exists
func errSuffix(err error) string {
errStr := ""
if err != nil {
errStr = fmt.Sprintf(": %v", err)
}
return errStr
}
// Generate a detailed error msg for logs
func generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeName, details string) (detailedMsg string) {
return fmt.Sprintf("%v for volume %q %v %v", prefixMsg, volumeName, details, suffixMsg)
}
// Generate a simplified error msg for events and a detailed error msg for logs
func generateVolumeMsg(prefixMsg, suffixMsg, volumeName, details string) (simpleMsg, detailedMsg string) {
simpleMsg = fmt.Sprintf("%v for volume %q %v", prefixMsg, volumeName, suffixMsg)
return simpleMsg, generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeName, details)
}
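// Illustrative example (hypothetical values): with prefixMsg "MountVolume.SetUp failed",
// volumeName "pvc-0001" and details `(UniqueName: "kubernetes.io/gce-pd/pd-1") pod "web-0" (UID: "abc")`,
// the two helpers above produce messages of the form:
//
//	simpleMsg:   MountVolume.SetUp failed for volume "pvc-0001" <suffixMsg>
//	detailedMsg: MountVolume.SetUp failed for volume "pvc-0001" (UniqueName: "kubernetes.io/gce-pd/pd-1") pod "web-0" (UID: "abc") <suffixMsg>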
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
// MultiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to prevent the error from being reported more than once for a given volume.
MultiAttachErrorReported bool
// VolumeName is the unique identifier for the volume that should be
// attached.
VolumeName v1.UniqueVolumeName
// VolumeSpec is a volume spec containing the specification for the volume
// that should be attached.
VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume should be
// attached to.
NodeName types.NodeName
// ScheduledPods is the list of pods that reference this volume and are
// scheduled to the underlying node.
ScheduledPods []*v1.Pod
}
// GenerateMsgDetailed returns detailed msgs for volumes to attach
func (volume *VolumeToAttach) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) from node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for volumes to attach
func (volume *VolumeToAttach) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) from node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsg(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for volumes to attach
func (volume *VolumeToAttach) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for volumes to attach
func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
}
// VolumeToMount represents a volume that should be attached to this node and
// mounted to the PodName.
type VolumeToMount struct {
// VolumeName is the unique identifier for the volume that should be
// mounted.
VolumeName v1.UniqueVolumeName
// PodName is the unique identifier for the pod that the volume should be
// mounted to after it is attached.
PodName volumetypes.UniquePodName
// VolumeSpec is a volume spec containing the specification for the volume
// that should be mounted. Used to create NewMounter. Used to generate
// InnerVolumeSpecName.
VolumeSpec *volume.Spec
// OuterVolumeSpecName is the podSpec.Volume[x].Name of the volume. If the
// volume was referenced through a persistent volume claim, this contains
// the podSpec.Volume[x].Name of the persistent volume claim.
OuterVolumeSpecName string
// Pod to mount the volume to. Used to create NewMounter.
Pod *v1.Pod
// PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
PluginIsAttachable bool
// VolumeGidValue contains the value of the GID annotation, if present.
VolumeGidValue string
// DevicePath contains the path on the node where the volume is attached.
// For non-attachable volumes this is empty.
DevicePath string
// ReportedInUse indicates that the volume was successfully added to the
// VolumesInUse field in the node's status.
ReportedInUse bool
}
// GenerateMsgDetailed returns detailed msgs for volumes to mount
func (volume *VolumeToMount) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.Pod.Name, volume.Pod.UID)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for volumes to mount
func (volume *VolumeToMount) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.Pod.Name, volume.Pod.UID)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsg(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for volumes to mount
func (volume *VolumeToMount) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for volumes to mount
func (volume *VolumeToMount) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
// VolumeName is the unique identifier for the volume that is attached.
VolumeName v1.UniqueVolumeName
// VolumeSpec is the volume spec containing the specification for the
// volume that is attached.
VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume is attached to.
NodeName types.NodeName
// PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
PluginIsAttachable bool
// DevicePath contains the path on the node where the volume is attached.
// For non-attachable volumes this is empty.
DevicePath string
// DeviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
DeviceMountPath string
// PluginName is the unescaped qualified name of the volume plugin used to
// attach and mount this volume.
PluginName string
}
// GenerateMsgDetailed returns detailed msgs for attached volumes
func (volume *AttachedVolume) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) on node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for attached volumes
func (volume *AttachedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) on node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsg(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for attached volumes
func (volume *AttachedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for attached volumes
func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
}
// MountedVolume represents a volume that has successfully been mounted to a pod.
type MountedVolume struct {
// PodName is the unique identifier of the pod mounted to.
PodName volumetypes.UniquePodName
// VolumeName is the unique identifier of the volume mounted to the pod.
VolumeName v1.UniqueVolumeName
// InnerVolumeSpecName is the volume.Spec.Name() of the volume. If the
// volume was referenced through a persistent volume claim, this contains
// the name of the bound persistent volume object.
// It is the name that plugins use in their pod mount path, i.e.
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{innerVolumeSpecName}/
// PVC example,
// apiVersion: v1
// kind: PersistentVolume
// metadata:
// name: pv0003 <- InnerVolumeSpecName
// spec:
// capacity:
// storage: 5Gi
// accessModes:
// - ReadWriteOnce
// persistentVolumeReclaimPolicy: Recycle
// nfs:
// path: /tmp
// server: 172.17.0.2
// Non-PVC example:
// apiVersion: v1
// kind: Pod
// metadata:
// name: test-pd
// spec:
// containers:
// - image: k8s.gcr.io/test-webserver
// name: test-container
// volumeMounts:
// - mountPath: /test-pd
// name: test-volume
// volumes:
// - name: test-volume <- InnerVolumeSpecName
// gcePersistentDisk:
// pdName: my-data-disk
// fsType: ext4
InnerVolumeSpecName string
// OuterVolumeSpecName is the podSpec.Volume[x].Name of the volume. If the
// volume was referenced through a persistent volume claim, this contains
// the podSpec.Volume[x].Name of the persistent volume claim.
// PVC example:
// kind: Pod
// apiVersion: v1
// metadata:
// name: mypod
// spec:
// containers:
// - name: myfrontend
// image: dockerfile/nginx
// volumeMounts:
// - mountPath: "/var/www/html"
// name: mypd
// volumes:
// - name: mypd <- OuterVolumeSpecName
// persistentVolumeClaim:
// claimName: myclaim
// Non-PVC example:
// apiVersion: v1
// kind: Pod
// metadata:
// name: test-pd
// spec:
// containers:
// - image: k8s.gcr.io/test-webserver
// name: test-container
// volumeMounts:
// - mountPath: /test-pd
// name: test-volume
// volumes:
// - name: test-volume <- OuterVolumeSpecName
// gcePersistentDisk:
// pdName: my-data-disk
// fsType: ext4
OuterVolumeSpecName string
// PluginName is the "Unescaped Qualified" name of the volume plugin used to
// mount and unmount this volume. It can be used to fetch the volume plugin
// to unmount with, on demand. It is also the name that plugins use, though
// escaped, in their pod mount path, i.e.
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{outerVolumeSpecName}/
PluginName string
// PodUID is the UID of the pod mounted to. It is also the string used by
// plugins in their pod mount path, i.e.
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{outerVolumeSpecName}/
PodUID types.UID
// Mounter is the volume mounter used to mount this volume. It is required
// by kubelet to create container.VolumeMap.
Mounter volume.Mounter
// BlockVolumeMapper is the volume mapper used to map this volume. It is required
// by kubelet to create container.VolumeMap.
BlockVolumeMapper volume.BlockVolumeMapper
// VolumeGidValue contains the value of the GID annotation, if present.
VolumeGidValue string
// VolumeSpec is a volume spec containing the specification for the volume
// that should be mounted.
VolumeSpec *volume.Spec
// DeviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
DeviceMountPath string
}
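// Illustrative path example (hypothetical values): for a secret volume named
// "my-secret" mounted into a pod with UID "abc-123", the pod mount path
// described by the comments above would look roughly like:
//
//	/var/lib/kubelet/pods/abc-123/volumes/kubernetes.io~secret/my-secret/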
// GenerateMsgDetailed returns detailed msgs for mounted volumes
func (volume *MountedVolume) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.PodName, volume.PodUID)
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volume.OuterVolumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for mounted volumes
func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.PodName, volume.PodUID)
return generateVolumeMsg(prefixMsg, suffixMsg, volume.OuterVolumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for mounted volumes
func (volume *MountedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for mounted volumes
func (volume *MountedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
}
type operationExecutor struct {
// pendingOperations keeps track of pending attach and detach operations so
// multiple operations are not started on the same volume
pendingOperations nestedpendingoperations.NestedPendingOperations
// operationGenerator is an interface that provides implementations for
// generating volume function
operationGenerator OperationGenerator
}
func (oe *operationExecutor) IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool {
return oe.pendingOperations.IsOperationPending(volumeName, podName)
}
func (oe *operationExecutor) AttachVolume(
volumeToAttach VolumeToAttach,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
if err != nil {
return err
}
return oe.pendingOperations.Run(
volumeToAttach.VolumeName, "" /* podName */, generatedOperations)
}
func (oe *operationExecutor) DetachVolume(
volumeToDetach AttachedVolume,
verifySafeToDetach bool,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, actualStateOfWorld)
if err != nil {
return err
}
return oe.pendingOperations.Run(
volumeToDetach.VolumeName, "" /* podName */, generatedOperations)
}
func (oe *operationExecutor) VerifyVolumesAreAttached(
attachedVolumes map[types.NodeName][]AttachedVolume,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) {
// A map of plugin names and nodes on which they exist with volumes they manage
bulkVerifyPluginsByNode := make(map[string]map[types.NodeName][]*volume.Spec)
volumeSpecMapByPlugin := make(map[string]map[*volume.Spec]v1.UniqueVolumeName)
for node, nodeAttachedVolumes := range attachedVolumes {
for _, volumeAttached := range nodeAttachedVolumes {
if volumeAttached.VolumeSpec == nil {
glog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName)
continue
}
volumePlugin, err :=
oe.operationGenerator.GetVolumePluginMgr().FindPluginBySpec(volumeAttached.VolumeSpec)
if err != nil || volumePlugin == nil {
glog.Errorf(
"VolumesAreAttached.FindPluginBySpec failed for volume %q (spec.Name: %q) on node %q with error: %v",
volumeAttached.VolumeName,
volumeAttached.VolumeSpec.Name(),
volumeAttached.NodeName,
err)
continue
}
pluginName := volumePlugin.GetPluginName()
if volumePlugin.SupportsBulkVolumeVerification() {
pluginNodes, pluginNodesExist := bulkVerifyPluginsByNode[pluginName]
if !pluginNodesExist {
pluginNodes = make(map[types.NodeName][]*volume.Spec)
}
volumeSpecList, nodeExists := pluginNodes[node]
if !nodeExists {
volumeSpecList = []*volume.Spec{}
}
volumeSpecList = append(volumeSpecList, volumeAttached.VolumeSpec)
pluginNodes[node] = volumeSpecList
bulkVerifyPluginsByNode[pluginName] = pluginNodes
volumeSpecMap, mapExists := volumeSpecMapByPlugin[pluginName]
if !mapExists {
volumeSpecMap = make(map[*volume.Spec]v1.UniqueVolumeName)
}
volumeSpecMap[volumeAttached.VolumeSpec] = volumeAttached.VolumeName
volumeSpecMapByPlugin[pluginName] = volumeSpecMap
continue
}
// If the plugin doesn't support bulk volume polling, it is best to verify this node's volumes individually
nodeError := oe.VerifyVolumesAreAttachedPerNode(nodeAttachedVolumes, node, actualStateOfWorld)
if nodeError != nil {
glog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError)
}
break
}
}
for pluginName, pluginNodeVolumes := range bulkVerifyPluginsByNode {
generatedOperations, err := oe.operationGenerator.GenerateBulkVolumeVerifyFunc(
pluginNodeVolumes,
pluginName,
volumeSpecMapByPlugin[pluginName],
actualStateOfWorld)
if err != nil {
glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err)
}
// Ugly hack to ensure we don't do parallel bulk polling of the same volume plugin
uniquePluginName := v1.UniqueVolumeName(pluginName)
err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, generatedOperations)
if err != nil {
glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err)
}
}
}
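// Illustrative example (hypothetical values): if volumes managed by the
// "kubernetes.io/gce-pd" plugin are attached to both "node-1" and "node-2"
// and that plugin supports bulk verification, the loop above queues a single
// bulk verify operation keyed by the pseudo volume name "kubernetes.io/gce-pd"
// covering the specs of both nodes, instead of one per-node operation.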
func (oe *operationExecutor) VerifyVolumesAreAttachedPerNode(
attachedVolumes []AttachedVolume,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateVolumesAreAttachedFunc(attachedVolumes, nodeName, actualStateOfWorld)
if err != nil {
return err
}
// Give an empty UniqueVolumeName so that this operation could be executed concurrently.
return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, generatedOperations)
}
func (oe *operationExecutor) MountVolume(
waitForAttachTimeout time.Duration,
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
isRemount bool) error {
fsVolume, err := util.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec)
if err != nil {
return err
}
var generatedOperations volumetypes.GeneratedOperations
if fsVolume {
// Filesystem volume case
// Mount/remount a volume when a volume is attached
generatedOperations, err = oe.operationGenerator.GenerateMountVolumeFunc(
waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount)
} else {
// Block volume case
// Create a mapping to the device once the volume is attached
generatedOperations, err = oe.operationGenerator.GenerateMapVolumeFunc(
waitForAttachTimeout, volumeToMount, actualStateOfWorld)
}
if err != nil {
return err
}
// Avoid executing mount/map from multiple pods referencing the
// same volume in parallel
podName := nestedpendingoperations.EmptyUniquePodName
// TODO: remove this -- not necessary
if !volumeToMount.PluginIsAttachable {
// Non-attachable volume plugins can execute mount for multiple pods
// referencing the same volume in parallel
podName = util.GetUniquePodName(volumeToMount.Pod)
}
// TODO mount_device
return oe.pendingOperations.Run(
volumeToMount.VolumeName, podName, generatedOperations)
}
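// Note on serialization in MountVolume above (derived from the code, not an
// addition to its behavior): for attachable plugins the operation is keyed
// only by the volume name, so mounts of the same volume from different pods
// run serially; for non-attachable plugins the key also includes the pod name,
// so such mounts may run concurrently. The tests in operation_executor_test.go
// exercise exactly this behavior.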
func (oe *operationExecutor) UnmountVolume(
volumeToUnmount MountedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
podsDir string) error {
fsVolume, err := util.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec)
if err != nil {
return err
}
var generatedOperations volumetypes.GeneratedOperations
if fsVolume {
// Filesystem volume case
// Unmount a volume if a volume is mounted
generatedOperations, err = oe.operationGenerator.GenerateUnmountVolumeFunc(
volumeToUnmount, actualStateOfWorld, podsDir)
} else {
// Block volume case
// Unmap a volume if a volume is mapped
generatedOperations, err = oe.operationGenerator.GenerateUnmapVolumeFunc(
volumeToUnmount, actualStateOfWorld)
}
if err != nil {
return err
}
// All volume plugins can execute unmount/unmap for multiple pods referencing the
// same volume in parallel
podName := volumetypes.UniquePodName(volumeToUnmount.PodUID)
return oe.pendingOperations.Run(
volumeToUnmount.VolumeName, podName, generatedOperations)
}
func (oe *operationExecutor) UnmountDevice(
deviceToDetach AttachedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
mounter mount.Interface) error {
fsVolume, err := util.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec)
if err != nil {
return err
}
var generatedOperations volumetypes.GeneratedOperations
if fsVolume {
// Filesystem volume case
// Unmount and detach a device if a volume isn't referenced
generatedOperations, err = oe.operationGenerator.GenerateUnmountDeviceFunc(
deviceToDetach, actualStateOfWorld, mounter)
} else {
// Block volume case
// Detach a device and remove loopback if a volume isn't referenced
generatedOperations, err = oe.operationGenerator.GenerateUnmapDeviceFunc(
deviceToDetach, actualStateOfWorld, mounter)
}
if err != nil {
return err
}
// Avoid executing unmount/unmap device from multiple pods referencing
// the same volume in parallel
podName := nestedpendingoperations.EmptyUniquePodName
return oe.pendingOperations.Run(
deviceToDetach.VolumeName, podName, generatedOperations)
}
func (oe *operationExecutor) ExpandVolume(pvcWithResizeRequest *expandcache.PVCWithResizeRequest, resizeMap expandcache.VolumeResizeMap) error {
generatedOperations, err := oe.operationGenerator.GenerateExpandVolumeFunc(pvcWithResizeRequest, resizeMap)
if err != nil {
return err
}
uniqueVolumeKey := v1.UniqueVolumeName(pvcWithResizeRequest.UniquePVCKey())
return oe.pendingOperations.Run(uniqueVolumeKey, "", generatedOperations)
}
func (oe *operationExecutor) ExpandVolumeFSWithoutUnmounting(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error {
generatedOperations, err := oe.operationGenerator.GenerateExpandVolumeFSWithoutUnmountingFunc(volumeToMount, actualStateOfWorld)
if err != nil {
return err
}
return oe.pendingOperations.Run(volumeToMount.VolumeName, "", generatedOperations)
}
func (oe *operationExecutor) VerifyControllerAttachedVolume(
volumeToMount VolumeToMount,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld)
if err != nil {
return err
}
return oe.pendingOperations.Run(
volumeToMount.VolumeName, "" /* podName */, generatedOperations)
}
// ReconstructVolumeOperation reconstructs a volumeSpec from the mount path and returns it
func (oe *operationExecutor) ReconstructVolumeOperation(
volumeMode v1.PersistentVolumeMode,
plugin volume.VolumePlugin,
mapperPlugin volume.BlockVolumePlugin,
uid types.UID,
podName volumetypes.UniquePodName,
volumeSpecName string,
mountPath string,
pluginName string) (*volume.Spec, error) {
// Filesystem Volume case
if volumeMode == v1.PersistentVolumeFilesystem {
// Create volumeSpec from mount path
glog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName")
volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath)
if err != nil {
return nil, err
}
return volumeSpec, nil
}
// Block Volume case
// Create volumeSpec from mount path
glog.V(5).Infof("Starting operationExecutor.ReconstructVolume")
if mapperPlugin == nil {
return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)",
pluginName,
volumeSpecName,
podName,
uid)
}
// mountPath contains the volumeName in its path. For a block volume, {volumeName} is a symbolic link
// pointing to the raw block device.
// ex. mountPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
volumeSpec, err := mapperPlugin.ConstructBlockVolumeSpec(uid, volumeSpecName, mountPath)
if err != nil {
return nil, err
}
return volumeSpec, nil
}
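// Illustrative example (hypothetical values, assuming the kubelet volume
// devices directory name is "volumeDevices"): for a block volume the mountPath
// handed to ConstructBlockVolumeSpec above would look roughly like:
//
//	/var/lib/kubelet/pods/abc-123/volumeDevices/kubernetes.io~gce-pd/pd-volume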
// CheckVolumeExistenceOperation checks the mount path directory to determine whether the volume still exists
func (oe *operationExecutor) CheckVolumeExistenceOperation(
volumeSpec *volume.Spec,
mountPath, volumeName string,
mounter mount.Interface,
uniqueVolumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
podUID types.UID,
attachable volume.AttachableVolumePlugin) (bool, error) {
fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec)
if err != nil {
return false, err
}
// Filesystem Volume case
// For the attachable volume case, check the mount path directory to see whether the volume still exists and is mounted.
// Return true if the volume is mounted.
if fsVolume {
if attachable != nil {
var isNotMount bool
var mountCheckErr error
if isNotMount, mountCheckErr = mounter.IsLikelyNotMountPoint(mountPath); mountCheckErr != nil {
return false, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v",
uniqueVolumeName,
volumeName,
podName,
podUID,
mountCheckErr)
}
return !isNotMount, nil
}
// For the non-attachable volume case, skip the mount point check and return true,
// since such plugins may not have a volume mount point.
return true, nil
}
// Block Volume case
// Check the mount path directory to see whether the volume still exists, and return true if it does.
// Whether the plugin is attachable or non-attachable, it should have a symbolic
// link to the raw block device under the pod device map path if the volume exists.
blkutil := volumepathhandler.NewBlockVolumePathHandler()
var islinkExist bool
var checkErr error
if islinkExist, checkErr = blkutil.IsSymlinkExist(mountPath); checkErr != nil {
return false, fmt.Errorf("Could not check whether the block volume %q (spec.Name: %q) pod %q (UID: %q) is mapped to: %v",
uniqueVolumeName,
volumeName,
podName,
podUID,
checkErr)
}
return islinkExist, nil
}

View File

@ -1,621 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operationexecutor
import (
"strconv"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
const (
numVolumesToMount = 2
numAttachableVolumesToUnmount = 2
numNonAttachableVolumesToUnmount = 2
numDevicesToUnmount = 2
numVolumesToAttach = 2
numVolumesToDetach = 2
numVolumesToVerifyAttached = 2
numVolumesToVerifyControllerAttached = 2
numVolumesToMap = 2
numAttachableVolumesToUnmap = 2
numNonAttachableVolumesToUnmap = 2
numDevicesToUnmap = 2
)
var _ OperationGenerator = &fakeOperationGenerator{}
func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToMount)
secretName := "secret-volume"
volumeName := v1.UniqueVolumeName(secretName)
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
VolumeName: volumeName,
PluginIsAttachable: false, // this field determines whether the plugin is attachable
ReportedInUse: true,
}
oe.MountVolume(0 /* waitForAttachTimeOut */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false /* isRemount */)
}
// Assert
if !isOperationRunConcurrently(ch, quit, numVolumesToMount) {
t.Fatalf("Unable to start mount operations in Concurrent for non-attachable volumes")
}
}
func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToAttach)
pdName := "pd-volume"
volumeName := v1.UniqueVolumeName(pdName)
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
VolumeName: volumeName,
PluginIsAttachable: true, // this field determines whether the plugin is attachable
ReportedInUse: true,
}
oe.MountVolume(0 /* waitForAttachTimeout */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false /* isRemount */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("Mount operations should not start concurrently for attachable volumes")
}
}
func TestOperationExecutor_UnmountVolume_ConcurrentUnmountForAllPlugins(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToUnmount := make([]MountedVolume, numAttachableVolumesToUnmount+numNonAttachableVolumesToUnmount)
pdName := "pd-volume"
secretName := "secret-volume"
// Act
for i := 0; i < numNonAttachableVolumesToUnmount+numAttachableVolumesToUnmount; i++ {
podName := "pod-" + strconv.Itoa(i+1)
if i < numNonAttachableVolumesToUnmount {
pod := getTestPodWithSecret(podName, secretName)
volumesToUnmount[i] = MountedVolume{
PodName: volumetypes.UniquePodName(podName),
VolumeName: v1.UniqueVolumeName(secretName),
PodUID: pod.UID,
}
} else {
pod := getTestPodWithGCEPD(podName, pdName)
volumesToUnmount[i] = MountedVolume{
PodName: volumetypes.UniquePodName(podName),
VolumeName: v1.UniqueVolumeName(pdName),
PodUID: pod.UID,
}
}
oe.UnmountVolume(volumesToUnmount[i], nil /* actualStateOfWorldMounterUpdater */, "" /*podsDir*/)
}
// Assert
if !isOperationRunConcurrently(ch, quit, numNonAttachableVolumesToUnmount+numAttachableVolumesToUnmount) {
t.Fatalf("Unable to start unmount operations concurrently for volume plugins")
}
}
func TestOperationExecutor_UnmountDeviceConcurrently(t *testing.T) {
// Arrange
ch, quit, oe := setup()
attachedVolumes := make([]AttachedVolume, numDevicesToUnmount)
pdName := "pd-volume"
// Act
for i := range attachedVolumes {
attachedVolumes[i] = AttachedVolume{
VolumeName: v1.UniqueVolumeName(pdName),
NodeName: "node-name",
}
oe.UnmountDevice(attachedVolumes[i], nil /* actualStateOfWorldMounterUpdater */, nil /* mount.Interface */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("Unmount device operations should not start concurrently")
}
}
func TestOperationExecutor_AttachVolumeConcurrently(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToAttach := make([]VolumeToAttach, numVolumesToAttach)
pdName := "pd-volume"
// Act
for i := range volumesToAttach {
volumesToAttach[i] = VolumeToAttach{
VolumeName: v1.UniqueVolumeName(pdName),
NodeName: "node",
}
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("Attach volume operations should not start concurrently")
}
}
func TestOperationExecutor_DetachVolumeConcurrently(t *testing.T) {
// Arrange
ch, quit, oe := setup()
attachedVolumes := make([]AttachedVolume, numVolumesToDetach)
pdName := "pd-volume"
// Act
for i := range attachedVolumes {
attachedVolumes[i] = AttachedVolume{
VolumeName: v1.UniqueVolumeName(pdName),
NodeName: "node",
}
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("DetachVolume operations should not run concurrently")
}
}
func TestOperationExecutor_VerifyVolumesAreAttachedConcurrently(t *testing.T) {
// Arrange
ch, quit, oe := setup()
// Act
for i := 0; i < numVolumesToVerifyAttached; i++ {
oe.VerifyVolumesAreAttachedPerNode(nil /* attachedVolumes */, "node-name", nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
if !isOperationRunConcurrently(ch, quit, numVolumesToVerifyAttached) {
t.Fatalf("VerifyVolumesAreAttached operation is not being run concurrently")
}
}
func TestOperationExecutor_VerifyControllerAttachedVolumeConcurrently(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToVerifyControllerAttached)
pdName := "pd-volume"
// Act
for i := range volumesToMount {
volumesToMount[i] = VolumeToMount{
VolumeName: v1.UniqueVolumeName(pdName),
}
oe.VerifyControllerAttachedVolume(volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("VerifyControllerAttachedVolume should not run concurrently")
}
}
func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins_VolumeMode_Block(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToMap)
secretName := "secret-volume"
volumeName := v1.UniqueVolumeName(secretName)
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
VolumeName: volumeName,
PluginIsAttachable: false, // this field determines whether the plugin is attachable
ReportedInUse: true,
VolumeSpec: tmpSpec,
}
oe.MountVolume(0 /* waitForAttachTimeOut */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false)
}
// Assert
if !isOperationRunConcurrently(ch, quit, numVolumesToMap) {
t.Fatalf("Unable to start map operations in Concurrent for non-attachable volumes")
}
}
func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins_VolumeMode_Block(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToMount := make([]VolumeToMount, numVolumesToAttach)
pdName := "pd-volume"
volumeName := v1.UniqueVolumeName(pdName)
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
VolumeName: volumeName,
PluginIsAttachable: true, // this field determines whether the plugin is attachable
ReportedInUse: true,
VolumeSpec: tmpSpec,
}
oe.MountVolume(0 /* waitForAttachTimeout */, volumesToMount[i], nil /* actualStateOfWorldMounterUpdater */, false)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("Map operations should not start concurrently for attachable volumes")
}
}
func TestOperationExecutor_UnmountVolume_ConcurrentUnmountForAllPlugins_VolumeMode_Block(t *testing.T) {
// Arrange
ch, quit, oe := setup()
volumesToUnmount := make([]MountedVolume, numAttachableVolumesToUnmap+numNonAttachableVolumesToUnmap)
pdName := "pd-volume"
secretName := "secret-volume"
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
// Act
for i := 0; i < numNonAttachableVolumesToUnmap+numAttachableVolumesToUnmap; i++ {
podName := "pod-" + strconv.Itoa(i+1)
if i < numNonAttachableVolumesToUnmap {
pod := getTestPodWithSecret(podName, secretName)
volumesToUnmount[i] = MountedVolume{
PodName: volumetypes.UniquePodName(podName),
VolumeName: v1.UniqueVolumeName(secretName),
PodUID: pod.UID,
VolumeSpec: tmpSpec,
}
} else {
pod := getTestPodWithGCEPD(podName, pdName)
volumesToUnmount[i] = MountedVolume{
PodName: volumetypes.UniquePodName(podName),
VolumeName: v1.UniqueVolumeName(pdName),
PodUID: pod.UID,
VolumeSpec: tmpSpec,
}
}
oe.UnmountVolume(volumesToUnmount[i], nil /* actualStateOfWorldMounterUpdater */, "" /* podsDir */)
}
// Assert
if !isOperationRunConcurrently(ch, quit, numNonAttachableVolumesToUnmap+numAttachableVolumesToUnmap) {
t.Fatalf("Unable to start unmap operations concurrently for volume plugins")
}
}
func TestOperationExecutor_UnmountDeviceConcurrently_VolumeMode_Block(t *testing.T) {
// Arrange
ch, quit, oe := setup()
attachedVolumes := make([]AttachedVolume, numDevicesToUnmap)
pdName := "pd-volume"
volumeMode := v1.PersistentVolumeBlock
tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
// Act
for i := range attachedVolumes {
attachedVolumes[i] = AttachedVolume{
VolumeName: v1.UniqueVolumeName(pdName),
NodeName: "node-name",
VolumeSpec: tmpSpec,
}
oe.UnmountDevice(attachedVolumes[i], nil /* actualStateOfWorldMounterUpdater */, nil /* mount.Interface */)
}
// Assert
if !isOperationRunSerially(ch, quit) {
t.Fatalf("Unmap device operations should not start concurrently")
}
}
type fakeOperationGenerator struct {
ch chan interface{}
quit chan interface{}
}
func newFakeOperationGenerator(ch chan interface{}, quit chan interface{}) OperationGenerator {
return &fakeOperationGenerator{
ch: ch,
quit: quit,
}
}
func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateExpandVolumeFunc(pvcWithResizeRequest *expandcache.PVCWithResizeRequest,
resizeMap expandcache.VolumeResizeMap) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateBulkVolumeVerifyFunc(
pluginNodeVolumes map[types.NodeName][]*volume.Spec,
pluginName string,
volumeSpecMap map[*volume.Spec]v1.UniqueVolumeName,
actualStateOfWorldAttacherUpdater ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (volumetypes.GeneratedOperations, error) {
opFunc := func() (error, error) {
startOperationAndBlock(fopg.ch, fopg.quit)
return nil, nil
}
return volumetypes.GeneratedOperations{
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr {
return nil
}
func getTestPodWithSecret(podName, secretName string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
UID: types.UID(podName),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: secretName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "k8s.gcr.io/mounttest:0.8",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: secretName,
MountPath: "/data",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
}
func getTestPodWithGCEPD(podName, pdName string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
UID: types.UID(podName + string(uuid.NewUUID())),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: pdName,
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: pdName,
FSType: "ext4",
ReadOnly: false,
},
},
},
},
Containers: []v1.Container{
{
Name: "pd-volume-test",
Image: "k8s.gcr.io/mounttest:0.8",
Args: []string{
"--file_content=/etc/pd-volume/data-1",
},
VolumeMounts: []v1.VolumeMount{
{
Name: pdName,
MountPath: "/data",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
}
func isOperationRunSerially(ch <-chan interface{}, quit chan<- interface{}) bool {
defer close(quit)
numOperationsStarted := 0
loop:
for {
select {
case <-ch:
numOperationsStarted++
if numOperationsStarted > 1 {
return false
}
case <-time.After(5 * time.Second):
break loop
}
}
return true
}
func isOperationRunConcurrently(ch <-chan interface{}, quit chan<- interface{}, numOperationsToRun int) bool {
defer close(quit)
numOperationsStarted := 0
loop:
for {
select {
case <-ch:
numOperationsStarted++
if numOperationsStarted == numOperationsToRun {
return true
}
case <-time.After(5 * time.Second):
break loop
}
}
return false
}
func setup() (chan interface{}, chan interface{}, OperationExecutor) {
ch, quit := make(chan interface{}), make(chan interface{})
return ch, quit, NewOperationExecutor(newFakeOperationGenerator(ch, quit))
}
// This function starts by writing to ch and blocks on the quit channel
// until it is closed by the currently running test
func startOperationAndBlock(ch chan<- interface{}, quit <-chan interface{}) {
ch <- nil
<-quit
}
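// Illustrative note (derived from the helpers above): each fake operation
// signals its start on ch and then blocks on quit, so isOperationRunSerially
// and isOperationRunConcurrently can count how many operations actually
// started before the test closes quit.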

File diff suppressed because it is too large

View File

@ -1,44 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["recycler_client.go"],
importpath = "k8s.io/kubernetes/pkg/volume/util/recyclerclient",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["recycler_client_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,267 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recyclerclient
import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
)
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. Deletion of the recycler pod is always attempted
// before returning.
//
// In case a pod with the same namespace+name is already running, this
// function deletes it, as it cannot judge whether it is an old recycler
// or a fake recycler forged by a user to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with a unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
// Same as the function above, except 'recyclerClient' is a narrower pod API
// interface to ease testing.
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
// the volume. Here we assume that pv.Name is already unique.
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""
stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// The recycler will try again and the old pod will hopefully be deleted
// by then.
return fmt.Errorf("old recycler pod found, will retry later")
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Returning recycler error is preferred, the pod will be deleted again on
// the next retry.
if err != nil {
return fmt.Errorf("failed to recycle volume: %s", err)
}
// Recycle succeeded but we failed to delete the recycler pod. Report it,
// the controller will re-try recycling the PV again shortly.
if deleteErr != nil {
return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
}
return nil
}
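// Illustrative example (hypothetical value): for a PV named "pv0003", the
// function above creates and watches a recycler pod named "recycler-for-pv0003".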
// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
for {
event, ok := <-podCh
if !ok {
return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
}
switch event.Object.(type) {
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf("%s", pod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown")
}
}
case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")
case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}
}
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a channel of watch events for the given pod. The
// stopChannel is used to close the reflector backing the watch. The caller
// is responsible for deferring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}
type realRecyclerClient struct {
client clientset.Interface
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}
// WatchPod watches a pod and events related to it. It sends pod updates and
// events over the returned channel. It continues until stopChannel is closed.
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, err := fields.ParseSelector("metadata.name=" + name)
if err != nil {
return nil, err
}
options := metav1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}
eventCh := make(chan watch.Event, 30)
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer close(eventCh)
wg.Wait()
}()
go func() {
defer eventWatch.Stop()
defer wg.Done()
for {
select {
case <-stopChannel:
return
case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
return
}
eventCh <- eventEvent
}
}
}()
go func() {
defer podWatch.Stop()
defer wg.Done()
for {
select {
case <-stopChannel:
return
case podEvent, ok := <-podWatch.ResultChan():
if !ok {
return
}
eventCh <- podEvent
}
}
}()
return eventCh, nil
}

View File

@ -1,235 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recyclerclient
import (
"fmt"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
api "k8s.io/kubernetes/pkg/apis/core"
)
type testcase struct {
// Input of the test
name string
existingPod *v1.Pod
createPod *v1.Pod
// eventSequence is a list of events that are simulated during recycling. Each
// entry is either an event generated by the recycler pod or a state change of
// the pod (see newPodEvent and newEvent below).
eventSequence []watch.Event
// Expected output.
// expectedEvents is a list of events that were sent to the volume that was
// recycled.
expectedEvents []mockEvent
expectedError string
}
func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event {
return watch.Event{
Type: eventtype,
Object: newPod(name, phase, message),
}
}
func newEvent(eventtype, message string) watch.Event {
return watch.Event{
Type: watch.Added,
Object: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
},
Reason: "MockEvent",
Message: message,
Type: eventtype,
},
}
}
func newPod(name string, phase v1.PodPhase, message string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: name,
},
Status: v1.PodStatus{
Phase: phase,
Message: message,
},
}
}
func TestRecyclerPod(t *testing.T) {
tests := []testcase{
{
// Test recycler success with some events
name: "RecyclerSuccess",
createPod: newPod("podRecyclerSuccess", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""),
newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""),
newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
{v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""},
{v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""},
{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
},
expectedError: "",
},
{
// Test recycler failure with some events
name: "RecyclerFailure",
createPod: newPod("podRecyclerFailure", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and then Fails
newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
{v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
{v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"},
},
expectedError: "failed to recycle volume: Pod was active on the node longer than specified deadline",
},
{
// Recycler pod gets deleted
name: "RecyclerDeleted",
createPod: newPod("podRecyclerDeleted", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod is deleted while still Pending
newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
},
expectedError: "failed to recycle volume: recycler pod was deleted",
},
{
// Another recycler pod is already running
name: "RecyclerRunning",
existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"),
eventSequence: []watch.Event{},
expectedError: "old recycler pod found, will retry later",
},
}
for _, test := range tests {
t.Logf("Test %q", test.name)
client := &mockRecyclerClient{
events: test.eventSequence,
pod: test.existingPod,
}
err := internalRecycleVolumeByWatchingPodUntilCompletion(test.createPod.Name, test.createPod, client)
receivedError := ""
if err != nil {
receivedError = err.Error()
}
if receivedError != test.expectedError {
t.Errorf("Test %q failed, expected error %q, got %q", test.name, test.expectedError, receivedError)
continue
}
if !client.deletedCalled {
t.Errorf("Test %q failed, expected deferred client.Delete to be called on recycler pod", test.name)
continue
}
for i, expectedEvent := range test.expectedEvents {
if len(client.receivedEvents) <= i {
t.Errorf("Test %q failed, expected event %d: %q not received", test.name, i, expectedEvent.message)
continue
}
receivedEvent := client.receivedEvents[i]
if expectedEvent.eventtype != receivedEvent.eventtype {
t.Errorf("Test %q failed, event %d does not match: expected eventtype %q, got %q", test.name, i, expectedEvent.eventtype, receivedEvent.eventtype)
}
if expectedEvent.message != receivedEvent.message {
t.Errorf("Test %q failed, event %d does not match: expected message %q, got %q", test.name, i, expectedEvent.message, receivedEvent.message)
}
}
for i := len(test.expectedEvents); i < len(client.receivedEvents); i++ {
t.Errorf("Test %q failed, unexpected event received: %s, %q", test.name, client.receivedEvents[i].eventtype, client.receivedEvents[i].message)
}
}
}
type mockRecyclerClient struct {
pod *v1.Pod
deletedCalled bool
receivedEvents []mockEvent
events []watch.Event
}
type mockEvent struct {
eventtype, message string
}
func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
if c.pod == nil {
c.pod = pod
return c.pod, nil
}
// Simulate "already exists" error
return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name)
}
func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
if c.pod != nil {
return c.pod, nil
} else {
return nil, fmt.Errorf("pod does not exist")
}
}
func (c *mockRecyclerClient) DeletePod(name, namespace string) error {
c.deletedCalled = true
return nil
}
func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
eventCh := make(chan watch.Event, 0)
go func() {
for _, e := range c.events {
eventCh <- e
}
}()
return eventCh, nil
}
func (c *mockRecyclerClient) Event(eventtype, message string) {
c.receivedEvents = append(c.receivedEvents, mockEvent{eventtype, message})
}

View File

@ -1,125 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
)
var (
knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
}
)
type resizeProcessStatus struct {
condition v1.PersistentVolumeClaimCondition
processed bool
}
// ClaimToClaimKey returns a namespace/name string for the given PVC
func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
pvc *v1.PersistentVolumeClaim,
capacity v1.ResourceList,
kubeClient clientset.Interface) error {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity = capacity
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return err
}
// PatchPVCStatus updates PVC status using PATCH verb
func PatchPVCStatus(
oldPVC *v1.PersistentVolumeClaim,
newPVC *v1.PersistentVolumeClaim,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcName := oldPVC.Name
oldData, err := json.Marshal(oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal oldData for pvc %q with %v", pvcName, err)
}
newData, err := json.Marshal(newPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal newData for pvc %q with %v", pvcName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to CreateTwoWayMergePatch for pvc %q with %v ", pvcName, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
Patch(pvcName, types.StrategicMergePatchType, patchBytes, "status")
if updateErr != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to patch PVC %q with %v", pvcName, updateErr)
}
return updatedClaim, nil
}
// MergeResizeConditionOnPVC updates pvc with requested resize conditions
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
resizeConditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
oldConditions := pvc.Status.Conditions
newConditions := []v1.PersistentVolumeClaimCondition{}
for _, condition := range oldConditions {
// If the condition is not a resize-related type, keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
if newCondition.condition.Status != condition.Status {
newConditions = append(newConditions, newCondition.condition)
} else {
newConditions = append(newConditions, condition)
}
newCondition.processed = true
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
pvc.Status.Conditions = newConditions
return pvc
}
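// Illustrative usage sketch (not part of the original file): marking a claim
// as waiting for a file system resize by merging the condition into the PVC
// and patching its status. The pvc and kubeClient values are assumed to be
// supplied by the caller.
func exampleMarkFSResizePending(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) error {
newPVC := pvc.DeepCopy()
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimFileSystemResizePending,
Status: v1.ConditionTrue,
},
})
// Only the status subresource is patched; unrelated conditions are preserved.
_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return err
}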

View File

@ -1,167 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type conditionMergeTestCase struct {
description string
pvc *v1.PersistentVolumeClaim
newConditions []v1.PersistentVolumeClaimCondition
finalCondtions []v1.PersistentVolumeClaimCondition
}
func TestMergeResizeCondition(t *testing.T) {
currentTime := metav1.Now()
pvc := getPVC([]v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
})
noConditionPVC := getPVC([]v1.PersistentVolumeClaimCondition{})
conditionFalseTime := metav1.Now()
newTime := metav1.NewTime(time.Now().Add(1 * time.Hour))
testCases := []conditionMergeTestCase{
{
description: "when removing all conditions",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{},
finalCondtions: []v1.PersistentVolumeClaimCondition{},
},
{
description: "adding new condition",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimFileSystemResizePending,
Status: v1.ConditionTrue,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimFileSystemResizePending,
Status: v1.ConditionTrue,
},
},
},
{
description: "adding same condition with new timestamp",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: newTime,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
},
},
{
description: "adding same condition but with different status",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionFalse,
LastTransitionTime: conditionFalseTime,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionFalse,
LastTransitionTime: conditionFalseTime,
},
},
},
{
description: "when no condition exists on pvc",
pvc: noConditionPVC.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
},
},
}
for _, testcase := range testCases {
updatePVC := MergeResizeConditionOnPVC(testcase.pvc, testcase.newConditions)
updateConditions := updatePVC.Status.Conditions
if !reflect.DeepEqual(updateConditions, testcase.finalCondtions) {
t.Errorf("Expected updated conditions for test %s to be %v but got %v",
testcase.description,
testcase.finalCondtions, updateConditions)
}
}
}
func getPVC(conditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "resize"},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
},
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Conditions: conditions,
Capacity: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("2Gi"),
},
},
}
return pvc
}

View File

@ -1,26 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["types.go"],
importpath = "k8s.io/kubernetes/pkg/volume/util/types",
deps = ["//vendor/k8s.io/apimachinery/pkg/types:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,34 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package types defines types used only by volume components
package types
import "k8s.io/apimachinery/pkg/types"
// UniquePodName defines the type to key pods off of
type UniquePodName types.UID
// UniquePVCName defines the type to key pvc off
type UniquePVCName types.UID
// GeneratedOperations contains the operation that is created as well as
// supporting functions required for the operation executor
type GeneratedOperations struct {
OperationFunc func() (eventErr error, detailedErr error)
EventRecorderFunc func(*error)
CompleteFunc func(*error)
}

View File

@ -1,771 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"reflect"
"hash/fnv"
"math/rand"
"strconv"
"k8s.io/apimachinery/pkg/api/resource"
utypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
const (
// GB - GigaByte size
GB = 1000 * 1000 * 1000
// GIB - GibiByte size
GIB = 1024 * 1024 * 1024
readyFileName = "ready"
// ControllerManagedAttachAnnotation is the key of the annotation on Node
// objects that indicates attach/detach operations for the node should be
// managed by the attach/detach controller
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
// that decides if pod volumes are unmounted when pod is terminated
KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"
// VolumeGidAnnotationKey is the key of the annotation on the PersistentVolume
// object that specifies a supplemental GID.
VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
// VolumeDynamicallyCreatedByKey is the key of the annotation on a
// dynamically created PersistentVolume object.
VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
)
// IsReady checks for the existence of a regular file
// called 'ready' in the given directory and returns
// true if that file exists.
func IsReady(dir string) bool {
readyFile := path.Join(dir, readyFileName)
s, err := os.Stat(readyFile)
if err != nil {
return false
}
if !s.Mode().IsRegular() {
glog.Errorf("ready-file is not a file: %s", readyFile)
return false
}
return true
}
// SetReady creates a file called 'ready' in the given
// directory. It logs an error if the file cannot be
// created.
func SetReady(dir string) {
if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
glog.Errorf("Can't mkdir %s: %v", dir, err)
return
}
readyFile := path.Join(dir, readyFileName)
file, err := os.Create(readyFile)
if err != nil {
glog.Errorf("Can't touch %s: %v", readyFile, err)
return
}
file.Close()
}
// UnmountPath is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
func UnmountPath(mountPath string, mounter mount.Interface) error {
return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */)
}
// UnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
corruptedMnt := isCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}
// doUnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing
func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
if !corruptedMnt {
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if err != nil {
return err
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
if err := mounter.Unmount(mountPath); err != nil {
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return mntErr
}
if notMnt {
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return fmt.Errorf("Failed to unmount path %v", mountPath)
}
// PathExists returns true if the specified path exists.
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if isCorruptedMnt(err) {
return true, err
} else {
return false, err
}
}
// isCorruptedMnt return true if err is about corrupted mount point
func isCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE
}
// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
for name, data := range secrets.Data {
secret[name] = string(data)
}
return secret, nil
}
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
if secrets.Type != v1.SecretType(volumePluginName) {
return secret, fmt.Errorf("Cannot get secret of type %s", volumePluginName)
}
for name, data := range secrets.Data {
secret[name] = string(data)
}
return secret, nil
}
func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
className := v1helper.GetPersistentVolumeClass(pv)
if className == "" {
return nil, fmt.Errorf("Volume has no storage class")
}
class, err := kubeClient.StorageV1().StorageClasses().Get(className, metav1.GetOptions{})
if err != nil {
return nil, err
}
return class, nil
}
// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
// This ensures that we don't mount a volume that doesn't belong to this node
func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
return checkVolumeNodeAffinity(pv, nodeLabels)
}
func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
if pv.Spec.NodeAffinity == nil {
return nil
}
if pv.Spec.NodeAffinity.Required != nil {
terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms
glog.V(10).Infof("Match for Required node selector terms %+v", terms)
if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) {
return fmt.Errorf("No matching NodeSelectorTerms")
}
}
return nil
}
// LoadPodFromFile will read, decode, and return a Pod from a file.
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
if filePath == "" {
return nil, fmt.Errorf("file path not specified")
}
podDef, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err)
}
if len(podDef) == 0 {
return nil, fmt.Errorf("file was empty: %s", filePath)
}
pod := &v1.Pod{}
codec := legacyscheme.Codecs.UniversalDecoder()
if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
return nil, fmt.Errorf("failed decoding file: %v", err)
}
return pod, nil
}
func ZonesSetToLabelValue(strSet sets.String) string {
return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter)
}
// ZonesToSet converts a string containing a comma separated list of zones to set
func ZonesToSet(zonesString string) (sets.String, error) {
return stringToSet(zonesString, ",")
}
// LabelZonesToSet converts a PV label value from string containing a delimited list of zones to set
func LabelZonesToSet(labelZonesValue string) (sets.String, error) {
return stringToSet(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
}
// stringToSet converts a string containing a list separated by the specified delimiter to a set
func stringToSet(str, delimiter string) (sets.String, error) {
zonesSlice := strings.Split(str, delimiter)
zonesSet := make(sets.String)
for _, zone := range zonesSlice {
trimmedZone := strings.TrimSpace(zone)
if trimmedZone == "" {
return make(sets.String), fmt.Errorf(
"%q separated list (%q) must not contain an empty string",
delimiter,
str)
}
zonesSet.Insert(trimmedZone)
}
return zonesSet, nil
}
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
giQty := resource.MustParse("1Gi")
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
giSize := giQty.Value()
pvSize := pvQty.Value()
timeout := (pvSize / giSize) * int64(timeoutIncrement)
if timeout < int64(minimumTimeout) {
return int64(minimumTimeout)
}
return timeout
}
// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}
// RoundUpToGB rounds up given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GB)
}
// RoundUpToGiB rounds up the given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GIB)
}
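// Illustrative sketch (not part of the original file): rounding a 1500Mi
// request up to provider allocation units.
func exampleRoundUp() {
request := resource.MustParse("1500Mi") // 1572864000 bytes
fmt.Println(RoundUpToGiB(request))      // 2 (GiB-sized chunks)
fmt.Println(RoundUpToGB(request))       // 2 (GB-sized chunks)
}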
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
prefix := clusterName + "-dynamic"
pvLen := len(pvName)
// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
// +1 for the '-' dash
if pvLen+1+len(prefix) > maxLength {
prefix = prefix[:maxLength-pvLen-1]
}
return prefix + "-" + pvName
}
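// Illustrative sketch (not part of the original file): the "-dynamic" part of
// the prefix is trimmed when the combined name would exceed maxLength.
func exampleGenerateVolumeName() {
fmt.Println(GenerateVolumeName("mycluster", "pv-1", 63)) // "mycluster-dynamic-pv-1"
fmt.Println(GenerateVolumeName("mycluster", "pv-1", 16)) // "mycluster-d-pv-1"
}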
// GetPath returns the path from the mounter, or an error if it is empty.
func GetPath(mounter volume.Mounter) (string, error) {
path := mounter.GetPath()
if path == "" {
return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
}
return path, nil
}
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
return zone
}
// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
replicaZones := sets.NewString()
startingIndex := index * numZones
for index = startingIndex; index < startingIndex+numZones; index++ {
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
replicaZones.Insert(zone)
}
glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
pvcName, replicaZones.UnsortedList(), zoneSlice)
return replicaZones
}
func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
if pvcName == "" {
// We should always be called with a name; this shouldn't happen
glog.Warningf("No name defined during volume create; choosing random zone")
hash = rand.Uint32()
} else {
hashString := pvcName
// Heuristic to make sure that volumes in a StatefulSet are spread across zones
// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
// where Id is an integer index.
// Note though that if a StatefulSet pod has multiple claims, we need them to be
// in the same zone, because otherwise the pod will be unable to mount both volumes,
// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
// it looks like `ClaimName-StatefulSetName-Id`.
// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
// feature for users that are creating statefulset-like functionality without using statefulsets.
lastDash := strings.LastIndexByte(pvcName, '-')
if lastDash != -1 {
statefulsetIDString := pvcName[lastDash+1:]
statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
if err == nil {
// Offset by the statefulsetID, so we round-robin across zones
index = uint32(statefulsetID)
// We still hash the volume name, but only the prefix
hashString = pvcName[:lastDash]
// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
// hash only the StatefulSetName, so that different claims on the same StatefulSet
// member end up in the same zone.
// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
// We actually just take the portion after the final - of ClaimName-StatefulSetName.
// For our purposes it doesn't much matter (just suboptimal spreading).
lastDash := strings.LastIndexByte(hashString, '-')
if lastDash != -1 {
hashString = hashString[lastDash+1:]
}
glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
}
}
// We hash the (base) volume name, so we don't bias towards the first N zones
h := fnv.New32()
h.Write([]byte(hashString))
hash = h.Sum32()
}
return hash, index
}
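// Illustrative sketch (not part of the original file): StatefulSet-style PVC
// names ("data-web-0", "data-web-1", ...) hash only the "web" portion, so
// consecutive ordinals land in consecutive zones of the sorted zone list.
// The zone names below are hypothetical.
func exampleZoneSpreading() {
zones := sets.NewString("us-central1-a", "us-central1-b", "us-central1-c")
for i := 0; i < 3; i++ {
pvcName := fmt.Sprintf("data-web-%d", i)
fmt.Printf("%s -> %s\n", pvcName, ChooseZoneForVolume(zones, pvcName))
}
}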
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
// Wrap EmptyDir, let it do the teardown.
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
if err != nil {
return err
}
return wrapped.TearDownAt(dir)
}
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
pv := spec.PersistentVolume
if pv != nil {
// Use beta annotation first
if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
moList := strings.Split(mo, ",")
return JoinMountOptions(moList, options)
}
if len(pv.Spec.MountOptions) > 0 {
return JoinMountOptions(pv.Spec.MountOptions, options)
}
}
return options
}
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
allMountOptions := sets.NewString()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
allMountOptions.Insert(mountOption)
}
}
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return allMountOptions.UnsortedList()
}
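// Illustrative sketch (not part of the original file): combining user-supplied
// mount options with plugin defaults; duplicates are eliminated and the result
// order is unspecified.
func exampleMountOptions() {
opts := JoinMountOptions([]string{"ro", "noatime"}, []string{"ro", "hard"})
fmt.Println(opts) // e.g. [ro noatime hard], in no particular order
}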
// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
if strings.TrimSpace(zone) == "" {
return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
}
return nil
}
// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
for _, mode := range requestedModes {
if !AccessModesContains(indexedModes, mode) {
return false
}
}
return true
}
// GetWindowsPath returns a Windows-style path, converting slashes to backslashes and adding a "c:" prefix for rooted paths
func GetWindowsPath(path string) string {
windowsPath := strings.Replace(path, "/", "\\", -1)
if strings.HasPrefix(windowsPath, "\\") {
windowsPath = "c:" + windowsPath
}
return windowsPath
}
// GetUniquePodName returns a unique identifier to reference a pod by
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
return types.UniquePodName(pod.UID)
}
// GetUniqueVolumeName returns a unique name representing the volume/plugin.
// Caller should ensure that volumeName is a name/ID uniquely identifying the
// actual backing device, directory, path, etc. for a particular volume.
// The returned name can be used to uniquely reference the volume, for example,
// to prevent operations (attach/detach or mount/unmount) from being triggered
// on the same volume.
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
}
// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name
// for a non-attachable volume.
func GetUniqueVolumeNameForNonAttachableVolume(
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
return v1.UniqueVolumeName(
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
}
// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique
// name representing the volume defined in the specified volume spec.
// This returned name can be used to uniquely reference the actual backing
// device, directory, path, etc. referenced by the given volumeSpec.
// If the given plugin does not support the volume spec, this returns an error.
func GetUniqueVolumeNameFromSpec(
volumePlugin volume.VolumePlugin,
volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
if volumePlugin == nil {
return "", fmt.Errorf(
"volumePlugin should not be nil. volumeSpec.Name=%q",
volumeSpec.Name())
}
volumeName, err := volumePlugin.GetVolumeName(volumeSpec)
if err != nil || volumeName == "" {
return "", fmt.Errorf(
"failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
return GetUniqueVolumeName(
volumePlugin.GetPluginName(),
volumeName),
nil
}
// IsPodTerminated checks if pod is terminated
func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool {
return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses))
}
// notRunning returns true if every status is terminated or waiting, or the status list
// is empty.
func notRunning(statuses []v1.ContainerStatus) bool {
for _, status := range statuses {
if status.State.Terminated == nil && status.State.Waiting == nil {
return false
}
}
return true
}
// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
// the unique volume names.
func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
components := strings.SplitN(string(uniqueName), "/", 3)
if len(components) != 3 {
return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName)
}
pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
return pluginName, components[2], nil
}
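// Illustrative sketch (not part of the original file): a unique volume name
// built by GetUniqueVolumeName can be split back into its components.
func exampleSplitUniqueName() {
unique := GetUniqueVolumeName("kubernetes.io/gce-pd", "my-disk")
pluginName, volumeName, err := SplitUniqueName(unique)
if err == nil {
fmt.Println(pluginName, volumeName) // "kubernetes.io/gce-pd my-disk"
}
}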
// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter
// and Exec taken from given VolumeHost.
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
mounter := host.GetMounter(pluginName)
exec := host.GetExec(pluginName)
return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}
}
// GetVolumeMode retrieves VolumeMode from pv.
// If the volume doesn't have a PersistentVolume, it's an inline volume,
// and volumeMode is returned as Filesystem to keep existing behavior.
func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) {
if volumeSpec == nil || volumeSpec.PersistentVolume == nil {
return v1.PersistentVolumeFilesystem, nil
}
if volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil
}
return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name())
}
// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc.
func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) {
if claim.Spec.VolumeMode != nil {
return *claim.Spec.VolumeMode, nil
}
return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name)
}
// CheckVolumeModeFilesystem checks VolumeMode.
// If the mode is Filesystem, return true otherwise return false.
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := GetVolumeMode(volumeSpec)
if err != nil {
return true, err
}
if volumeMode == v1.PersistentVolumeBlock {
return false, nil
}
}
return true, nil
}
// CheckPersistentVolumeClaimModeBlock checks VolumeMode.
// If the mode is Block, return true otherwise return false.
func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool {
return utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock
}
// MakeAbsolutePath converts a path to an absolute path according to GOOS
func MakeAbsolutePath(goos, path string) string {
if goos != "windows" {
return filepath.Clean("/" + path)
}
// These are all for windows
// If there is a colon, give up.
if strings.Contains(path, ":") {
return path
}
// If there is a slash, but no drive, add 'c:'
if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
return "c:" + path
}
// Otherwise, add 'c:\'
return "c:\\" + path
}
// MapBlockVolume is a utility function that provides a common way of mapping
// a block device path for a specified volume and pod. This function should be
// called by volume plugins that implement the volume.BlockVolumeMapper.Map() method.
func MapBlockVolume(
devicePath,
globalMapPath,
podVolumeMapPath,
volumeMapName string,
podUID utypes.UID,
) error {
blkUtil := volumepathhandler.NewBlockVolumePathHandler()
// map devicePath to global node path
mapErr := blkUtil.MapDevice(devicePath, globalMapPath, string(podUID))
if mapErr != nil {
return mapErr
}
// map devicePath to pod volume path
mapErr = blkUtil.MapDevice(devicePath, podVolumeMapPath, volumeMapName)
if mapErr != nil {
return mapErr
}
return nil
}
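// Illustrative usage sketch (not part of the original file): a block-volume
// plugin mapping a device for a pod. The device and map paths below are
// hypothetical placeholders.
func exampleMapBlockVolume(podUID utypes.UID) error {
return MapBlockVolume(
"/dev/sdb", // devicePath
"/var/lib/kubelet/plugins/kubernetes.io/example/volumeDevices/disk-1", // globalMapPath
"/var/lib/kubelet/pods/"+string(podUID)+"/volumeDevices/kubernetes.io~example", // podVolumeMapPath
"disk-1", // volumeMapName
podUID,
)
}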

File diff suppressed because it is too large

View File

@ -1,63 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"volume_path_handler.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"volume_path_handler_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"volume_path_handler_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"volume_path_handler_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/util/volumepathhandler",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,233 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumepathhandler
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
)
const (
losetupPath = "losetup"
ErrDeviceNotFound = "device not found"
ErrDeviceNotSupported = "device not supported"
)
// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
// MapDevice creates a symbolic link to block device under specified map path
MapDevice(devicePath string, mapPath string, linkName string) error
// UnmapDevice removes a symbolic link to block device under specified map path
UnmapDevice(mapPath string, linkName string) error
// RemoveMapPath removes a file or directory on the specified map path
RemoveMapPath(mapPath string) error
// IsSymlinkExist returns true if the specified symbolic link exists
IsSymlinkExist(mapPath string) (bool, error)
// GetDeviceSymlinkRefs searches symbolic links under global map path
GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error)
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
AttachFileDevice(path string) (string, error)
// GetLoopDevice returns the full path to the loop device associated with the given path.
GetLoopDevice(path string) (string, error)
// RemoveLoopDevice removes specified loopback device
RemoveLoopDevice(device string) error
}
// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
var volumePathHandler VolumePathHandler
return volumePathHandler
}
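// Illustrative usage sketch (not part of the original file): constructing a
// handler and mapping a device under a (hypothetical) global map path, using
// the pod UID as the link name.
func exampleMapDevice() error {
handler := NewBlockVolumePathHandler()
return handler.MapDevice(
"/dev/sdb",
"/var/lib/kubelet/plugins/kubernetes.io/example/volumeDevices/disk-1",
"pod-uid-1234", // linkName: the pod UID for global map paths
)
}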
// VolumePathHandler implements path-related operations for block volumes
type VolumePathHandler struct {
}
// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error {
// Example of global map path:
// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
// linkName: {podUid}
//
// Example of pod device map path:
// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// linkName: {volumeName}
if len(devicePath) == 0 {
return fmt.Errorf("Failed to map device to map path. devicePath is empty")
}
if len(mapPath) == 0 {
return fmt.Errorf("Failed to map device to map path. mapPath is empty")
}
if !filepath.IsAbs(mapPath) {
return fmt.Errorf("The map path should be absolute: map path: %s", mapPath)
}
glog.V(5).Infof("MapDevice: devicePath %s", devicePath)
glog.V(5).Infof("MapDevice: mapPath %s", mapPath)
glog.V(5).Infof("MapDevice: linkName %s", linkName)
// Check and create mapPath
_, err := os.Stat(mapPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate map path: %s", mapPath)
return err
}
if err = os.MkdirAll(mapPath, 0750); err != nil {
return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err)
}
// Remove the old symbolic link (or file), then create a new one.
// This is needed because the existing symbolic link may be stale
// after a node reboot.
linkPath := path.Join(mapPath, string(linkName))
if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return err
}
err = os.Symlink(devicePath, linkPath)
return err
}
// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to unmap device from map path. mapPath is empty")
}
glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
glog.V(5).Infof("UnmapDevice: linkName %s", linkName)
// Check symbolic link exists
linkPath := path.Join(mapPath, string(linkName))
if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
return checkErr
} else if !islinkExist {
glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
return nil
}
err := os.Remove(linkPath)
return err
}
// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to remove map path. mapPath is empty")
}
glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
err := os.RemoveAll(mapPath)
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// IsSymlinkExist returns true if the specified file exists and is a symbolic link.
// If the file doesn't exist, or exists but is not a symbolic link, it returns false with no error.
// Otherwise it returns false with the error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
fi, err := os.Lstat(mapPath)
if err == nil {
// If the file exists and is a symbolic link, return true and no error
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return true, nil
}
// If the file exists but is not a symbolic link, return false and no error
return false, nil
}
// If file doesn't exist, return false and no error
if os.IsNotExist(err) {
return false, nil
}
// Return error from Lstat()
return false, err
}
// GetDeviceSymlinkRefs searches symbolic links under global map path
func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) {
var refs []string
files, err := ioutil.ReadDir(mapPath)
if err != nil {
return nil, fmt.Errorf("Directory cannot read %v", err)
}
for _, file := range files {
if file.Mode()&os.ModeSymlink != os.ModeSymlink {
continue
}
filename := file.Name()
filepath, err := os.Readlink(path.Join(mapPath, filename))
if err != nil {
return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
}
glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
if filepath == devPath {
refs = append(refs, path.Join(mapPath, filename))
}
}
glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
return refs, nil
}
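// Illustration (not part of the original file): a hypothetical sketch of collecting every
// symlink under a map path whose target is a given device. Paths are examples only.
func demoGetDeviceSymlinkRefs() {
v := VolumePathHandler{}
refs, err := v.GetDeviceSymlinkRefs("/dev/sdb", "/var/lib/kubelet/pods/example-pod-uid/volumeDevices/example-plugin")
if err != nil {
glog.Errorf("demoGetDeviceSymlinkRefs: %v", err)
return
}
// refs now holds the full path of each symlink in the directory that points at exactly /dev/sdb.
glog.V(4).Infof("symlink refs for /dev/sdb: %v", refs)
}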
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
// ex. mapPath symlink: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
var globalMapPathUUID string
// Find symbolic link named pod uuid under plugin dir
err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) {
glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
if res, err := compareSymlinks(path, mapPath); err == nil && res {
globalMapPathUUID = path
}
}
return nil
})
if err != nil {
return "", err
}
glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
// Return path contains global map path + {pod uuid}
return globalMapPathUUID, nil
}
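// Illustration (not part of the original file): a hypothetical call with concrete example
// paths to make the symlink layout described above tangible. The plugin directory and the
// pod-side map path are examples; both symlinks are expected to point at the same /dev/sdX.
func demoFindGlobalMapPathUUID() {
v := VolumePathHandler{}
// pod-side:    /var/lib/kubelet/pods/example-pod-uid/volumeDevices/example-plugin/vol1 -> /dev/sdb
// plugin-side: /var/lib/kubelet/plugins/example-plugin/volumeDevices/example-disk/example-pod-uid -> /dev/sdb
globalMapPathUUID, err := v.FindGlobalMapPathUUIDFromPod(
"/var/lib/kubelet/plugins/example-plugin",
"/var/lib/kubelet/pods/example-pod-uid/volumeDevices/example-plugin/vol1",
types.UID("example-pod-uid"),
)
if err != nil {
glog.Errorf("demoFindGlobalMapPathUUID: %v", err)
return
}
// On success this is the plugin-side path that ends in the pod UID.
glog.V(4).Infof("globalMapPathUUID: %s", globalMapPathUUID)
}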
func compareSymlinks(global, pod string) (bool, error) {
devGlobal, err := os.Readlink(global)
if err != nil {
return false, err
}
devPod, err := os.Readlink(pod)
if err != nil {
return false, err
}
glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
if devGlobal == devPod {
return true, nil
}
return false, nil
}

View File

@ -1,108 +0,0 @@
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumepathhandler
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/golang/glog"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
blockDevicePath, err := v.GetLoopDevice(path)
if err != nil && err.Error() != ErrDeviceNotFound {
return "", err
}
// If no existing loop device for the path, create one
if blockDevicePath == "" {
glog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path)
if err != nil {
return "", err
}
}
return blockDevicePath, nil
}
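// Illustration (not part of the original file): a hypothetical sketch of backing a regular
// file with a loop device via AttachFileDevice. The file path is an example; on Linux this
// shells out to losetup as shown in the helpers below.
func demoAttachFileDevice() {
v := VolumePathHandler{}
loopDev, err := v.AttachFileDevice("/var/lib/example/disk.img")
if err != nil {
glog.Errorf("demoAttachFileDevice: %v", err)
return
}
// loopDev is something like /dev/loop1, either pre-existing or freshly created.
glog.V(4).Infof("file is available as block device %s", loopDev)
}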
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrDeviceNotFound)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
}
args := []string{"-j", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out)
return "", err
}
return parseLosetupOutputForDevice(out)
}
func makeLoopDevice(path string) (string, error) {
args := []string{"-f", "--show", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out)
return "", err
}
return parseLosetupOutputForDevice(out)
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
args := []string{"-d", device}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
if _, err := os.Stat(device); os.IsNotExist(err) {
return nil
}
glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
return err
}
return nil
}
func parseLosetupOutputForDevice(output []byte) (string, error) {
if len(output) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
// losetup returns device in the format:
// /dev/loop1: [0073]:148662 (/dev/sda)
device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
if len(device) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
return device, nil
}
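// Illustration (not part of the original file): a worked example of the parsing above.
// Given the sample "losetup -j" output format from the comment, the device path before
// the first colon is returned. The backing file path is an example only.
func demoParseLosetupOutput() {
sample := []byte("/dev/loop1: [0073]:148662 (/var/lib/example/disk.img)\n")
device, err := parseLosetupOutputForDevice(sample)
if err != nil {
glog.Errorf("demoParseLosetupOutput: %v", err)
return
}
glog.V(4).Infof("parsed loop device: %s", device) // /dev/loop1
}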

View File

@ -1,39 +0,0 @@
// +build !linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumepathhandler
import (
"fmt"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
return "", fmt.Errorf("AttachFileDevice not supported for this build.")
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
return "", fmt.Errorf("GetLoopDevice not supported for this build.")
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
return fmt.Errorf("RemoveLoopDevice not supported for this build.")
}