Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
build: move e2e dependencies into e2e/go.mod

Several packages are only used while running the e2e suite. These packages are less important to update, as they cannot influence the final executable that is part of the Ceph-CSI container image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify whether a reported CVE affects Ceph-CSI itself or only the testing code (as is the case for most Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>
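As a rough illustration of the resulting layout (the module path, Go version, and dependency versions below are assumptions for this sketch, not values taken from the commit), the e2e suite gets its own module file next to the test code, typically created with `go mod init` and populated with `go mod tidy`:

    // e2e/go.mod (hypothetical sketch)
    module github.com/ceph/ceph-csi/e2e

    go 1.22

    require (
        github.com/onsi/ginkgo/v2 v2.17.0 // e2e test framework; version illustrative
        github.com/onsi/gomega v1.32.0    // matchers; version illustrative
        k8s.io/kubernetes v1.30.0         // test-only Kubernetes imports; version illustrative
    )

With such a split, tools that read only the top-level go.mod/go.sum see the runtime dependency set without the test-only modules, which is what makes CVE triage easier.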
Committed by: mergify[bot]
Parent: 15da101b1b
Commit: bec6090996
506 vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go (generated, vendored)
@@ -1,506 +0,0 @@ entire vendored file removed
27 vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_linux.go (generated, vendored)
@@ -1,27 +0,0 @@ entire vendored file removed
33 vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer_unsupported.go (generated, vendored)
@@ -1,33 +0,0 @@ entire vendored file removed
70 vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go (generated, vendored)
@@ -1,70 +0,0 @@ entire vendored file removed
34 vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go (generated, vendored)
@@ -1,34 +0,0 @@ entire vendored file removed
306 vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go (generated, vendored)
@@ -1,306 +0,0 @@ entire vendored file removed
43 vendor/k8s.io/kubernetes/pkg/volume/util/device_util_unsupported.go (generated, vendored)
@@ -1,43 +0,0 @@ entire vendored file removed
18 vendor/k8s.io/kubernetes/pkg/volume/util/doc.go (generated, vendored)
@@ -1,18 +0,0 @@ entire vendored file removed
28 vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go (generated, vendored)
@@ -1,28 +0,0 @@ entire vendored file removed
51 vendor/k8s.io/kubernetes/pkg/volume/util/io_util.go (generated, vendored)
@@ -1,51 +0,0 @@ entire vendored file removed
161 vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go (generated, vendored)
@@ -1,161 +0,0 @@ entire vendored file removed
114 vendor/k8s.io/kubernetes/pkg/volume/util/nested_volumes.go (generated, vendored)
@@ -1,114 +0,0 @@ entire vendored file removed
441
vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go
generated
vendored
441
vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go
generated
vendored
@ -1,441 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
	"k8s.io/mount-utils"
)

var (
	knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{
		v1.PersistentVolumeClaimFileSystemResizePending: true,
		v1.PersistentVolumeClaimResizing:                true,
		v1.PersistentVolumeClaimControllerResizeError:   true,
		v1.PersistentVolumeClaimNodeResizeError:         true,
	}

	// AnnPreResizeCapacity annotation is added to a PV when expanding volume.
	// Its value is status capacity of the PVC prior to the volume expansion
	// Its value will be set by the external-resizer when it deems that filesystem resize is required after resizing volume.
	// Its value will be used by pv_controller to determine pvc's status capacity when binding pvc and pv.
	AnnPreResizeCapacity = "volume.alpha.kubernetes.io/pre-resize-capacity"
)

type resizeProcessStatus struct {
	condition v1.PersistentVolumeClaimCondition
	processed bool
}

// UpdatePVSize updates just pv size after cloudprovider resizing is successful
func UpdatePVSize(
	pv *v1.PersistentVolume,
	newSize resource.Quantity,
	kubeClient clientset.Interface) (*v1.PersistentVolume, error) {
	pvClone := pv.DeepCopy()
	pvClone.Spec.Capacity[v1.ResourceStorage] = newSize

	return PatchPV(pv, pvClone, kubeClient)
}

// AddAnnPreResizeCapacity adds volume.alpha.kubernetes.io/pre-resize-capacity from the pv
func AddAnnPreResizeCapacity(
	pv *v1.PersistentVolume,
	oldCapacity resource.Quantity,
	kubeClient clientset.Interface) error {
	// if the pv already has a resize annotation skip the process
	if metav1.HasAnnotation(pv.ObjectMeta, AnnPreResizeCapacity) {
		return nil
	}

	pvClone := pv.DeepCopy()
	if pvClone.ObjectMeta.Annotations == nil {
		pvClone.ObjectMeta.Annotations = make(map[string]string)
	}
	pvClone.ObjectMeta.Annotations[AnnPreResizeCapacity] = oldCapacity.String()

	_, err := PatchPV(pv, pvClone, kubeClient)
	return err
}

// DeleteAnnPreResizeCapacity deletes volume.alpha.kubernetes.io/pre-resize-capacity from the pv
func DeleteAnnPreResizeCapacity(
	pv *v1.PersistentVolume,
	kubeClient clientset.Interface) error {
	// if the pv does not have a resize annotation skip the entire process
	if !metav1.HasAnnotation(pv.ObjectMeta, AnnPreResizeCapacity) {
		return nil
	}
	pvClone := pv.DeepCopy()
	delete(pvClone.ObjectMeta.Annotations, AnnPreResizeCapacity)
	_, err := PatchPV(pv, pvClone, kubeClient)
	return err
}

// PatchPV creates and executes a patch for pv
func PatchPV(
	oldPV *v1.PersistentVolume,
	newPV *v1.PersistentVolume,
	kubeClient clientset.Interface) (*v1.PersistentVolume, error) {
	oldData, err := json.Marshal(oldPV)
	if err != nil {
		return oldPV, fmt.Errorf("unexpected error marshaling old PV %q with error : %v", oldPV.Name, err)
	}

	newData, err := json.Marshal(newPV)
	if err != nil {
		return oldPV, fmt.Errorf("unexpected error marshaling new PV %q with error : %v", newPV.Name, err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPV)
	if err != nil {
		return oldPV, fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", oldPV.Name, err)
	}

	updatedPV, err := kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), oldPV.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		return oldPV, fmt.Errorf("error Patching PV %q with error : %v", oldPV.Name, err)
	}
	return updatedPV, nil
}

// MarkResizeInProgressWithResizer marks cloudprovider resizing as in progress
// and also annotates the PVC with the name of the resizer.
func MarkResizeInProgressWithResizer(
	pvc *v1.PersistentVolumeClaim,
	resizerName string,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	// Mark PVC as Resize Started
	progressCondition := v1.PersistentVolumeClaimCondition{
		Type:               v1.PersistentVolumeClaimResizing,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
	}
	conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
	newPVC := pvc.DeepCopy()
	newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
	newPVC = setResizer(newPVC, resizerName)
	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}

func MarkControllerReisizeInProgress(pvc *v1.PersistentVolumeClaim, resizerName string, newSize resource.Quantity, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	// Mark PVC as Resize Started
	progressCondition := v1.PersistentVolumeClaimCondition{
		Type:               v1.PersistentVolumeClaimResizing,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
	}
	conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
	newPVC := pvc.DeepCopy()
	newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
	newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimControllerResizeInProgress)
	newPVC = mergeStorageAllocatedResources(newPVC, newSize)
	newPVC = setResizer(newPVC, resizerName)
	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}

// SetClaimResizer sets resizer annotation on PVC
func SetClaimResizer(
	pvc *v1.PersistentVolumeClaim,
	resizerName string,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	newPVC := pvc.DeepCopy()
	newPVC = setResizer(newPVC, resizerName)
	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}

func setResizer(pvc *v1.PersistentVolumeClaim, resizerName string) *v1.PersistentVolumeClaim {
	if val, ok := pvc.Annotations[volumetypes.VolumeResizerKey]; ok && val == resizerName {
		return pvc
	}
	metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volumetypes.VolumeResizerKey, resizerName)
	return pvc
}

// MarkForFSResize marks file system resizing as pending
func MarkForFSResize(
	pvc *v1.PersistentVolumeClaim,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	pvcCondition := v1.PersistentVolumeClaimCondition{
		Type:               v1.PersistentVolumeClaimFileSystemResizePending,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Message:            "Waiting for user to (re-)start a pod to finish file system resize of volume on node.",
	}
	conditions := []v1.PersistentVolumeClaimCondition{pvcCondition}
	newPVC := pvc.DeepCopy()

	if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
		newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizePending)
	}

	newPVC = MergeResizeConditionOnPVC(newPVC, conditions, true /* keepOldResizeConditions */)
	updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
	return updatedPVC, err
}

// MarkResizeFinished marks all resizing as done
func MarkResizeFinished(
	pvc *v1.PersistentVolumeClaim,
	newSize resource.Quantity,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	return MarkFSResizeFinished(pvc, newSize, kubeClient)
}

// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
	pvc *v1.PersistentVolumeClaim,
	newSize resource.Quantity,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	newPVC := pvc.DeepCopy()

	newPVC.Status.Capacity[v1.ResourceStorage] = newSize

	// if RecoverVolumeExpansionFailure is enabled, we need to reset ResizeStatus back to nil
	if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
		allocatedResourceStatusMap := newPVC.Status.AllocatedResourceStatuses
		delete(allocatedResourceStatusMap, v1.ResourceStorage)
		if len(allocatedResourceStatusMap) == 0 {
			newPVC.Status.AllocatedResourceStatuses = nil
		} else {
			newPVC.Status.AllocatedResourceStatuses = allocatedResourceStatusMap
		}
	}

	newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{}, false /* keepOldResizeConditions */)
	updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
	return updatedPVC, err
}

// MarkNodeExpansionInfeasible marks a PVC for node expansion as failed. Kubelet should not retry expansion
// of volumes which are in failed state.
func MarkNodeExpansionInfeasible(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
	newPVC := pvc.DeepCopy()
	newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeInfeasible)
	errorCondition := v1.PersistentVolumeClaimCondition{
		Type:               v1.PersistentVolumeClaimNodeResizeError,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Message:            fmt.Sprintf("failed to expand pvc with %v", err),
	}
	newPVC = MergeResizeConditionOnPVC(newPVC,
		[]v1.PersistentVolumeClaimCondition{errorCondition},
		true /* keepOldResizeConditions */)

	patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
	if err != nil {
		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err)
	}

	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
		Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if updateErr != nil {
		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr)
	}
	return updatedClaim, nil
}

func MarkNodeExpansionFailedCondition(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
	newPVC := pvc.DeepCopy()
	errorCondition := v1.PersistentVolumeClaimCondition{
		Type:               v1.PersistentVolumeClaimNodeResizeError,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Message:            fmt.Sprintf("failed to expand pvc with %v", err),
	}
	newPVC = MergeResizeConditionOnPVC(newPVC,
		[]v1.PersistentVolumeClaimCondition{errorCondition},
		true /* keepOldResizeConditions */)
	patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
	if err != nil {
		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, err)
	}

	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
		Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if updateErr != nil {
		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, updateErr)
	}
	return updatedClaim, nil
}

// MarkNodeExpansionInProgress marks pvc expansion in progress on node
func MarkNodeExpansionInProgress(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	newPVC := pvc.DeepCopy()
	newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeInProgress)
	updatedPVC, err := PatchPVCStatus(pvc /* oldPVC */, newPVC, kubeClient)
	return updatedPVC, err
}

// PatchPVCStatus updates PVC status using PATCH verb
// Don't use Update because this can be called from kubelet and if kubelet has an older client its
// Updates will overwrite new fields. And to avoid writing to a stale object, add ResourceVersion
// to the patch so that Patch will fail if the patch's RV != actual up-to-date RV like Update would
func PatchPVCStatus(
	oldPVC *v1.PersistentVolumeClaim,
	newPVC *v1.PersistentVolumeClaim,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	patchBytes, err := createPVCPatch(oldPVC, newPVC, true /* addResourceVersionCheck */)
	if err != nil {
		return oldPVC, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, err)
	}

	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
		Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if updateErr != nil {
		return oldPVC, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr)
	}
	return updatedClaim, nil
}

func createPVCPatch(
	oldPVC *v1.PersistentVolumeClaim,
	newPVC *v1.PersistentVolumeClaim, addResourceVersionCheck bool) ([]byte, error) {
	oldData, err := json.Marshal(oldPVC)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal old data: %v", err)
	}

	newData, err := json.Marshal(newPVC)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal new data: %v", err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
	if err != nil {
		return nil, fmt.Errorf("failed to create 2 way merge patch: %v", err)
	}

	if addResourceVersionCheck {
		patchBytes, err = addResourceVersion(patchBytes, oldPVC.ResourceVersion)
		if err != nil {
			return nil, fmt.Errorf("failed to add resource version: %v", err)
		}
	}

	return patchBytes, nil
}

func addResourceVersion(patchBytes []byte, resourceVersion string) ([]byte, error) {
	var patchMap map[string]interface{}
	err := json.Unmarshal(patchBytes, &patchMap)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling patch: %v", err)
	}
	u := unstructured.Unstructured{Object: patchMap}
	a, err := meta.Accessor(&u)
	if err != nil {
		return nil, fmt.Errorf("error creating accessor: %v", err)
	}
	a.SetResourceVersion(resourceVersion)
	versionBytes, err := json.Marshal(patchMap)
	if err != nil {
		return nil, fmt.Errorf("error marshalling json patch: %v", err)
	}
	return versionBytes, nil
}

// MergeResizeConditionOnPVC updates pvc with requested resize conditions
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
	pvc *v1.PersistentVolumeClaim,
	resizeConditions []v1.PersistentVolumeClaimCondition, keepOldResizeConditions bool) *v1.PersistentVolumeClaim {
	resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}

	for _, condition := range resizeConditions {
		resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
	}

	oldConditions := pvc.Status.Conditions
	newConditions := []v1.PersistentVolumeClaimCondition{}
	for _, condition := range oldConditions {
		// If Condition is of not resize type, we keep it.
		if _, ok := knownResizeConditions[condition.Type]; !ok {
			newConditions = append(newConditions, condition)
			continue
		}

		if newCondition, ok := resizeConditionMap[condition.Type]; ok {
			if newCondition.condition.Status != condition.Status {
				newConditions = append(newConditions, newCondition.condition)
			} else {
				newConditions = append(newConditions, condition)
			}
			newCondition.processed = true
		} else if keepOldResizeConditions {
			// if keepOldResizeConditions is true, we keep the old resize conditions that were present in the
			// existing pvc.Status.Conditions field.
			newConditions = append(newConditions, condition)
		}
	}

	// append all unprocessed conditions
	for _, newCondition := range resizeConditionMap {
		if !newCondition.processed {
			newConditions = append(newConditions, newCondition.condition)
		}
	}
	pvc.Status.Conditions = newConditions
	return pvc
}

func mergeStorageResourceStatus(pvc *v1.PersistentVolumeClaim, status v1.ClaimResourceStatus) *v1.PersistentVolumeClaim {
	allocatedResourceStatusMap := pvc.Status.AllocatedResourceStatuses
	if allocatedResourceStatusMap == nil {
		pvc.Status.AllocatedResourceStatuses = map[v1.ResourceName]v1.ClaimResourceStatus{
			v1.ResourceStorage: status,
		}
		return pvc
	}
	allocatedResourceStatusMap[v1.ResourceStorage] = status
	pvc.Status.AllocatedResourceStatuses = allocatedResourceStatusMap
	return pvc
}

func mergeStorageAllocatedResources(pvc *v1.PersistentVolumeClaim, size resource.Quantity) *v1.PersistentVolumeClaim {
	allocatedResourcesMap := pvc.Status.AllocatedResources
	if allocatedResourcesMap == nil {
		pvc.Status.AllocatedResources = map[v1.ResourceName]resource.Quantity{
			v1.ResourceStorage: size,
		}
		return pvc
	}
	allocatedResourcesMap[v1.ResourceStorage] = size
	pvc.Status.AllocatedResources = allocatedResourcesMap
	return pvc
}

// GenericResizeFS : call generic filesystem resizer for plugins that don't have any special filesystem resize requirements
func GenericResizeFS(host volume.VolumeHost, pluginName, devicePath, deviceMountPath string) (bool, error) {
	resizer := mount.NewResizeFs(host.GetExec(pluginName))
	return resizer.Resize(devicePath, deviceMountPath)
}
313
vendor/k8s.io/kubernetes/pkg/volume/util/selinux.go
generated
vendored
313
vendor/k8s.io/kubernetes/pkg/volume/util/selinux.go
generated
vendored
@ -1,313 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"errors"
	"fmt"
	"strings"

	"github.com/opencontainers/selinux/go-selinux"
	"github.com/opencontainers/selinux/go-selinux/label"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
)

// SELinuxLabelTranslator translates v1.SELinuxOptions of a process to SELinux file label.
type SELinuxLabelTranslator interface {
	// SELinuxOptionsToFileLabel returns SELinux file label for given SELinuxOptions
	// of a container process.
	// When Role, User or Type are empty, they're read from the system defaults.
	// It returns "" and no error on platforms that do not have SELinux enabled
	// or don't support SELinux at all.
	SELinuxOptionsToFileLabel(opts *v1.SELinuxOptions) (string, error)

	// SELinuxEnabled returns true when the OS has enabled SELinux support.
	SELinuxEnabled() bool
}

// Real implementation of the interface.
// On Linux with SELinux enabled it translates. Otherwise it always returns an empty string and no error.
type translator struct{}

var _ SELinuxLabelTranslator = &translator{}

// NewSELinuxLabelTranslator returns new SELinuxLabelTranslator for the platform.
func NewSELinuxLabelTranslator() SELinuxLabelTranslator {
	return &translator{}
}

// SELinuxOptionsToFileLabel returns SELinux file label for given SELinuxOptions
// of a container process.
// When Role, User or Type are empty, they're read from the system defaults.
// It returns "" and no error on platforms that do not have SELinux enabled
// or don't support SELinux at all.
func (l *translator) SELinuxOptionsToFileLabel(opts *v1.SELinuxOptions) (string, error) {
	if opts == nil {
		return "", nil
	}

	args := contextOptions(opts)
	if len(args) == 0 {
		return "", nil
	}

	processLabel, fileLabel, err := label.InitLabels(args)
	if err != nil {
		// In theory, this should be unreachable. InitLabels can fail only when args contain an unknown option,
		// and all options returned by contextOptions are known.
		return "", &SELinuxLabelTranslationError{msg: err.Error()}
	}
	// InitLabels() may allocate a new unique SELinux label in kubelet memory. The label is *not* allocated
	// in the container runtime. Clear it to avoid memory problems.
	// ReleaseLabel on non-allocated label is NOOP.
	selinux.ReleaseLabel(processLabel)

	return fileLabel, nil
}

// Convert SELinuxOptions to []string accepted by label.InitLabels
func contextOptions(opts *v1.SELinuxOptions) []string {
	if opts == nil {
		return nil
	}
	args := make([]string, 0, 3)
	if opts.User != "" {
		args = append(args, "user:"+opts.User)
	}
	if opts.Role != "" {
		args = append(args, "role:"+opts.Role)
	}
	if opts.Type != "" {
		args = append(args, "type:"+opts.Type)
	}
	if opts.Level != "" {
		args = append(args, "level:"+opts.Level)
	}
	return args
}

func (l *translator) SELinuxEnabled() bool {
	return selinux.GetEnabled()
}

// Fake implementation of the interface for unit tests.
type fakeTranslator struct{}

var _ SELinuxLabelTranslator = &fakeTranslator{}

// NewFakeSELinuxLabelTranslator returns a fake translator for unit tests.
// It imitates a real translator on platforms that do not have SELinux enabled
// or don't support SELinux at all.
func NewFakeSELinuxLabelTranslator() SELinuxLabelTranslator {
	return &fakeTranslator{}
}

// SELinuxOptionsToFileLabel returns SELinux file label for given options.
func (l *fakeTranslator) SELinuxOptionsToFileLabel(opts *v1.SELinuxOptions) (string, error) {
	if opts == nil {
		return "", nil
	}
	// Fill empty values from "system defaults" (taken from Fedora Linux).
	user := opts.User
	if user == "" {
		user = "system_u"
	}

	role := opts.Role
	if role == "" {
		role = "object_r"
	}

	// opts is context of the *process* to run in a container. Translate
	// process type "container_t" to file label type "container_file_t".
	// (The rest of the context is the same for processes and files).
	fileType := opts.Type
	if fileType == "" || fileType == "container_t" {
		fileType = "container_file_t"
	}

	level := opts.Level
	if level == "" {
		// If empty, level is allocated randomly.
		level = "s0:c998,c999"
	}

	ctx := fmt.Sprintf("%s:%s:%s:%s", user, role, fileType, level)
	return ctx, nil
}

func (l *fakeTranslator) SELinuxEnabled() bool {
	return true
}

type SELinuxLabelTranslationError struct {
	msg string
}

func (e *SELinuxLabelTranslationError) Error() string {
	return e.msg
}

func IsSELinuxLabelTranslationError(err error) bool {
	var seLinuxError *SELinuxLabelTranslationError
	return errors.As(err, &seLinuxError)
}

// SupportsSELinuxContextMount checks if the given volumeSpec supports with mount -o context
func SupportsSELinuxContextMount(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) (bool, error) {
	plugin, _ := volumePluginMgr.FindPluginBySpec(volumeSpec)
	if plugin != nil {
		return plugin.SupportsSELinuxContextMount(volumeSpec)
	}

	return false, nil
}

// VolumeSupportsSELinuxMount returns true if given volume access mode can support mount with SELinux mount options.
func VolumeSupportsSELinuxMount(volumeSpec *volume.Spec) bool {
	if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
		return false
	}
	if volumeSpec.PersistentVolume == nil {
		return false
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMount) {
		return true
	}

	// Only SELinuxMountReadWriteOncePod feature is enabled
	if len(volumeSpec.PersistentVolume.Spec.AccessModes) != 1 {
		// RWOP volumes must be the only access mode of the volume
		return false
	}
	if !v1helper.ContainsAccessMode(volumeSpec.PersistentVolume.Spec.AccessModes, v1.ReadWriteOncePod) {
		// Not a RWOP volume
		return false
	}
	// RWOP volume
	return true
}

// MultipleSELinuxLabelsError tells that one volume in a pod is mounted in multiple containers and each has a different SELinux label.
type MultipleSELinuxLabelsError struct {
	labels []string
}

func (e *MultipleSELinuxLabelsError) Error() string {
	return fmt.Sprintf("multiple SELinux labels found: %s", strings.Join(e.labels, ","))
}

func (e *MultipleSELinuxLabelsError) Labels() []string {
	return e.labels
}

func IsMultipleSELinuxLabelsError(err error) bool {
	var multiError *MultipleSELinuxLabelsError
	return errors.As(err, &multiError)
}

// AddSELinuxMountOption adds -o context="XYZ" mount option to a given list
func AddSELinuxMountOption(options []string, seLinuxContext string) []string {
	if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
		return options
	}
	// Use double quotes to support a comma "," in the SELinux context string.
	// For example: dirsync,context="system_u:object_r:container_file_t:s0:c15,c25",noatime
	return append(options, fmt.Sprintf("context=%q", seLinuxContext))
}

// SELinuxLabelInfo contains information about SELinux labels that should be used to mount a volume for a Pod.
type SELinuxLabelInfo struct {
	// SELinuxMountLabel is the SELinux label that should be used to mount the volume.
	// The volume plugin supports SELinuxMount and the Pod did not opt out via SELinuxChangePolicy.
	// Empty string otherwise.
	SELinuxMountLabel string
	// SELinuxProcessLabel is the SELinux label that will the container runtime use for the Pod.
	// Regardless if the volume plugin supports SELinuxMount or the Pod opted out via SELinuxChangePolicy.
	SELinuxProcessLabel string
	// PluginSupportsSELinuxContextMount is true if the volume plugin supports SELinux mount.
	PluginSupportsSELinuxContextMount bool
}

// GetMountSELinuxLabel returns SELinux labels that should be used to mount the given volume volumeSpec and podSecurityContext.
// It expects effectiveSELinuxContainerLabels as returned by volumeutil.GetPodVolumeNames, i.e. with all SELinuxOptions
// from all containers that use the volume in the pod, potentially expanded with PodSecurityContext.SELinuxOptions,
// if container's SELinuxOptions are nil.
// It does not evaluate the volume access mode! It's up to the caller to check SELinuxMount feature gate,
// it may need to bump different metrics based on feature gates / access modes / label anyway.
func GetMountSELinuxLabel(volumeSpec *volume.Spec, effectiveSELinuxContainerLabels []*v1.SELinuxOptions, podSecurityContext *v1.PodSecurityContext, volumePluginMgr *volume.VolumePluginMgr, seLinuxTranslator SELinuxLabelTranslator) (SELinuxLabelInfo, error) {
	info := SELinuxLabelInfo{}
	if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
		return info, nil
	}

	if !seLinuxTranslator.SELinuxEnabled() {
		return info, nil
	}

	pluginSupportsSELinuxContextMount, err := SupportsSELinuxContextMount(volumeSpec, volumePluginMgr)
	if err != nil {
		return info, err
	}

	info.PluginSupportsSELinuxContextMount = pluginSupportsSELinuxContextMount

	// Collect all SELinux options from all containers that use this volume.
	// A set will squash any duplicities.
	labels := sets.New[string]()
	for _, containerLabel := range effectiveSELinuxContainerLabels {
		lbl, err := seLinuxTranslator.SELinuxOptionsToFileLabel(containerLabel)
		if err != nil {
			fullErr := fmt.Errorf("failed to construct SELinux label from context %q: %w", containerLabel, err)
			return info, fullErr
		}
		labels.Insert(lbl)
	}

	// Ensure that all containers use the same SELinux label.
	if labels.Len() > 1 {
		// This volume is used with more than one SELinux label in the pod.
		return info, &MultipleSELinuxLabelsError{labels: labels.UnsortedList()}
	}
	if labels.Len() == 0 {
		return info, nil
	}

	lbl, _ := labels.PopAny()
	info.SELinuxProcessLabel = lbl
	info.SELinuxMountLabel = lbl

	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxChangePolicy) &&
		podSecurityContext != nil &&
		podSecurityContext.SELinuxChangePolicy != nil &&
		*podSecurityContext.SELinuxChangePolicy == v1.SELinuxChangePolicyRecursive {
		// The pod has opted into recursive SELinux label changes. Do not mount with -o context.
		info.SELinuxMountLabel = ""
	}

	if !pluginSupportsSELinuxContextMount {
		// The volume plugin does not support SELinux mount. Do not mount with -o context.
		info.SELinuxMountLabel = ""
	}

	return info, nil
}
85
vendor/k8s.io/kubernetes/pkg/volume/util/storageclass.go
generated
vendored
85
vendor/k8s.io/kubernetes/pkg/volume/util/storageclass.go
generated
vendored
@ -1,85 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"sort"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	storagev1listers "k8s.io/client-go/listers/storage/v1"
	"k8s.io/klog/v2"
)

const (
	// isDefaultStorageClassAnnotation represents a StorageClass annotation that
	// marks a class as the default StorageClass
	IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"

	// betaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
	// TODO: remove Beta when no longer used
	BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
)

// GetDefaultClass returns the default StorageClass from the store, or nil.
func GetDefaultClass(lister storagev1listers.StorageClassLister) (*storagev1.StorageClass, error) {
	list, err := lister.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	defaultClasses := []*storagev1.StorageClass{}
	for _, class := range list {
		if IsDefaultAnnotation(class.ObjectMeta) {
			defaultClasses = append(defaultClasses, class)
			klog.V(4).Infof("GetDefaultClass added: %s", class.Name)
		}
	}

	if len(defaultClasses) == 0 {
		return nil, nil
	}

	// Primary sort by creation timestamp, newest first
	// Secondary sort by class name, ascending order
	sort.Slice(defaultClasses, func(i, j int) bool {
		if defaultClasses[i].CreationTimestamp.UnixNano() == defaultClasses[j].CreationTimestamp.UnixNano() {
			return defaultClasses[i].Name < defaultClasses[j].Name
		}
		return defaultClasses[i].CreationTimestamp.UnixNano() > defaultClasses[j].CreationTimestamp.UnixNano()
	})
	if len(defaultClasses) > 1 {
		klog.V(4).Infof("%d default StorageClasses were found, choosing: %s", len(defaultClasses), defaultClasses[0].Name)
	}

	return defaultClasses[0], nil
}

// IsDefaultAnnotation returns a boolean if the default storage class
// annotation is set
// TODO: remove Beta when no longer needed
func IsDefaultAnnotation(obj metav1.ObjectMeta) bool {
	if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
		return true
	}
	if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
		return true
	}

	return false
}
691
vendor/k8s.io/kubernetes/pkg/volume/util/util.go
generated
vendored
691
vendor/k8s.io/kubernetes/pkg/volume/util/util.go
generated
vendored
@ -1,691 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apiruntime "k8s.io/apimachinery/pkg/runtime"
|
||||
utypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/types"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
|
||||
"k8s.io/mount-utils"
|
||||
"k8s.io/utils/io"
|
||||
utilstrings "k8s.io/utils/strings"
|
||||
)
|
||||
|
||||
const (
|
||||
readyFileName = "ready"
|
||||
|
||||
// ControllerManagedAttachAnnotation is the key of the annotation on Node
|
||||
// objects that indicates attach/detach operations for the node should be
|
||||
// managed by the attach/detach controller
|
||||
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
|
||||
|
||||
// MountsInGlobalPDPath is name of the directory appended to a volume plugin
|
||||
// name to create the place for volume mounts in the global PD path.
|
||||
MountsInGlobalPDPath = "mounts"
|
||||
|
||||
// VolumeGidAnnotationKey is the of the annotation on the PersistentVolume
|
||||
// object that specifies a supplemental GID.
|
||||
VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
|
||||
|
||||
// VolumeDynamicallyCreatedByKey is the key of the annotation on PersistentVolume
|
||||
// object created dynamically
|
||||
VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
|
||||
|
||||
// kubernetesPluginPathPrefix is the prefix of kubernetes plugin mount paths.
|
||||
kubernetesPluginPathPrefix = "/plugins/kubernetes.io/"
|
||||
)
|
||||
|
||||
// IsReady checks for the existence of a regular file
|
||||
// called 'ready' in the given directory and returns
|
||||
// true if that file exists.
|
||||
func IsReady(dir string) bool {
|
||||
readyFile := filepath.Join(dir, readyFileName)
|
||||
s, err := os.Stat(readyFile)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if !s.Mode().IsRegular() {
|
||||
klog.Errorf("ready-file is not a file: %s", readyFile)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// SetReady creates a file called 'ready' in the given
|
||||
// directory. It logs an error if the file cannot be
|
||||
// created.
|
||||
func SetReady(dir string) {
|
||||
if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
|
||||
klog.Errorf("Can't mkdir %s: %v", dir, err)
|
||||
return
|
||||
}
|
||||
|
||||
readyFile := filepath.Join(dir, readyFileName)
|
||||
file, err := os.Create(readyFile)
|
||||
if err != nil {
|
||||
klog.Errorf("Can't touch %s: %v", readyFile, err)
|
||||
return
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
|
||||
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
|
||||
func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
|
||||
secret := make(map[string]string)
|
||||
if kubeClient == nil {
|
||||
return secret, fmt.Errorf("cannot get kube client")
|
||||
}
|
||||
secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return secret, err
|
||||
}
|
||||
if secrets.Type != v1.SecretType(volumePluginName) {
|
||||
return secret, fmt.Errorf("cannot get secret of type %s", volumePluginName)
|
||||
}
|
||||
for name, data := range secrets.Data {
|
||||
secret[name] = string(data)
|
||||
}
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
// LoadPodFromFile will read, decode, and return a Pod from a file.
|
||||
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
|
||||
if filePath == "" {
|
||||
return nil, fmt.Errorf("file path not specified")
|
||||
}
|
||||
podDef, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err)
|
||||
}
|
||||
if len(podDef) == 0 {
|
||||
return nil, fmt.Errorf("file was empty: %s", filePath)
|
||||
}
|
||||
pod := &v1.Pod{}
|
||||
|
||||
codec := legacyscheme.Codecs.UniversalDecoder()
|
||||
if err := apiruntime.DecodeInto(codec, podDef, pod); err != nil {
|
||||
return nil, fmt.Errorf("failed decoding file: %v", err)
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
|
||||
// recycle operation. The calculation and return value is either the
|
||||
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
|
||||
// greater.
|
||||
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
|
||||
giQty := resource.MustParse("1Gi")
|
||||
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
|
||||
giSize := giQty.Value()
|
||||
pvSize := pvQty.Value()
|
||||
timeout := (pvSize / giSize) * int64(timeoutIncrement)
|
||||
if timeout < int64(minimumTimeout) {
|
||||
return int64(minimumTimeout)
|
||||
}
|
||||
return timeout
|
||||
}
|
||||
|
||||
// GetPath checks if the path from the mounter is empty.
|
||||
func GetPath(mounter volume.Mounter) (string, error) {
|
||||
path := mounter.GetPath()
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("path is empty %s", reflect.TypeOf(mounter).String())
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
|
||||
// to empty_dir
|
||||
func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {
|
||||
klog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
|
||||
|
||||
// Wrap EmptyDir, let it do the teardown.
|
||||
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return wrapped.TearDownAt(dir)
|
||||
}
|
||||
|
||||
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
|
||||
func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
|
||||
pv := spec.PersistentVolume
|
||||
|
||||
if pv != nil {
|
||||
// Use beta annotation first
|
||||
if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
|
||||
moList := strings.Split(mo, ",")
|
||||
return JoinMountOptions(moList, options)
|
||||
}
|
||||
|
||||
if len(pv.Spec.MountOptions) > 0 {
|
||||
return JoinMountOptions(pv.Spec.MountOptions, options)
|
||||
}
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
// JoinMountOptions joins mount options eliminating duplicates
|
||||
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
|
||||
allMountOptions := sets.New[string]()
|
||||
|
||||
for _, mountOption := range userOptions {
|
||||
if len(mountOption) > 0 {
|
||||
allMountOptions.Insert(mountOption)
|
||||
}
|
||||
}
|
||||
|
||||
for _, mountOption := range systemOptions {
|
||||
allMountOptions.Insert(mountOption)
|
||||
}
|
||||
return sets.List(allMountOptions)
|
||||
}
|
||||
|
||||
// ContainsAccessMode returns whether the requested mode is contained by modes
|
||||
func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
|
||||
for _, m := range modes {
|
||||
if m == mode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ContainsAllAccessModes returns whether all of the requested modes are contained by modes
|
||||
func ContainsAllAccessModes(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
|
||||
for _, mode := range requestedModes {
|
||||
if !ContainsAccessMode(indexedModes, mode) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// GetWindowsPath get a windows path
|
||||
func GetWindowsPath(path string) string {
|
||||
windowsPath := strings.Replace(path, "/", "\\", -1)
|
||||
if strings.HasPrefix(windowsPath, "\\") {
|
||||
windowsPath = "c:" + windowsPath
|
||||
}
|
||||
return windowsPath
|
||||
}
|
||||
|
||||
// GetUniquePodName returns a unique identifier to reference a pod by
|
||||
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
|
||||
return types.UniquePodName(pod.UID)
|
||||
}
|
||||
|
||||
// GetUniqueVolumeName returns a unique name representing the volume/plugin.
|
||||
// Caller should ensure that volumeName is a name/ID uniquely identifying the
|
||||
// actual backing device, directory, path, etc. for a particular volume.
|
||||
// The returned name can be used to uniquely reference the volume, for example,
|
||||
// to prevent operations (attach/detach or mount/unmount) from being triggered
|
||||
// on the same volume.
|
||||
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
|
||||
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
|
||||
}
|
||||
|
||||
// GetUniqueVolumeNameFromSpecWithPod returns a unique volume name with pod
|
||||
// name included. This is useful to generate different names for different pods
|
||||
// on same volume.
|
||||
func GetUniqueVolumeNameFromSpecWithPod(
|
||||
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
|
||||
return v1.UniqueVolumeName(
|
||||
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
|
||||
}
|
||||
|
||||
// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique
|
||||
// name representing the volume defined in the specified volume spec.
|
||||
// This returned name can be used to uniquely reference the actual backing
|
||||
// device, directory, path, etc. referenced by the given volumeSpec.
|
||||
// If the given plugin does not support the volume spec, this returns an error.
|
||||
func GetUniqueVolumeNameFromSpec(
|
||||
volumePlugin volume.VolumePlugin,
|
||||
volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
|
||||
if volumePlugin == nil {
|
||||
return "", fmt.Errorf(
|
||||
"volumePlugin should not be nil. volumeSpec.Name=%q",
|
||||
volumeSpec.Name())
|
||||
}
|
||||
|
||||
volumeName, err := volumePlugin.GetVolumeName(volumeSpec)
|
||||
if err != nil || volumeName == "" {
|
||||
return "", fmt.Errorf(
|
||||
"failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v",
|
||||
volumeSpec.Name(),
|
||||
err)
|
||||
}
|
||||
|
||||
return GetUniqueVolumeName(
|
||||
volumePlugin.GetPluginName(),
|
||||
volumeName),
|
||||
nil
|
||||
}
|
||||
|
||||
// IsPodTerminated checks if pod is terminated
|
||||
func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool {
|
||||
// TODO: the guarantees provided by kubelet status are not sufficient to guarantee it's safe to ignore a deleted pod,
|
||||
// even if everything is notRunning (kubelet does not guarantee that when pod status is waiting that it isn't trying
|
||||
// to start a container).
|
||||
return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.InitContainerStatuses) && notRunning(podStatus.ContainerStatuses) && notRunning(podStatus.EphemeralContainerStatuses))
|
||||
}
|
||||
|
||||
// notRunning returns true if every status is terminated or waiting, or the status list
|
||||
// is empty.
|
||||
func notRunning(statuses []v1.ContainerStatus) bool {
|
||||
for _, status := range statuses {
|
||||
if status.State.Terminated == nil && status.State.Waiting == nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
|
||||
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
|
||||
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
|
||||
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
|
||||
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
|
||||
// the unique volume names.
|
||||
func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
|
||||
components := strings.SplitN(string(uniqueName), "/", 3)
|
||||
if len(components) != 3 {
|
||||
return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName)
|
||||
}
|
||||
pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
|
||||
return pluginName, components[2], nil
|
||||
}
|
||||
|
||||
// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter
|
||||
// and Exec taken from given VolumeHost.
|
||||
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
|
||||
mounter := host.GetMounter(pluginName)
|
||||
exec := host.GetExec(pluginName)
|
||||
return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}
|
||||
}
|
||||
|
||||
// GetVolumeMode retrieves VolumeMode from pv.
|
||||
// If the volume doesn't have PersistentVolume, it's an inline volume,
|
||||
// should return volumeMode as filesystem to keep existing behavior.
|
||||
func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) {
|
||||
if volumeSpec == nil || volumeSpec.PersistentVolume == nil {
|
||||
return v1.PersistentVolumeFilesystem, nil
|
||||
}
|
||||
if volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
|
||||
return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil
|
||||
}
|
||||
return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name())
|
||||
}
|
||||
|
||||
// GetPersistentVolumeClaimQualifiedName returns a qualified name for pvc.
|
||||
func GetPersistentVolumeClaimQualifiedName(claim *v1.PersistentVolumeClaim) string {
|
||||
return utilstrings.JoinQualifiedName(claim.GetNamespace(), claim.GetName())
|
||||
}
|
||||
|
||||
// CheckVolumeModeFilesystem checks VolumeMode.
|
||||
// If the mode is Filesystem, return true otherwise return false.
|
||||
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
|
||||
volumeMode, err := GetVolumeMode(volumeSpec)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if volumeMode == v1.PersistentVolumeBlock {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// CheckPersistentVolumeClaimModeBlock checks VolumeMode.
|
||||
// If the mode is Block, return true otherwise return false.
|
||||
func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool {
|
||||
return pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock
|
||||
}
|
||||
|
||||
// IsWindowsUNCPath checks if path is prefixed with \\
|
||||
// This can be used to skip any processing of paths
|
||||
// that point to SMB shares, local named pipes and local UNC path
|
||||
func IsWindowsUNCPath(goos, path string) bool {
|
||||
if goos != "windows" {
|
||||
return false
|
||||
}
|
||||
// Check for UNC prefix \\
|
||||
if strings.HasPrefix(path, `\\`) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsWindowsLocalPath checks if path is a local path
|
||||
// prefixed with "/" or "\" like "/foo/bar" or "\foo\bar"
|
||||
func IsWindowsLocalPath(goos, path string) bool {
|
||||
if goos != "windows" {
|
||||
return false
|
||||
}
|
||||
if IsWindowsUNCPath(goos, path) {
|
||||
return false
|
||||
}
|
||||
if strings.Contains(path, ":") {
|
||||
return false
|
||||
}
|
||||
if !(strings.HasPrefix(path, `/`) || strings.HasPrefix(path, `\`)) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// MakeAbsolutePath convert path to absolute path according to GOOS
|
||||
func MakeAbsolutePath(goos, path string) string {
|
||||
if goos != "windows" {
|
||||
return filepath.Clean("/" + path)
|
||||
}
|
||||
// These are all for windows
|
||||
// If there is a colon, give up.
|
||||
if strings.Contains(path, ":") {
|
||||
return path
|
||||
}
|
||||
// If there is a slash, but no drive, add 'c:'
|
||||
if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
|
||||
return "c:" + path
|
||||
}
|
||||
// Otherwise, add 'c:\'
|
||||
return "c:\\" + path
|
||||
}
|
||||
|
||||
// MapBlockVolume is a utility function to provide a common way of mapping
|
||||
// block device path for a specified volume and pod. This function should be
|
||||
// called by volume plugins that implements volume.BlockVolumeMapper.Map() method.
|
||||
func MapBlockVolume(
|
||||
blkUtil volumepathhandler.BlockVolumePathHandler,
|
||||
devicePath,
|
||||
globalMapPath,
|
||||
podVolumeMapPath,
|
||||
volumeMapName string,
|
||||
podUID utypes.UID,
|
||||
) error {
|
||||
// map devicePath to global node path as bind mount
|
||||
mapErr := blkUtil.MapDevice(devicePath, globalMapPath, string(podUID), true /* bindMount */)
|
||||
if mapErr != nil {
|
||||
return fmt.Errorf("blkUtil.MapDevice failed. devicePath: %s, globalMapPath:%s, podUID: %s, bindMount: %v: %v",
|
||||
devicePath, globalMapPath, string(podUID), true, mapErr)
|
||||
}
|
||||
|
||||
// map devicePath to pod volume path
|
||||
mapErr = blkUtil.MapDevice(devicePath, podVolumeMapPath, volumeMapName, false /* bindMount */)
|
||||
if mapErr != nil {
|
||||
return fmt.Errorf("blkUtil.MapDevice failed. devicePath: %s, podVolumeMapPath:%s, volumeMapName: %s, bindMount: %v: %v",
|
||||
devicePath, podVolumeMapPath, volumeMapName, false, mapErr)
|
||||
}
|
||||
|
||||
// Take file descriptor lock to keep a block device opened. Otherwise, there is a case
|
||||
// that the block device is silently removed and attached another device with the same name.
|
||||
// Container runtime can't handle this problem. To avoid unexpected condition fd lock
|
||||
// for the block device is required.
|
||||
_, mapErr = blkUtil.AttachFileDevice(filepath.Join(globalMapPath, string(podUID)))
|
||||
if mapErr != nil {
|
||||
return fmt.Errorf("blkUtil.AttachFileDevice failed. globalMapPath:%s, podUID: %s: %v",
|
||||
globalMapPath, string(podUID), mapErr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmapBlockVolume is a utility function to provide a common way of unmapping
// block device path for a specified volume and pod. This function should be
// called by volume plugins that implement the volume.BlockVolumeUnmapper interface.
func UnmapBlockVolume(
	blkUtil volumepathhandler.BlockVolumePathHandler,
	globalUnmapPath,
	podDeviceUnmapPath,
	volumeMapName string,
	podUID utypes.UID,
) error {
	// Release file descriptor lock.
	err := blkUtil.DetachFileDevice(filepath.Join(globalUnmapPath, string(podUID)))
	if err != nil {
		return fmt.Errorf("blkUtil.DetachFileDevice failed. globalUnmapPath:%s, podUID: %s: %v",
			globalUnmapPath, string(podUID), err)
	}

	// unmap devicePath from pod volume path
	unmapDeviceErr := blkUtil.UnmapDevice(podDeviceUnmapPath, volumeMapName, false /* bindMount */)
	if unmapDeviceErr != nil {
		return fmt.Errorf("blkUtil.UnmapDevice failed. podDeviceUnmapPath:%s, volumeMapName: %s, bindMount: %v: %v",
			podDeviceUnmapPath, volumeMapName, false, unmapDeviceErr)
	}

	// unmap devicePath from global node path
	unmapDeviceErr = blkUtil.UnmapDevice(globalUnmapPath, string(podUID), true /* bindMount */)
	if unmapDeviceErr != nil {
		return fmt.Errorf("blkUtil.UnmapDevice failed. globalUnmapPath:%s, podUID: %s, bindMount: %v: %v",
			globalUnmapPath, string(podUID), true, unmapDeviceErr)
	}
	return nil
}

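A rough sketch of how a block volume plugin would call the map/unmap pair above (editorial example; the device path and kubelet directory paths are hypothetical):

package main

import (
	"fmt"

	utypes "k8s.io/apimachinery/pkg/types"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

func main() {
	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
	podUID := utypes.UID("11111111-2222-3333-4444-555555555555")

	// Hypothetical paths, normally derived from the volume plugin and kubelet directories.
	devicePath := "/dev/sdx"
	globalMapPath := "/var/lib/kubelet/plugins/kubernetes.io/some-plugin/volumeDevices/vol-1"
	podVolumeMapPath := "/var/lib/kubelet/pods/" + string(podUID) + "/volumeDevices/kubernetes.io~some-plugin"

	// Map: global bind mount, pod symlink, then the fd lock on the global path.
	if err := volumeutil.MapBlockVolume(blkUtil, devicePath, globalMapPath, podVolumeMapPath, "vol-1", podUID); err != nil {
		fmt.Println("map failed:", err)
		return
	}

	// Unmap: release the fd lock, then remove the pod and global mappings.
	if err := volumeutil.UnmapBlockVolume(blkUtil, globalMapPath, podVolumeMapPath, "vol-1", podUID); err != nil {
		fmt.Println("unmap failed:", err)
	}
}
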
// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
// volume vs. some other type
// Local means the volume is using storage from the local disk that is managed by kubelet.
// Ephemeral means the lifecycle of the volume is the same as the Pod.
func IsLocalEphemeralVolume(volume v1.Volume) bool {
	return volume.GitRepo != nil ||
		(volume.EmptyDir != nil && volume.EmptyDir.Medium == v1.StorageMediumDefault) ||
		volume.ConfigMap != nil
}

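For example (editorial sketch), an EmptyDir on the default medium counts as local ephemeral storage, while a memory-backed EmptyDir does not:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// EmptyDir on the default medium is backed by local kubelet-managed disk.
	emptyDir := v1.Volume{
		Name:         "scratch",
		VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumDefault}},
	}
	// A memory-backed EmptyDir does not use local disk storage.
	tmpfs := v1.Volume{
		Name:         "cache",
		VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}},
	}
	fmt.Println(volumeutil.IsLocalEphemeralVolume(emptyDir)) // true
	fmt.Println(volumeutil.IsLocalEphemeralVolume(tmpfs))    // false
}
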
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device.
// To save another sweep through containers, SELinux options are optionally collected too.
func GetPodVolumeNames(pod *v1.Pod, collectSELinuxOptions bool) (mounts sets.Set[string], devices sets.Set[string], seLinuxContainerContexts map[string][]*v1.SELinuxOptions) {
	mounts = sets.New[string]()
	devices = sets.New[string]()
	seLinuxContainerContexts = make(map[string][]*v1.SELinuxOptions)

	podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
		var seLinuxOptions *v1.SELinuxOptions
		if collectSELinuxOptions {
			effectiveContainerSecurity := securitycontext.DetermineEffectiveSecurityContext(pod, container)
			if effectiveContainerSecurity != nil {
				seLinuxOptions = effectiveContainerSecurity.SELinuxOptions
			}
		}

		if container.VolumeMounts != nil {
			for _, mount := range container.VolumeMounts {
				mounts.Insert(mount.Name)
				if seLinuxOptions != nil && collectSELinuxOptions {
					seLinuxContainerContexts[mount.Name] = append(seLinuxContainerContexts[mount.Name], seLinuxOptions.DeepCopy())
				}
			}
		}
		if container.VolumeDevices != nil {
			for _, device := range container.VolumeDevices {
				devices.Insert(device.Name)
			}
		}
		return true
	})
	return
}

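A small sketch of the returned sets (editorial example; it assumes the sets.Set[string] API from k8s.io/apimachinery/pkg/util/sets and the two-argument signature of the vendored version shown above):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:          "app",
				VolumeMounts:  []v1.VolumeMount{{Name: "config", MountPath: "/etc/app"}},
				VolumeDevices: []v1.VolumeDevice{{Name: "data", DevicePath: "/dev/xvda"}},
			}},
		},
	}

	mounts, devices, _ := volumeutil.GetPodVolumeNames(pod, false /* collectSELinuxOptions */)
	fmt.Println(mounts.UnsortedList())  // [config]
	fmt.Println(devices.UnsortedList()) // [data]
}
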
// FsUserFrom returns FsUser of pod, which is determined by the runAsUser
// attributes.
func FsUserFrom(pod *v1.Pod) *int64 {
	var fsUser *int64
	podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
		runAsUser, ok := securitycontext.DetermineEffectiveRunAsUser(pod, container)
		// One container doesn't specify a user, or there is more than one
		// non-root UID.
		if !ok || (fsUser != nil && *fsUser != *runAsUser) {
			fsUser = nil
			return false
		}
		if fsUser == nil {
			fsUser = runAsUser
		}
		return true
	})
	return fsUser
}

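A quick illustration (editorial sketch): when every container agrees on the same runAsUser, that UID becomes the fsUser; mixed or unspecified UIDs yield nil.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	uid := int64(1000)
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "a", SecurityContext: &v1.SecurityContext{RunAsUser: &uid}},
				{Name: "b", SecurityContext: &v1.SecurityContext{RunAsUser: &uid}},
			},
		},
	}
	// Every container agrees on UID 1000, so that value is used for fsUser.
	if fsUser := volumeutil.FsUserFrom(pod); fsUser != nil {
		fmt.Println(*fsUser) // 1000
	}
}
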
// HasMountRefs checks if the given mountPath has mountRefs.
// TODO: this is a workaround for the unmount device issue caused by gci mounter.
// In a GCI cluster, if the gci mounter is used for mounting, the container started by the
// mounter script will cause additional mounts to be created in the container. Since these
// mounts are irrelevant to the original mounts, they should not be considered when checking
// the mount references. The current solution is to filter out those mount paths that contain
// the k8s plugin suffix of the original mount path.
func HasMountRefs(mountPath string, mountRefs []string) bool {
	// A mountPath typically is like
	// /var/lib/kubelet/plugins/kubernetes.io/some-plugin/mounts/volume-XXXX
	// Mount refs can look like
	// /home/somewhere/var/lib/kubelet/plugins/kubernetes.io/some-plugin/...
	// but if /var/lib/kubelet is mounted to a different device a ref might be like
	// /mnt/some-other-place/kubelet/plugins/kubernetes.io/some-plugin/...
	// Neither of the above should be counted as a mount ref as those are handled
	// by the kubelet. What we're concerned about is a path like
	// /data/local/some/manual/mount
	// as unmounting could interrupt usage from that mountpoint.
	//
	// So instead of looking for the entire /var/lib/... path, the plugins/kubernetes.io/
	// suffix is trimmed off and searched for.
	//
	// If there isn't a /plugins/... path, the whole mountPath is used instead.
	pathToFind := mountPath
	if i := strings.Index(mountPath, kubernetesPluginPathPrefix); i > -1 {
		pathToFind = mountPath[i:]
	}
	for _, ref := range mountRefs {
		if !strings.Contains(ref, pathToFind) {
			return true
		}
	}
	return false
}

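An editorial sketch of the distinction drawn above (it assumes kubernetesPluginPathPrefix, defined elsewhere in this file, is the "/plugins/kubernetes.io/" suffix being searched for):

package main

import (
	"fmt"

	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	mountPath := "/var/lib/kubelet/plugins/kubernetes.io/some-plugin/mounts/volume-1"

	// A ref under another kubelet root still contains the plugins/kubernetes.io/ suffix,
	// so it is not treated as an external reference.
	kubeletRefs := []string{"/mnt/disks/kubelet/plugins/kubernetes.io/some-plugin/mounts/volume-1"}
	fmt.Println(volumeutil.HasMountRefs(mountPath, kubeletRefs)) // false

	// A manual bind mount elsewhere does not contain that suffix and counts as a real ref.
	manualRefs := []string{"/data/local/some/manual/mount"}
	fmt.Println(volumeutil.HasMountRefs(mountPath, manualRefs)) // true
}
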
// IsMultiAttachAllowed checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the
// attacher to fail fast in such cases.
// Please see https://github.com/kubernetes/kubernetes/issues/40669 and https://github.com/kubernetes/kubernetes/pull/40148#discussion_r98055047
func IsMultiAttachAllowed(volumeSpec *volume.Spec) bool {
	if volumeSpec == nil {
		// we don't know if it's supported or not and let the attacher fail later in cases it's not supported
		return true
	}

	if volumeSpec.Volume != nil {
		// Check for volume types which are known to fail slow or cause trouble when trying to multi-attach
		if volumeSpec.Volume.AzureDisk != nil ||
			volumeSpec.Volume.Cinder != nil {
			return false
		}
	}

	// Only if this volume is a persistent volume, we have reliable information on whether it's allowed or not to
	// multi-attach. We trust in the individual volume implementations to not allow unsupported access modes
	if volumeSpec.PersistentVolume != nil {
		// Check for persistent volume types which do not fail when trying to multi-attach
		if len(volumeSpec.PersistentVolume.Spec.AccessModes) == 0 {
			// No access mode specified so we don't know for sure. Let the attacher fail if needed
			return true
		}

		// Check if this volume is allowed to be attached to multiple PODs/nodes; if so, return true
		for _, accessMode := range volumeSpec.PersistentVolume.Spec.AccessModes {
			if accessMode == v1.ReadWriteMany || accessMode == v1.ReadOnlyMany {
				return true
			}
		}
		return false
	}

	// we don't know if it's supported or not and let the attacher fail later in cases it's not supported
	return true
}

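An editorial sketch with a persistent volume spec: a ReadWriteOnce-only PV is definitely not multi-attachable, while a ReadWriteMany PV leaves the final decision to the attacher:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// A PV that only allows ReadWriteOnce is definitely not multi-attachable.
	rwoSpec := &volume.Spec{
		PersistentVolume: &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{
				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			},
		},
	}
	fmt.Println(volumeutil.IsMultiAttachAllowed(rwoSpec)) // false

	// With ReadWriteMany present, multi-attach may be possible; the attacher decides.
	rwxSpec := &volume.Spec{
		PersistentVolume: &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{
				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
			},
		},
	}
	fmt.Println(volumeutil.IsMultiAttachAllowed(rwxSpec)) // true
}
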
// IsAttachableVolume checks if the given volumeSpec is an attachable volume or not
func IsAttachableVolume(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) bool {
	attachableVolumePlugin, _ := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
	if attachableVolumePlugin != nil {
		volumeAttacher, err := attachableVolumePlugin.NewAttacher()
		if err == nil && volumeAttacher != nil {
			return true
		}
	}

	return false
}

// IsDeviceMountableVolume checks if the given volumeSpec is a device mountable volume or not
func IsDeviceMountableVolume(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) bool {
	deviceMountableVolumePlugin, _ := volumePluginMgr.FindDeviceMountablePluginBySpec(volumeSpec)
	if deviceMountableVolumePlugin != nil {
		volumeDeviceMounter, err := deviceMountableVolumePlugin.NewDeviceMounter()
		if err == nil && volumeDeviceMounter != nil {
			return true
		}
	}

	return false
}

// GetReliableMountRefs calls mounter.GetMountRefs and retries on IsInconsistentReadError.
// To be used in volume reconstruction of volume plugins that don't have any protection
// against mounting a single volume on multiple nodes (such as attach/detach).
func GetReliableMountRefs(mounter mount.Interface, mountPath string) ([]string, error) {
	var paths []string
	var lastErr error
	err := wait.PollImmediate(10*time.Millisecond, time.Minute, func() (bool, error) {
		var err error
		paths, err = mounter.GetMountRefs(mountPath)
		if io.IsInconsistentReadError(err) {
			lastErr = err
			return false, nil
		}
		if err != nil {
			return false, err
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		return nil, lastErr
	}
	return paths, err
}

295
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go
generated
vendored
295
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go
generated
vendored
@ -1,295 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumepathhandler

import (
	"fmt"
	"os"
	"path/filepath"

	"k8s.io/klog/v2"
	"k8s.io/mount-utils"
	utilexec "k8s.io/utils/exec"

	"k8s.io/apimachinery/pkg/types"
)

const (
	losetupPath       = "losetup"
	ErrDeviceNotFound = "device not found"
)

// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
	// MapDevice creates a symbolic link to block device under specified map path
	MapDevice(devicePath string, mapPath string, linkName string, bindMount bool) error
	// UnmapDevice removes a symbolic link to block device under specified map path
	UnmapDevice(mapPath string, linkName string, bindMount bool) error
	// RemoveMapPath removes a file or directory on specified map path
	RemoveMapPath(mapPath string) error
	// IsSymlinkExist returns true if specified symbolic link exists
	IsSymlinkExist(mapPath string) (bool, error)
	// IsDeviceBindMountExist returns true if specified bind mount exists
	IsDeviceBindMountExist(mapPath string) (bool, error)
	// GetDeviceBindMountRefs searches bind mounts under global map path
	GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error)
	// FindGlobalMapPathUUIDFromPod finds the {pod uuid} symbolic link under globalMapPath
	// corresponding to map path symlink, and then returns the global map path with pod uuid.
	FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
	// AttachFileDevice takes a path to a regular file and makes it available as an
	// attached block device.
	AttachFileDevice(path string) (string, error)
	// DetachFileDevice takes a path to the attached block device and
	// detaches it from the block device.
	DetachFileDevice(path string) error
	// GetLoopDevice returns the full path to the loop device associated with the given path.
	GetLoopDevice(path string) (string, error)
}

// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
	var volumePathHandler VolumePathHandler
	return volumePathHandler
}

// VolumePathHandler implements the path-related operation handlers for block volumes
type VolumePathHandler struct {
}

// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string, bindMount bool) error {
	// Example of global map path:
	// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
	// linkName: {podUid}
	//
	// Example of pod device map path:
	// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
	// linkName: {volumeName}
	if len(devicePath) == 0 {
		return fmt.Errorf("failed to map device to map path. devicePath is empty")
	}
	if len(mapPath) == 0 {
		return fmt.Errorf("failed to map device to map path. mapPath is empty")
	}
	if !filepath.IsAbs(mapPath) {
		return fmt.Errorf("the map path should be absolute: map path: %s", mapPath)
	}
	klog.V(5).Infof("MapDevice: devicePath %s", devicePath)
	klog.V(5).Infof("MapDevice: mapPath %s", mapPath)
	klog.V(5).Infof("MapDevice: linkName %s", linkName)

	// Check and create mapPath
	_, err := os.Stat(mapPath)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("cannot validate map path: %s: %v", mapPath, err)
	}
	if err = os.MkdirAll(mapPath, 0750); err != nil {
		return fmt.Errorf("failed to mkdir %s: %v", mapPath, err)
	}

	if bindMount {
		return mapBindMountDevice(v, devicePath, mapPath, linkName)
	}
	return mapSymlinkDevice(v, devicePath, mapPath, linkName)
}

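An editorial sketch of the two call shapes MapDevice supports: a bind mount named after the pod UID under the global map path, and a symlink named after the volume under the pod device map path (all paths hypothetical; running this for real requires the device and directories to exist):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)

func main() {
	handler := volumepathhandler.NewBlockVolumePathHandler()

	// Global map path: a bind mount named after the pod UID.
	if err := handler.MapDevice("/dev/sdx",
		"/var/lib/kubelet/plugins/kubernetes.io/some-plugin/volumeDevices/vol-1",
		"11111111-2222-3333-4444-555555555555", true /* bindMount */); err != nil {
		fmt.Println("bind mount map failed:", err)
	}

	// Pod device map path: a symlink named after the volume.
	if err := handler.MapDevice("/dev/sdx",
		"/var/lib/kubelet/pods/11111111-2222-3333-4444-555555555555/volumeDevices/kubernetes.io~some-plugin",
		"vol-1", false /* bindMount */); err != nil {
		fmt.Println("symlink map failed:", err)
	}
}
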
func mapBindMountDevice(v VolumePathHandler, devicePath string, mapPath string, linkName string) error {
	// Check if the bind mount already exists
	linkPath := filepath.Join(mapPath, string(linkName))

	file, err := os.Stat(linkPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return fmt.Errorf("failed to stat file %s: %v", linkPath, err)
		}

		// Create file
		newFile, err := os.OpenFile(linkPath, os.O_CREATE|os.O_RDWR, 0750)
		if err != nil {
			return fmt.Errorf("failed to open file %s: %v", linkPath, err)
		}
		if err := newFile.Close(); err != nil {
			return fmt.Errorf("failed to close file %s: %v", linkPath, err)
		}
	} else {
		// Check if it is a device file
		// TODO: Need to check if this device file is actually the expected bind mount
		if file.Mode()&os.ModeDevice == os.ModeDevice {
			klog.Warningf("Warning: Map skipped because bind mount already exist on the path: %v", linkPath)
			return nil
		}

		klog.Warningf("Warning: file %s is already exist but not mounted, skip creating file", linkPath)
	}

	// Bind mount file
	mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: utilexec.New()}
	if err := mounter.MountSensitiveWithoutSystemd(devicePath, linkPath, "" /* fsType */, []string{"bind"}, nil); err != nil {
		return fmt.Errorf("failed to bind mount devicePath: %s to linkPath %s: %v", devicePath, linkPath, err)
	}

	return nil
}

func mapSymlinkDevice(v VolumePathHandler, devicePath string, mapPath string, linkName string) error {
	// Remove the old symbolic link (or file), then create a new one.
	// This is needed because an existing symbolic link may be
	// stale across a node reboot.
	linkPath := filepath.Join(mapPath, string(linkName))
	if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove file %s: %v", linkPath, err)
	}
	return os.Symlink(devicePath, linkPath)
}

// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string, bindMount bool) error {
	if len(mapPath) == 0 {
		return fmt.Errorf("failed to unmap device from map path. mapPath is empty")
	}
	klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
	klog.V(5).Infof("UnmapDevice: linkName %s", linkName)

	if bindMount {
		return unmapBindMountDevice(v, mapPath, linkName)
	}
	return unmapSymlinkDevice(v, mapPath, linkName)
}

func unmapBindMountDevice(v VolumePathHandler, mapPath string, linkName string) error {
	// Check bind mount exists
	linkPath := filepath.Join(mapPath, string(linkName))
	if isMountExist, checkErr := v.IsDeviceBindMountExist(linkPath); checkErr != nil {
		return checkErr
	} else if !isMountExist {
		klog.Warningf("Warning: Unmap skipped because bind mount does not exist on the path: %v", linkPath)

		// Check if linkPath still exists
		if _, err := os.Stat(linkPath); err != nil {
			if !os.IsNotExist(err) {
				return fmt.Errorf("failed to check if path %s exists: %v", linkPath, err)
			}
			// linkPath has already been removed
			return nil
		}
		// Remove file
		if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("failed to remove file %s: %v", linkPath, err)
		}
		return nil
	}

	// Unmount file
	mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: utilexec.New()}
	if err := mounter.Unmount(linkPath); err != nil {
		return fmt.Errorf("failed to unmount linkPath %s: %v", linkPath, err)
	}

	// Remove file
	if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove file %s: %v", linkPath, err)
	}

	return nil
}

func unmapSymlinkDevice(v VolumePathHandler, mapPath string, linkName string) error {
	// Check symbolic link exists
	linkPath := filepath.Join(mapPath, string(linkName))
	if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
		return checkErr
	} else if !islinkExist {
		klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
		return nil
	}
	return os.Remove(linkPath)
}

// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
	if len(mapPath) == 0 {
		return fmt.Errorf("failed to remove map path. mapPath is empty")
	}
	klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
	err := os.RemoveAll(mapPath)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove directory %s: %v", mapPath, err)
	}
	return nil
}

// IsSymlinkExist returns true if the specified file exists and its type is a symbolic link.
// If the file doesn't exist, or it exists but is not a symbolic link, return false with no error.
// In other cases, return false with the error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
	fi, err := os.Lstat(mapPath)
	if err != nil {
		// If file doesn't exist, return false and no error
		if os.IsNotExist(err) {
			return false, nil
		}
		// Return error from Lstat()
		return false, fmt.Errorf("failed to Lstat file %s: %v", mapPath, err)
	}
	// If the file exists and it's a symbolic link, return true and no error
	if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		return true, nil
	}
	// If the file exists but it's not a symbolic link, return false and no error
	return false, nil
}

// IsDeviceBindMountExist returns true if the specified file exists and its type is a device.
// If the file doesn't exist, or it exists but is not a device, return false with no error.
// In other cases, return false with the error from Lstat().
func (v VolumePathHandler) IsDeviceBindMountExist(mapPath string) (bool, error) {
	fi, err := os.Lstat(mapPath)
	if err != nil {
		// If file doesn't exist, return false and no error
		if os.IsNotExist(err) {
			return false, nil
		}

		// Return error from Lstat()
		return false, fmt.Errorf("failed to Lstat file %s: %v", mapPath, err)
	}
	// If the file exists and it's a device, return true and no error
	if fi.Mode()&os.ModeDevice == os.ModeDevice {
		return true, nil
	}
	// If the file exists but it's not a device, return false and no error
	return false, nil
}

// GetDeviceBindMountRefs searches bind mounts under global map path
func (v VolumePathHandler) GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error) {
	var refs []string
	files, err := os.ReadDir(mapPath)
	if err != nil {
		return nil, err
	}
	for _, file := range files {
		if file.Type()&os.ModeDevice != os.ModeDevice {
			continue
		}
		filename := file.Name()
		// TODO: Might need to check if the file is actually linked to devPath
		refs = append(refs, filepath.Join(mapPath, filename))
	}
	klog.V(5).Infof("GetDeviceBindMountRefs: refs %v", refs)
	return refs, nil
}

236
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go
generated
vendored
236
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go
generated
vendored
@ -1,236 +0,0 @@
//go:build linux
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumepathhandler

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"golang.org/x/sys/unix"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
	blockDevicePath, err := v.GetLoopDevice(path)
	if err != nil && err.Error() != ErrDeviceNotFound {
		return "", fmt.Errorf("GetLoopDevice failed for path %s: %v", path, err)
	}

	// If no existing loop device for the path, create one
	if blockDevicePath == "" {
		klog.V(4).Infof("Creating device for path: %s", path)
		blockDevicePath, err = makeLoopDevice(path)
		if err != nil {
			return "", fmt.Errorf("makeLoopDevice failed for path %s: %v", path, err)
		}
	}
	return blockDevicePath, nil
}

// DetachFileDevice takes a path to the attached block device and
// detaches it from the block device.
func (v VolumePathHandler) DetachFileDevice(path string) error {
	loopPath, err := v.GetLoopDevice(path)
	if err != nil {
		if err.Error() == ErrDeviceNotFound {
			klog.Warningf("couldn't find loopback device which takes file descriptor lock. Skip detaching device. device path: %q", path)
		} else {
			return fmt.Errorf("GetLoopDevice failed for path %s: %v", path, err)
		}
	} else {
		if len(loopPath) != 0 {
			err = removeLoopDevice(loopPath)
			if err != nil {
				return fmt.Errorf("removeLoopDevice failed for path %s: %v", path, err)
			}
		}
	}
	return nil
}

// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
	_, err := os.Stat(path)
	if os.IsNotExist(err) {
		return "", errors.New(ErrDeviceNotFound)
	}
	if err != nil {
		return "", fmt.Errorf("not attachable: %v", err)
	}

	return getLoopDeviceFromSysfs(path)
}

func makeLoopDevice(path string) (string, error) {
	args := []string{"-f", path}
	cmd := exec.Command(losetupPath, args...)

	out, err := cmd.CombinedOutput()
	if err != nil {
		klog.V(2).Infof("Failed device create command for path: %s %v %s", path, err, out)
		return "", fmt.Errorf("losetup %s failed: %v", strings.Join(args, " "), err)
	}

	return getLoopDeviceFromSysfs(path)
}

// removeLoopDevice removes specified loopback device
func removeLoopDevice(device string) error {
	args := []string{"-d", device}
	cmd := exec.Command(losetupPath, args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		if _, err := os.Stat(device); os.IsNotExist(err) {
			return nil
		}
		klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
		return fmt.Errorf("losetup -d %s failed: %v", device, err)
	}
	return nil
}

// getLoopDeviceFromSysfs finds the backing file for a loop
// device from sysfs via "/sys/block/loop*/loop/backing_file".
func getLoopDeviceFromSysfs(path string) (string, error) {
	// If the file is a symlink.
	realPath, err := filepath.EvalSymlinks(path)
	if err != nil {
		return "", fmt.Errorf("failed to evaluate path %s: %s", path, err)
	}

	devices, err := filepath.Glob("/sys/block/loop*")
	if err != nil {
		return "", fmt.Errorf("failed to list loop devices in sysfs: %s", err)
	}

	for _, device := range devices {
		backingFile := fmt.Sprintf("%s/loop/backing_file", device)

		// The contents of this file is the absolute path of "path".
		data, err := os.ReadFile(backingFile)
		if err != nil {
			continue
		}

		// Return the first match.
		backingFilePath := cleanBackingFilePath(string(data))
		if backingFilePath == path || backingFilePath == realPath {
			return fmt.Sprintf("/dev/%s", filepath.Base(device)), nil
		}
	}

	return "", errors.New(ErrDeviceNotFound)
}

// cleanBackingFilePath removes any trailing substrings that are not part of the backing file path.
func cleanBackingFilePath(path string) string {
	// If the block device was deleted, the path will contain a "(deleted)" suffix
	path = strings.TrimSpace(path)
	path = strings.TrimSuffix(path, "(deleted)")
	return strings.TrimSpace(path)
}

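Since cleanBackingFilePath is unexported, here is a stand-alone copy (editorial sketch, duplicated from the function above) showing how a sysfs backing_file entry for a deleted file is normalised:

package main

import (
	"fmt"
	"strings"
)

// cleanBackingFilePath trims whitespace and a trailing "(deleted)" marker,
// mirroring the helper defined in the vendored file above.
func cleanBackingFilePath(path string) string {
	path = strings.TrimSpace(path)
	path = strings.TrimSuffix(path, "(deleted)")
	return strings.TrimSpace(path)
}

func main() {
	// Hypothetical sysfs content for a backing file that has been removed.
	raw := "/var/lib/kubelet/plugins/vol-1 (deleted)\n"
	fmt.Printf("%q\n", cleanBackingFilePath(raw)) // "/var/lib/kubelet/plugins/vol-1"
}
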
// FindGlobalMapPathUUIDFromPod finds the {pod uuid} bind mount under globalMapPath
// corresponding to map path symlink, and then returns the global map path with pod uuid.
// (See pkg/volume/volume.go for details on a global map path and a pod device map path.)
// ex. mapPath symlink: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
//
// globalMapPath/{pod uuid} bind mount: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
	var globalMapPathUUID string
	// Find symbolic link named pod uuid under plugin dir
	err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if (fi.Mode()&os.ModeDevice == os.ModeDevice) && (fi.Name() == string(podUID)) {
			klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
			if res, err := compareBindMountAndSymlinks(path, mapPath); err == nil && res {
				globalMapPathUUID = path
			}
		}
		return nil
	})
	if err != nil {
		return "", fmt.Errorf("FindGlobalMapPathUUIDFromPod failed: %v", err)
	}
	klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
	// Return path contains global map path + {pod uuid}
	return globalMapPathUUID, nil
}

// compareBindMountAndSymlinks returns whether the global path (bind mount) and
// the pod path (symlink) are pointing to the same device.
// If there is an error while checking, it returns the error.
func compareBindMountAndSymlinks(global, pod string) (bool, error) {
	// To check if bind mount and symlink are pointing to the same device,
	// we need to check if they are pointing to the devices that have same major/minor number.

	// Get the major/minor number for global path
	devNumGlobal, err := getDeviceMajorMinor(global)
	if err != nil {
		return false, fmt.Errorf("getDeviceMajorMinor failed for path %s: %v", global, err)
	}

	// Get the symlinked device from the pod path
	devPod, err := os.Readlink(pod)
	if err != nil {
		return false, fmt.Errorf("failed to readlink path %s: %v", pod, err)
	}
	// Get the major/minor number for the symlinked device from the pod path
	devNumPod, err := getDeviceMajorMinor(devPod)
	if err != nil {
		return false, fmt.Errorf("getDeviceMajorMinor failed for path %s: %v", devPod, err)
	}
	klog.V(5).Infof("CompareBindMountAndSymlinks: devNumGlobal %s, devNumPod %s", devNumGlobal, devNumPod)

	// Check if the major/minor numbers are the same
	if devNumGlobal == devNumPod {
		return true, nil
	}
	return false, nil
}

// getDeviceMajorMinor returns the major/minor number for the path with the below format:
// major:minor (in hex)
// ex)
//
// fc:10
func getDeviceMajorMinor(path string) (string, error) {
	var stat unix.Stat_t

	if err := unix.Stat(path, &stat); err != nil {
		return "", fmt.Errorf("failed to stat path %s: %v", path, err)
	}

	devNumber := uint64(stat.Rdev)
	major := unix.Major(devNumber)
	minor := unix.Minor(devNumber)

	return fmt.Sprintf("%x:%x", major, minor), nil
}

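An editorial sketch of the hex major:minor formatting used above, built from a device number directly rather than a stat call on a real device node:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Compose a device number for major 252, minor 16 and format it the same
	// way getDeviceMajorMinor does: hex major, colon, hex minor.
	dev := unix.Mkdev(252, 16)
	fmt.Printf("%x:%x\n", unix.Major(dev), unix.Minor(dev)) // fc:10
}
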
@ -1,49 +0,0 @@
//go:build !linux
// +build !linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumepathhandler

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
	return "", fmt.Errorf("AttachFileDevice not supported for this build.")
}

// DetachFileDevice takes a path to the attached block device and
// detaches it from the block device.
func (v VolumePathHandler) DetachFileDevice(path string) error {
	return fmt.Errorf("DetachFileDevice not supported for this build.")
}

// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
	return "", fmt.Errorf("GetLoopDevice not supported for this build.")
}

// FindGlobalMapPathUUIDFromPod finds the {pod uuid} bind mount under globalMapPath
// corresponding to map path symlink, and then returns the global map path with pod uuid.
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
	return "", fmt.Errorf("FindGlobalMapPathUUIDFromPod not supported for this build.")
}