Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00.
Migrate from snapClient.VolumesnapshotV1alpha1Client to
snapClient.SnapshotV1alpha1Client and also update kube dependency

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Committed by: mergify[bot]
Parent: 3bc6771df8
Commit: 22ff5c0911
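On the ceph-csi side the change is mostly a clientset accessor rename; below is a minimal sketch of the call-site change, assuming the external-snapshotter v1alpha1 clientset import path and the usual client-gen accessor naming (both are assumptions, not taken verbatim from this diff):

package main

import (
	snapclient "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// getSnapshotContent illustrates the rename this commit performs.
func getSnapshotContent(cs snapclient.Interface, name string) error {
	// Before: cs.VolumesnapshotV1alpha1().VolumeSnapshotContents()...
	// After:  cs.SnapshotV1alpha1().VolumeSnapshotContents()...
	_, err := cs.SnapshotV1alpha1().VolumeSnapshotContents().Get(name, metav1.GetOptions{})
	return err
}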
vendor/k8s.io/kubernetes/pkg/volume/plugins.go (generated, vendored, 86 changes)
@@ -29,11 +29,15 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/validation"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	storagelisters "k8s.io/client-go/listers/storage/v1beta1"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 	"k8s.io/kubernetes/pkg/volume/util/subpath"
@@ -231,7 +235,7 @@ type AttachableVolumePlugin interface {
 	NewAttacher() (Attacher, error)
 	NewDetacher() (Detacher, error)
 	// CanAttach tests if provided volume spec is attachable
-	CanAttach(spec *Spec) bool
+	CanAttach(spec *Spec) (bool, error)
 }

 // DeviceMountableVolumePlugin is an extended interface of VolumePlugin and is used
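Plugins implementing AttachableVolumePlugin must adopt the two-value signature (the CanDeviceMount method in the next hunk follows the same pattern). A minimal sketch with a hypothetical examplePlugin type, not taken from this diff:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume"
)

type examplePlugin struct{}

// CanAttach now returns (bool, error), so attachability checks that hit the
// API server or the filesystem can surface failures instead of collapsing
// them into a bare `false`.
func (p *examplePlugin) CanAttach(spec *volume.Spec) (bool, error) {
	if spec == nil || (spec.Volume == nil && spec.PersistentVolume == nil) {
		return false, fmt.Errorf("cannot check attachability of a nil or empty spec")
	}
	return true, nil
}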
@@ -241,6 +245,8 @@ type DeviceMountableVolumePlugin interface {
 	NewDeviceMounter() (DeviceMounter, error)
 	NewDeviceUnmounter() (DeviceUnmounter, error)
 	GetDeviceMountRefs(deviceMountPath string) ([]string, error)
+	// CanDeviceMount determines if device in volume.Spec is mountable
+	CanDeviceMount(spec *Spec) (bool, error)
 }

 // ExpandableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that can be
@@ -319,6 +325,15 @@ type KubeletVolumeHost interface {
 	// SetKubeletError lets plugins set an error on the Kubelet runtime status
 	// that will cause the Kubelet to post NotReady status with the error message provided
 	SetKubeletError(err error)
+
+	// GetInformerFactory returns the informer factory for CSIDriverLister
+	GetInformerFactory() informers.SharedInformerFactory
+	// CSIDriverLister returns the informer lister for the CSIDriver API Object
+	CSIDriverLister() storagelisters.CSIDriverLister
+	// CSIDriverSynced returns the informer synced for the CSIDriver API Object
+	CSIDriversSynced() cache.InformerSynced
+	// WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
+	WaitForCacheSync() error
 }

 // AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use
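For context, one plausible way a volume host could back these new methods with a client-go shared informer factory; this is an illustrative sketch with assumed field wiring, not the kubelet's actual implementation:

package example

import (
	"fmt"

	"k8s.io/client-go/informers"
	storagelisters "k8s.io/client-go/listers/storage/v1beta1"
	"k8s.io/client-go/tools/cache"
)

// exampleHost sketches a KubeletVolumeHost backing the new informer methods.
type exampleHost struct {
	factory      informers.SharedInformerFactory
	driverLister storagelisters.CSIDriverLister
	driverSynced cache.InformerSynced
	stopCh       <-chan struct{}
}

func (h *exampleHost) GetInformerFactory() informers.SharedInformerFactory {
	return h.factory
}

func (h *exampleHost) CSIDriverLister() storagelisters.CSIDriverLister {
	return h.driverLister
}

func (h *exampleHost) CSIDriversSynced() cache.InformerSynced {
	return h.driverSynced
}

// WaitForCacheSync blocks until the CSIDriver informer cache has synced.
func (h *exampleHost) WaitForCacheSync() error {
	if !cache.WaitForCacheSync(h.stopCh, h.driverSynced) {
		return fmt.Errorf("CSIDriver informer cache failed to sync")
	}
	return nil
}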
@@ -327,6 +342,9 @@ type AttachDetachVolumeHost interface {
 	// CSINodeLister returns the informer lister for the CSINode API Object
 	CSINodeLister() storagelisters.CSINodeLister

+	// CSIDriverLister returns the informer lister for the CSIDriver API Object
+	CSIDriverLister() storagelisters.CSIDriverLister
+
 	// IsAttachDetachController is an interface marker to strictly tie AttachDetachVolumeHost
 	// to the attachDetachController
 	IsAttachDetachController() bool
@@ -434,9 +452,10 @@ type VolumePluginMgr struct {

 // Spec is an internal representation of a volume. All API volume types translate to Spec.
 type Spec struct {
-	Volume           *v1.Volume
-	PersistentVolume *v1.PersistentVolume
-	ReadOnly         bool
+	Volume                          *v1.Volume
+	PersistentVolume                *v1.PersistentVolume
+	ReadOnly                        bool
+	InlineVolumeSpecForCSIMigration bool
 }

 // Name returns the name of either Volume or PersistentVolume, one of which must not be nil.
@@ -629,11 +648,9 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
 		return nil, fmt.Errorf("Could not find plugin because volume spec is nil")
 	}

-	matchedPluginNames := []string{}
 	matches := []VolumePlugin{}
-	for k, v := range pm.plugins {
+	for _, v := range pm.plugins {
 		if v.CanSupport(spec) {
-			matchedPluginNames = append(matchedPluginNames, k)
 			matches = append(matches, v)
 		}
 	}
@@ -641,7 +658,6 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
 		pm.refreshProbedPlugins()
 		for _, plugin := range pm.probedPlugins {
 			if plugin.CanSupport(spec) {
-				matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
 				matches = append(matches, plugin)
 			}
 		}
@@ -650,6 +666,10 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
 		return nil, fmt.Errorf("no volume plugin matched")
 	}
 	if len(matches) > 1 {
+		matchedPluginNames := []string{}
+		for _, plugin := range matches {
+			matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+		}
 		return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
 	}
 	return matches[0], nil
@@ -666,11 +686,9 @@ func (pm *VolumePluginMgr) IsPluginMigratableBySpec(spec *Spec) (bool, error) {
 		return false, fmt.Errorf("could not find if plugin is migratable because volume spec is nil")
 	}

-	matchedPluginNames := []string{}
 	matches := []VolumePlugin{}
-	for k, v := range pm.plugins {
+	for _, v := range pm.plugins {
 		if v.CanSupport(spec) {
-			matchedPluginNames = append(matchedPluginNames, k)
 			matches = append(matches, v)
 		}
 	}
@@ -680,6 +698,10 @@ func (pm *VolumePluginMgr) IsPluginMigratableBySpec(spec *Spec) (bool, error) {
 		return false, nil
 	}
 	if len(matches) > 1 {
+		matchedPluginNames := []string{}
+		for _, plugin := range matches {
+			matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+		}
 		return false, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
 	}

@@ -693,27 +715,24 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
 	defer pm.mutex.Unlock()

 	// Once we can get rid of legacy names we can reduce this to a map lookup.
-	matchedPluginNames := []string{}
 	matches := []VolumePlugin{}
-	for k, v := range pm.plugins {
-		if v.GetPluginName() == name {
-			matchedPluginNames = append(matchedPluginNames, k)
-			matches = append(matches, v)
-		}
+	if v, found := pm.plugins[name]; found {
+		matches = append(matches, v)
 	}

 	pm.refreshProbedPlugins()
-	for _, plugin := range pm.probedPlugins {
-		if plugin.GetPluginName() == name {
-			matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
-			matches = append(matches, plugin)
-		}
+	if plugin, found := pm.probedPlugins[name]; found {
+		matches = append(matches, plugin)
 	}

 	if len(matches) == 0 {
 		return nil, fmt.Errorf("no volume plugin matched")
 	}
 	if len(matches) > 1 {
+		matchedPluginNames := []string{}
+		for _, plugin := range matches {
+			matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+		}
 		return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
 	}
 	return matches[0], nil
@@ -824,7 +843,7 @@ func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (Provision
 		return nil, fmt.Errorf("no provisionable volume plugin matched")
 	}

-// FindDeletablePluginBySppec fetches a persistent volume plugin by spec. If
+// FindDeletablePluginBySpec fetches a persistent volume plugin by spec. If
 // no plugin is found, returns error.
 func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) {
 	volumePlugin, err := pm.FindPluginBySpec(spec)
@@ -873,7 +892,9 @@ func (pm *VolumePluginMgr) FindAttachablePluginBySpec(spec *Spec) (AttachableVol
 		return nil, err
 	}
 	if attachableVolumePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok {
-		if attachableVolumePlugin.CanAttach(spec) {
+		if canAttach, err := attachableVolumePlugin.CanAttach(spec); err != nil {
+			return nil, err
+		} else if canAttach {
 			return attachableVolumePlugin, nil
 		}
 	}
@@ -902,7 +923,11 @@ func (pm *VolumePluginMgr) FindDeviceMountablePluginBySpec(spec *Spec) (DeviceMo
 		return nil, err
 	}
 	if deviceMountableVolumePlugin, ok := volumePlugin.(DeviceMountableVolumePlugin); ok {
-		return deviceMountableVolumePlugin, nil
+		if canMount, err := deviceMountableVolumePlugin.CanDeviceMount(spec); err != nil {
+			return nil, err
+		} else if canMount {
+			return deviceMountableVolumePlugin, nil
+		}
 	}
 	return nil, nil
 }
@@ -1004,6 +1029,17 @@ func (pm *VolumePluginMgr) FindNodeExpandablePluginByName(name string) (NodeExpa
 	return nil, nil
 }

+func (pm *VolumePluginMgr) Run(stopCh <-chan struct{}) {
+	kletHost, ok := pm.Host.(KubeletVolumeHost)
+	if ok {
+		// start informer for CSIDriver
+		if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
+			informerFactory := kletHost.GetInformerFactory()
+			informerFactory.Start(stopCh)
+		}
+	}
+}
+
 // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler
 // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests
 // for emptiness. Most attributes of the template will be correct for most
vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go (generated, vendored, 18 changes)
@@ -126,7 +126,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 	}

 	// (2)
-	dataDirPath := path.Join(w.targetDir, dataDirName)
+	dataDirPath := filepath.Join(w.targetDir, dataDirName)
 	oldTsDir, err := os.Readlink(dataDirPath)
 	if err != nil {
 		if !os.IsNotExist(err) {
@@ -137,7 +137,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 		// empty oldTsDir indicates that it didn't exist
 		oldTsDir = ""
 	}
-	oldTsPath := path.Join(w.targetDir, oldTsDir)
+	oldTsPath := filepath.Join(w.targetDir, oldTsDir)

 	var pathsToRemove sets.String
 	// if there was no old version, there's nothing to remove
@@ -183,7 +183,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
 	}

 	// (8)
-	newDataDirPath := path.Join(w.targetDir, newDataDirName)
+	newDataDirPath := filepath.Join(w.targetDir, newDataDirName)
 	if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
 		os.RemoveAll(tsDir)
 		klog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
@@ -279,7 +279,7 @@ func validatePath(targetPath string) error {
 // shouldWritePayload returns whether the payload should be written to disk.
 func shouldWritePayload(payload map[string]FileProjection, oldTsDir string) (bool, error) {
 	for userVisiblePath, fileProjection := range payload {
-		shouldWrite, err := shouldWriteFile(path.Join(oldTsDir, userVisiblePath), fileProjection.Data)
+		shouldWrite, err := shouldWriteFile(filepath.Join(oldTsDir, userVisiblePath), fileProjection.Data)
 		if err != nil {
 			return false, err
 		}
@@ -375,7 +375,7 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
 	for userVisiblePath, fileProjection := range payload {
 		content := fileProjection.Data
 		mode := os.FileMode(fileProjection.Mode)
-		fullPath := path.Join(dir, userVisiblePath)
+		fullPath := filepath.Join(dir, userVisiblePath)
 		baseDir, _ := filepath.Split(fullPath)

 		err := os.MkdirAll(baseDir, os.ModePerm)
@@ -419,11 +419,11 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection)
 			slashpos = len(userVisiblePath)
 		}
 		linkname := userVisiblePath[:slashpos]
-		_, err := os.Readlink(path.Join(w.targetDir, linkname))
+		_, err := os.Readlink(filepath.Join(w.targetDir, linkname))
 		if err != nil && os.IsNotExist(err) {
 			// The link into the data directory for this path doesn't exist; create it
-			visibleFile := path.Join(w.targetDir, linkname)
-			dataDirFile := path.Join(dataDirName, linkname)
+			visibleFile := filepath.Join(w.targetDir, linkname)
+			dataDirFile := filepath.Join(dataDirName, linkname)

 			err = os.Symlink(dataDirFile, visibleFile)
 			if err != nil {
@@ -444,7 +444,7 @@ func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
 		if strings.Contains(p, ps) {
 			continue
 		}
-		if err := os.Remove(path.Join(w.targetDir, p)); err != nil {
+		if err := os.Remove(filepath.Join(w.targetDir, p)); err != nil {
 			klog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
 			lasterr = err
 		}
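The path-to-filepath changes in this file (and in the files below) matter because the standard library's path package is specified for forward-slash paths, while path/filepath uses the operating system's separator, which is what on-disk volume paths need. A quick illustration:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always joins with '/', regardless of OS.
	fmt.Println(path.Join("dir", "sub", "file")) // dir/sub/file everywhere
	// filepath.Join uses the OS separator: '/' on Linux, '\' on Windows.
	fmt.Println(filepath.Join("dir", "sub", "file"))
}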
vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go (generated, vendored, 6 changes)
@@ -22,7 +22,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
-	"path"
+	"path/filepath"
 	"strconv"
 	"strings"

@@ -77,10 +77,10 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
 		return devices
 	}
 	disk := parts[2]
-	slavesPath := path.Join("/sys/block/", disk, "/slaves/")
+	slavesPath := filepath.Join("/sys/block/", disk, "/slaves/")
 	if files, err := io.ReadDir(slavesPath); err == nil {
 		for _, f := range files {
-			devices = append(devices, path.Join("/dev/", f.Name()))
+			devices = append(devices, filepath.Join("/dev/", f.Name()))
 		}
 	}
 	return devices
vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs.go (generated, vendored, 19 changes)
@@ -27,6 +27,7 @@ import (
 	"golang.org/x/sys/unix"

 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/volume/util/quota"
 )

 // FSInfo linux returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error)
@@ -56,6 +57,15 @@ func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {

 // DiskUsage gets disk usage of specified path.
 func DiskUsage(path string) (*resource.Quantity, error) {
+	// First check whether the quota system knows about this directory
+	// A nil quantity with no error means that the path does not support quotas
+	// and we should use other mechanisms.
+	data, err := quota.GetConsumption(path)
+	if data != nil {
+		return data, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("unable to retrieve disk consumption via quota for %s: %v", path, err)
+	}
 	// Uses the same niceness level as cadvisor.fs does when running du
 	// Uses -B 1 to always scale to a blocksize of 1 byte
 	out, err := exec.Command("nice", "-n", "19", "du", "-s", "-B", "1", path).CombinedOutput()
@@ -76,6 +86,15 @@ func Find(path string) (int64, error) {
 	if path == "" {
 		return 0, fmt.Errorf("invalid directory")
 	}
+	// First check whether the quota system knows about this directory
+	// A nil quantity with no error means that the path does not support quotas
+	// and we should use other mechanisms.
+	inodes, err := quota.GetInodes(path)
+	if inodes != nil {
+		return inodes.Value(), nil
+	} else if err != nil {
+		return 0, fmt.Errorf("unable to retrieve inode consumption via quota for %s: %v", path, err)
+	}
 	var counter byteCounter
 	var stderr bytes.Buffer
 	findCmd := exec.Command("find", path, "-xdev", "-printf", ".")
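Callers of DiskUsage and Find keep the same contract; the quota lookup is only a fast path tried before the du/find fallback. A usage sketch with a hypothetical directory path:

package main

import (
	"fmt"

	volfs "k8s.io/kubernetes/pkg/volume/util/fs"
)

func main() {
	dir := "/var/lib/kubelet/pods/example" // hypothetical path
	// DiskUsage now consults filesystem project quotas first and only
	// falls back to running `du` when the path has no quota applied.
	used, err := volfs.DiskUsage(dir)
	if err != nil {
		fmt.Println("disk usage check failed:", err)
		return
	}
	fmt.Println("bytes used:", used.Value())
}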
vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go (generated, vendored, 16 changes)
@@ -54,6 +54,15 @@ var storageOperationStatusMetric = prometheus.NewCounterVec(
 	[]string{"volume_plugin", "operation_name", "status"},
 )

+var storageOperationEndToEndLatencyMetric = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Name:    "volume_operation_total_seconds",
+		Help:    "Storage operation end to end duration in seconds",
+		Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600},
+	},
+	[]string{"plugin_name", "operation_name"},
+)
+
 func init() {
 	registerMetrics()
 }
@@ -62,6 +71,7 @@ func registerMetrics() {
 	prometheus.MustRegister(storageOperationMetric)
 	prometheus.MustRegister(storageOperationErrorMetric)
 	prometheus.MustRegister(storageOperationStatusMetric)
+	prometheus.MustRegister(storageOperationEndToEndLatencyMetric)
 }

 // OperationCompleteHook returns a hook to call when an operation is completed
@@ -95,3 +105,9 @@ func GetFullQualifiedPluginNameForVolume(pluginName string, spec *volume.Spec) s
 	}
 	return pluginName
 }
+
+// RecordOperationLatencyMetric records the end to end latency for certain operation
+// into metric volume_operation_total_seconds
+func RecordOperationLatencyMetric(plugin, operationName string, secondsTaken float64) {
+	storageOperationEndToEndLatencyMetric.WithLabelValues(plugin, operationName).Observe(secondsTaken)
+}
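A usage sketch for the new helper; the plugin and operation names below are made up:

package main

import (
	"time"

	volutil "k8s.io/kubernetes/pkg/volume/util"
)

func provisionExample() {
	start := time.Now()
	// ... perform the storage operation (e.g. provisioning) ...

	// Record the end-to-end duration into volume_operation_total_seconds,
	// labelled by plugin name and operation name.
	volutil.RecordOperationLatencyMetric("kubernetes.io/example-plugin",
		"volume_provision", time.Since(start).Seconds())
}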
vendor/k8s.io/kubernetes/pkg/volume/util/nested_volumes.go (generated, vendored, 6 changes)
@@ -18,12 +18,12 @@ package util

 import (
 	"fmt"
-	"k8s.io/api/core/v1"
 	"os"
-	"path"
+	"path/filepath"
 	"sort"
 	"strings"
+
+	"k8s.io/api/core/v1"
 )

 // getNestedMountpoints returns a list of mountpoint directories that should be created
@@ -90,7 +90,7 @@ func MakeNestedMountpoints(name, baseDir string, pod v1.Pod) error {
 		return err
 	}
 	for _, dir := range dirs {
-		err := os.MkdirAll(path.Join(baseDir, dir), 0755)
+		err := os.MkdirAll(filepath.Join(baseDir, dir), 0755)
 		if err != nil {
 			return fmt.Errorf("Unable to create nested volume mountpoints: %v", err)
 		}
vendor/k8s.io/kubernetes/pkg/volume/util/nsenter/nsenter_mount.go (generated, vendored, new file, 345 lines)
@@ -0,0 +1,345 @@
// +build linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nsenter

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/utils/nsenter"
	utilpath "k8s.io/utils/path"
)

const (
	// hostProcMountsPath is the default mount path for rootfs
	hostProcMountsPath = "/rootfs/proc/1/mounts"
	// hostProcMountinfoPath is the default mount info path for rootfs
	hostProcMountinfoPath = "/rootfs/proc/1/mountinfo"
)

// Mounter implements mount.Interface
// Currently, all docker containers receive their own mount namespaces.
// Mounter works by executing nsenter to run commands in
// the host's mount namespace.
type Mounter struct {
	ne *nsenter.Nsenter
	// rootDir is location of /var/lib/kubelet directory.
	rootDir string
}

// NewMounter creates a new mounter for kubelet that runs as a container.
func NewMounter(rootDir string, ne *nsenter.Nsenter) *Mounter {
	return &Mounter{
		rootDir: rootDir,
		ne:      ne,
	}
}

// Mounter implements mount.Interface
var _ = mount.Interface(&Mounter{})

// Mount runs mount(8) in the host's root mount namespace. Aside from this
// aspect, Mount has the same semantics as the mounter returned by mount.New()
func (n *Mounter) Mount(source string, target string, fstype string, options []string) error {
	bind, bindOpts, bindRemountOpts := mount.IsBind(options)

	if bind {
		err := n.doNsenterMount(source, target, fstype, bindOpts)
		if err != nil {
			return err
		}
		return n.doNsenterMount(source, target, fstype, bindRemountOpts)
	}

	return n.doNsenterMount(source, target, fstype, options)
}

// doNsenterMount nsenters the host's mount namespace and performs the
// requested mount.
func (n *Mounter) doNsenterMount(source, target, fstype string, options []string) error {
	klog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options)
	cmd, args := n.makeNsenterArgs(source, target, fstype, options)
	outputBytes, err := n.ne.Exec(cmd, args).CombinedOutput()
	if len(outputBytes) != 0 {
		klog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes))
	}
	return err
}

// makeNsenterArgs makes a list of argument to nsenter in order to do the
// requested mount.
func (n *Mounter) makeNsenterArgs(source, target, fstype string, options []string) (string, []string) {
	mountCmd := n.ne.AbsHostPath("mount")
	mountArgs := mount.MakeMountArgs(source, target, fstype, options)

	if systemdRunPath, hasSystemd := n.ne.SupportsSystemd(); hasSystemd {
		// Complete command line:
		// nsenter --mount=/rootfs/proc/1/ns/mnt -- /bin/systemd-run --description=... --scope -- /bin/mount -t <type> <what> <where>
		// Expected flow is:
		// * nsenter breaks out of container's mount namespace and executes
		//   host's systemd-run.
		// * systemd-run creates a transient scope (=~ cgroup) and executes its
		//   argument (/bin/mount) there.
		// * mount does its job, forks a fuse daemon if necessary and finishes.
		//   (systemd-run --scope finishes at this point, returning mount's exit
		//   code and stdout/stderr - that's one of --scope benefits).
		// * systemd keeps the fuse daemon running in the scope (i.e. in its own
		//   cgroup) until the fuse daemon dies (another --scope benefit).
		//   Kubelet container can be restarted and the fuse daemon survives.
		// * When the daemon dies (e.g. during unmount) systemd removes the
		//   scope automatically.
		mountCmd, mountArgs = mount.AddSystemdScope(systemdRunPath, target, mountCmd, mountArgs)
	} else {
		// Fall back to simple mount when the host has no systemd.
		// Complete command line:
		// nsenter --mount=/rootfs/proc/1/ns/mnt -- /bin/mount -t <type> <what> <where>
		// Expected flow is:
		// * nsenter breaks out of container's mount namespace and executes host's /bin/mount.
		// * mount does its job, forks a fuse daemon if necessary and finishes.
		// * Any fuse daemon runs in cgroup of kubelet docker container,
		//   restart of kubelet container will kill it!

		// No code here, mountCmd and mountArgs use /bin/mount
	}

	return mountCmd, mountArgs
}

// Unmount runs umount(8) in the host's mount namespace.
func (n *Mounter) Unmount(target string) error {
	args := []string{target}
	// No need to execute systemd-run here, it's enough that unmount is executed
	// in the host's mount namespace. It will finish appropriate fuse daemon(s)
	// running in any scope.
	klog.V(5).Infof("nsenter unmount args: %v", args)
	outputBytes, err := n.ne.Exec("umount", args).CombinedOutput()
	if len(outputBytes) != 0 {
		klog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes))
	}
	return err
}

// List returns a list of all mounted filesystems in the host's mount namespace.
func (*Mounter) List() ([]mount.MountPoint, error) {
	return mount.ListProcMounts(hostProcMountsPath)
}

// IsMountPointMatch tests if dir and mp are the same path
func (*Mounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool {
	deletedDir := fmt.Sprintf("%s\\040(deleted)", dir)
	return (mp.Path == dir) || (mp.Path == deletedDir)
}

// IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (n *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
	file, err := filepath.Abs(file)
	if err != nil {
		return true, err
	}

	// Check the directory exists
	if _, err = os.Stat(file); os.IsNotExist(err) {
		klog.V(5).Infof("findmnt: directory %s does not exist", file)
		return true, err
	}

	// Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts
	resolvedFile, err := n.EvalHostSymlinks(file)
	if err != nil {
		return true, err
	}

	// Add --first-only option: since we are testing for the absence of a mountpoint, it is sufficient to get only
	// the first of multiple possible mountpoints using --first-only.
	// Also add fstype output to make sure that the output of target file will give the full path
	// TODO: Need more refactoring for this function. Track the solution with issue #26996
	args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", resolvedFile}
	klog.V(5).Infof("nsenter findmnt args: %v", args)
	out, err := n.ne.Exec("findmnt", args).CombinedOutput()
	if err != nil {
		klog.V(2).Infof("Failed findmnt command for path %s: %s %v", resolvedFile, out, err)
		// Different operating systems behave differently for paths which are not mount points.
		// On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/".
		// It's safer to assume that it's not a mount point.
		return true, nil
	}
	mountTarget, err := parseFindMnt(string(out))
	if err != nil {
		return false, err
	}

	klog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", resolvedFile, mountTarget)

	if mountTarget == resolvedFile {
		klog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", resolvedFile)
		return false, nil
	}
	klog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", resolvedFile)
	return true, nil
}

// parse output of "findmnt -o target,fstype" and return just the target
func parseFindMnt(out string) (string, error) {
	// cut trailing newline
	out = strings.TrimSuffix(out, "\n")
	// cut everything after the last space - it's the filesystem type
	i := strings.LastIndex(out, " ")
	if i == -1 {
		return "", fmt.Errorf("error parsing findmnt output, expected at least one space: %q", out)
	}
	return out[:i], nil
}

// DeviceOpened checks if block device is in use by calling Open with O_EXCL flag.
// Returns true if open returns errno EBUSY, and false if errno is nil.
// Returns an error if errno is any error other than EBUSY.
// Returns with error if pathname is not a device.
func (n *Mounter) DeviceOpened(pathname string) (bool, error) {
	return mount.ExclusiveOpenFailsOnDevice(pathname)
}

// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (n *Mounter) PathIsDevice(pathname string) (bool, error) {
	pathType, err := n.GetFileType(pathname)
	isDevice := pathType == mount.FileTypeCharDev || pathType == mount.FileTypeBlockDev
	return isDevice, err
}

// GetDeviceNameFromMount given a mount point, find the volume id from checking /proc/mounts
func (n *Mounter) GetDeviceNameFromMount(mountPath, pluginMountDir string) (string, error) {
	return mount.GetDeviceNameFromMountLinux(n, mountPath, pluginMountDir)
}

// MakeRShared checks if path is shared and bind-mounts it as rshared if needed.
func (n *Mounter) MakeRShared(path string) error {
	return mount.DoMakeRShared(path, hostProcMountinfoPath)
}

// GetFileType checks for file/directory/socket/block/character devices.
func (n *Mounter) GetFileType(pathname string) (mount.FileType, error) {
	var pathType mount.FileType
	outputBytes, err := n.ne.Exec("stat", []string{"-L", "--printf=%F", pathname}).CombinedOutput()
	if err != nil {
		if strings.Contains(string(outputBytes), "No such file") {
			err = fmt.Errorf("%s does not exist", pathname)
		} else {
			err = fmt.Errorf("stat %s error: %v", pathname, string(outputBytes))
		}
		return pathType, err
	}

	switch string(outputBytes) {
	case "socket":
		return mount.FileTypeSocket, nil
	case "character special file":
		return mount.FileTypeCharDev, nil
	case "block special file":
		return mount.FileTypeBlockDev, nil
	case "directory":
		return mount.FileTypeDirectory, nil
	case "regular file", "regular empty file":
		return mount.FileTypeFile, nil
	}

	return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device")
}

// MakeDir creates a new directory.
func (n *Mounter) MakeDir(pathname string) error {
	args := []string{"-p", pathname}
	if _, err := n.ne.Exec("mkdir", args).CombinedOutput(); err != nil {
		return err
	}
	return nil
}

// MakeFile creates an empty file.
func (n *Mounter) MakeFile(pathname string) error {
	args := []string{pathname}
	if _, err := n.ne.Exec("touch", args).CombinedOutput(); err != nil {
		return err
	}
	return nil
}

// ExistsPath checks if pathname exists.
// Error is returned on any other error than "file not found".
func (n *Mounter) ExistsPath(pathname string) (bool, error) {
	// Resolve the symlinks but allow the target not to exist. EvalSymlinks
	// would return a generic error when the target does not exist.
	hostPath, err := n.ne.EvalSymlinks(pathname, false /* mustExist */)
	if err != nil {
		return false, err
	}
	kubeletpath := n.ne.KubeletPath(hostPath)
	return utilpath.Exists(utilpath.CheckFollowSymlink, kubeletpath)
}

// EvalHostSymlinks returns the path name after evaluating symlinks.
func (n *Mounter) EvalHostSymlinks(pathname string) (string, error) {
	return n.ne.EvalSymlinks(pathname, true)
}

// GetMountRefs finds all mount references to the path, returns a
// list of paths. Path could be a mountpoint path, device or a normal
// directory (for bind mount).
func (n *Mounter) GetMountRefs(pathname string) ([]string, error) {
	pathExists, pathErr := mount.PathExists(pathname)
	if !pathExists || mount.IsCorruptedMnt(pathErr) {
		return []string{}, nil
	} else if pathErr != nil {
		return nil, fmt.Errorf("Error checking path %s: %v", pathname, pathErr)
	}
	hostpath, err := n.ne.EvalSymlinks(pathname, true /* mustExist */)
	if err != nil {
		return nil, err
	}
	return mount.SearchMountPoints(hostpath, hostProcMountinfoPath)
}

// GetFSGroup returns FSGroup of pathname.
func (n *Mounter) GetFSGroup(pathname string) (int64, error) {
	hostPath, err := n.ne.EvalSymlinks(pathname, true /* mustExist */)
	if err != nil {
		return -1, err
	}
	kubeletpath := n.ne.KubeletPath(hostPath)
	return mount.GetFSGroupLinux(kubeletpath)
}

// GetSELinuxSupport tests if pathname is on a mount that supports SELinux.
func (n *Mounter) GetSELinuxSupport(pathname string) (bool, error) {
	return mount.GetSELinux(pathname, hostProcMountsPath)
}

// GetMode returns permissions of pathname.
func (n *Mounter) GetMode(pathname string) (os.FileMode, error) {
	hostPath, err := n.ne.EvalSymlinks(pathname, true /* mustExist */)
	if err != nil {
		return 0, err
	}
	kubeletpath := n.ne.KubeletPath(hostPath)
	return mount.GetModeLinux(kubeletpath)
}
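A construction sketch for the containerized-kubelet case this Mounter serves; the NewNsenter signature and the /rootfs convention follow k8s.io/utils/nsenter of this era and should be treated as assumptions:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/nsenter"
	utilexec "k8s.io/utils/exec"
	utilnsenter "k8s.io/utils/nsenter"
)

func main() {
	// Build the nsenter helper against the host root mounted at /rootfs
	// (assumed constructor signature; check the vendored copy).
	ne, err := utilnsenter.NewNsenter("/rootfs", utilexec.New())
	if err != nil {
		fmt.Println("nsenter unavailable:", err)
		return
	}
	mounter := nsenter.NewMounter("/var/lib/kubelet", ne)
	// Mount runs mount(8) in the host mount namespace, wrapped in a
	// systemd transient scope when the host has systemd.
	_ = mounter.Mount("/dev/sdb", "/var/lib/kubelet/target", "ext4", nil)
}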
vendor/k8s.io/kubernetes/pkg/volume/util/nsenter/nsenter_mount_unsupported.go (generated, vendored, new file, 139 lines)
@@ -0,0 +1,139 @@
// +build !linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nsenter

import (
	"errors"
	"os"

	"k8s.io/utils/nsenter"

	"k8s.io/kubernetes/pkg/util/mount"
)

// Mounter provides the mount.Interface implementation for unsupported
// platforms.
type Mounter struct{}

// NewMounter returns a new Mounter for the current system
func NewMounter(rootDir string, ne *nsenter.Nsenter) *Mounter {
	return &Mounter{}
}

var _ = mount.Interface(&Mounter{})

// Mount mounts the source to the target. It is a noop for unsupported systems
func (*Mounter) Mount(source string, target string, fstype string, options []string) error {
	return nil
}

// Unmount unmounts the target path from the system. It is a noop for unsupported
// systems
func (*Mounter) Unmount(target string) error {
	return nil
}

// List returns a list of all mounted filesystems. It is a noop for unsupported systems
func (*Mounter) List() ([]mount.MountPoint, error) {
	return []mount.MountPoint{}, nil
}

// IsMountPointMatch tests if dir and mp are the same path
func (*Mounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool {
	return (mp.Path == dir)
}

// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is a noop on unsupported systems
func (*Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
	return true, nil
}

// DeviceOpened checks if block device is in use. It is a noop for unsupported systems
func (*Mounter) DeviceOpened(pathname string) (bool, error) {
	return false, nil
}

// PathIsDevice checks if pathname refers to a device. It is a noop for unsupported
// systems
func (*Mounter) PathIsDevice(pathname string) (bool, error) {
	return true, nil
}

// GetDeviceNameFromMount finds the device name from its global mount point using the
// given mountpath and plugin location. It is a noop on unsupported platforms
func (*Mounter) GetDeviceNameFromMount(mountPath, pluginMountDir string) (string, error) {
	return "", nil
}

// MakeRShared checks if path is shared and bind-mounts it as rshared if needed.
// It is a noop on unsupported platforms
func (*Mounter) MakeRShared(path string) error {
	return nil
}

// GetFileType checks for file/directory/socket/block/character devices.
// Always returns an error and "fake" filetype on unsupported platforms
func (*Mounter) GetFileType(_ string) (mount.FileType, error) {
	return mount.FileType("fake"), errors.New("not implemented")
}

// MakeDir creates a new directory. Noop on unsupported platforms
func (*Mounter) MakeDir(pathname string) error {
	return nil
}

// MakeFile creates an empty file. Noop on unsupported platforms
func (*Mounter) MakeFile(pathname string) error {
	return nil
}

// ExistsPath checks if pathname exists. Always returns an error on unsupported
// platforms
func (*Mounter) ExistsPath(pathname string) (bool, error) {
	return true, errors.New("not implemented")
}

// EvalHostSymlinks returns the path name after evaluating symlinks. Always
// returns an error on unsupported platforms
func (*Mounter) EvalHostSymlinks(pathname string) (string, error) {
	return "", errors.New("not implemented")
}

// GetMountRefs finds all mount references to the path, returns a
// list of paths. Always returns an error on unsupported platforms
func (*Mounter) GetMountRefs(pathname string) ([]string, error) {
	return nil, errors.New("not implemented")
}

// GetFSGroup returns FSGroup of pathname. Always returns an error on unsupported platforms
func (*Mounter) GetFSGroup(pathname string) (int64, error) {
	return -1, errors.New("not implemented")
}

// GetSELinuxSupport tests if pathname is on a mount that supports SELinux.
// Always returns an error on unsupported platforms
func (*Mounter) GetSELinuxSupport(pathname string) (bool, error) {
	return false, errors.New("not implemented")
}

// GetMode returns permissions of pathname. Always returns an error on unsupported platforms
func (*Mounter) GetMode(pathname string) (os.FileMode, error) {
	return 0, errors.New("not implemented")
}
vendor/k8s.io/kubernetes/pkg/volume/util/quota/common/quota_linux_common.go (generated, vendored, new file, 105 lines)
@@ -0,0 +1,105 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package common

import (
	"regexp"
)

// QuotaID is generic quota identifier.
// Data type based on quotactl(2).
type QuotaID int32

const (
	// UnknownQuotaID -- cannot determine whether a quota is in force
	UnknownQuotaID QuotaID = -1
	// BadQuotaID -- Invalid quota
	BadQuotaID QuotaID = 0
)

const (
	acct      = iota
	enforcing = iota
)

// QuotaType -- type of quota to be applied
type QuotaType int

const (
	// FSQuotaAccounting for quotas for accounting only
	FSQuotaAccounting QuotaType = 1 << iota
	// FSQuotaEnforcing for quotas for enforcement
	FSQuotaEnforcing QuotaType = 1 << iota
)

// FirstQuota is the quota ID we start with.
// XXXXXXX Need a better way of doing this...
var FirstQuota QuotaID = 1048577

// MountsFile is the location of the system mount data
var MountsFile = "/proc/self/mounts"

// MountParseRegexp parses out /proc/self/mounts
var MountParseRegexp = regexp.MustCompilePOSIX("^([^ ]*)[ \t]*([^ ]*)[ \t]*([^ ]*)") // Ignore options etc.

// LinuxVolumeQuotaProvider returns an appropriate quota applier
// object if we can support quotas on this device
type LinuxVolumeQuotaProvider interface {
	// GetQuotaApplier retrieves an object that can apply
	// quotas (or nil if this provider cannot support quotas
	// on the device)
	GetQuotaApplier(mountpoint string, backingDev string) LinuxVolumeQuotaApplier
}

// LinuxVolumeQuotaApplier is a generic interface to any quota
// mechanism supported by Linux
type LinuxVolumeQuotaApplier interface {
	// GetQuotaOnDir gets the quota ID (if any) that applies to
	// this directory
	GetQuotaOnDir(path string) (QuotaID, error)

	// SetQuotaOnDir applies the specified quota ID to a directory.
	// Negative value for bytes means that a non-enforcing quota
	// should be applied (perhaps by setting a quota too large to
	// be hit)
	SetQuotaOnDir(path string, id QuotaID, bytes int64) error

	// QuotaIDIsInUse determines whether the quota ID is in use.
	// Implementations should not check /etc/project or /etc/projid,
	// only whether their underlying mechanism already has the ID in
	// use.
	// Return value of false with no error means that the ID is not
	// in use; true means that it is already in use. An error
	// return means that any quota ID will fail.
	QuotaIDIsInUse(id QuotaID) (bool, error)

	// GetConsumption returns the consumption (in bytes) of the
	// directory, determined by the implementation's quota-based
	// mechanism. If it is unable to do so using that mechanism,
	// it should return an error and allow higher layers to
	// enumerate the directory.
	GetConsumption(path string, id QuotaID) (int64, error)

	// GetInodes returns the number of inodes used by the
	// directory, determined by the implementation's quota-based
	// mechanism. If it is unable to do so using that mechanism,
	// it should return an error and allow higher layers to
	// enumerate the directory.
	GetInodes(path string, id QuotaID) (int64, error)
}
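A sketch of the intended call sequence against these interfaces; the mountpoint, backing device and limit below are hypothetical:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

// applyQuota shows the provider-then-applier flow: probe the device for
// quota support, pick an unused project ID, then set a hard limit.
func applyQuota(provider common.LinuxVolumeQuotaProvider, dir string) error {
	applier := provider.GetQuotaApplier("/var/lib/kubelet", "/dev/sdb1")
	if applier == nil {
		return fmt.Errorf("device does not support project quotas")
	}
	id := common.FirstQuota
	if inUse, err := applier.QuotaIDIsInUse(id); err != nil || inUse {
		return fmt.Errorf("quota ID %v unavailable (inUse=%v, err=%v)", id, inUse, err)
	}
	// 1 GiB hard limit; a negative size would request accounting only.
	return applier.SetQuotaOnDir(dir, id, 1<<30)
}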
vendor/k8s.io/kubernetes/pkg/volume/util/quota/common/quota_linux_common_impl.go (generated, vendored, new file, 286 lines)
@@ -0,0 +1,286 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package common

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"k8s.io/klog"
)

var quotaCmd string
var quotaCmdInitialized bool
var quotaCmdLock sync.RWMutex

// If we later get a filesystem that uses project quota semantics other than
// XFS, we'll need to change this.
// Higher levels don't need to know what's inside
type linuxFilesystemType struct {
	name             string
	typeMagic        int64 // Filesystem magic number, per statfs(2)
	maxQuota         int64
	allowEmptyOutput bool // Accept empty output from "quota" command
}

const (
	bitsPerWord = 32 << (^uint(0) >> 63) // either 32 or 64
)

var (
	linuxSupportedFilesystems = []linuxFilesystemType{
		{
			name:             "XFS",
			typeMagic:        0x58465342,
			maxQuota:         1<<(bitsPerWord-1) - 1,
			allowEmptyOutput: true, // XFS filesystems report nothing if a quota is not present
		}, {
			name:             "ext4fs",
			typeMagic:        0xef53,
			maxQuota:         (1<<(bitsPerWord-1) - 1) & (1<<58 - 1),
			allowEmptyOutput: false, // ext4 filesystems always report something even if a quota is not present
		},
	}
)

// VolumeProvider supplies a quota applier to the generic code.
type VolumeProvider struct {
}

var quotaCmds = []string{"/sbin/xfs_quota",
	"/usr/sbin/xfs_quota",
	"/bin/xfs_quota"}

var quotaParseRegexp = regexp.MustCompilePOSIX("^[^ \t]*[ \t]*([0-9]+)")

var lsattrCmd = "/usr/bin/lsattr"
var lsattrParseRegexp = regexp.MustCompilePOSIX("^ *([0-9]+) [^ ]+ (.*)$")

// GetQuotaApplier -- does this backing device support quotas that
// can be applied to directories?
func (*VolumeProvider) GetQuotaApplier(mountpoint string, backingDev string) LinuxVolumeQuotaApplier {
	for _, fsType := range linuxSupportedFilesystems {
		if isFilesystemOfType(mountpoint, backingDev, fsType.typeMagic) {
			return linuxVolumeQuotaApplier{mountpoint: mountpoint,
				maxQuota:         fsType.maxQuota,
				allowEmptyOutput: fsType.allowEmptyOutput,
			}
		}
	}
	return nil
}

type linuxVolumeQuotaApplier struct {
	mountpoint       string
	maxQuota         int64
	allowEmptyOutput bool
}

func getXFSQuotaCmd() (string, error) {
	quotaCmdLock.Lock()
	defer quotaCmdLock.Unlock()
	if quotaCmdInitialized {
		return quotaCmd, nil
	}
	for _, program := range quotaCmds {
		fileinfo, err := os.Stat(program)
		if err == nil && ((fileinfo.Mode().Perm() & (1 << 6)) != 0) {
			klog.V(3).Infof("Found xfs_quota program %s", program)
			quotaCmd = program
			quotaCmdInitialized = true
			return quotaCmd, nil
		}
	}
	quotaCmdInitialized = true
	return "", fmt.Errorf("No xfs_quota program found")
}

func doRunXFSQuotaCommand(mountpoint string, mountsFile, command string) (string, error) {
	quotaCmd, err := getXFSQuotaCmd()
	if err != nil {
		return "", err
	}
	// We're using numeric project IDs directly; no need to scan
	// /etc/projects or /etc/projid
	klog.V(4).Infof("runXFSQuotaCommand %s -t %s -P/dev/null -D/dev/null -x -f %s -c %s", quotaCmd, mountsFile, mountpoint, command)
	cmd := exec.Command(quotaCmd, "-t", mountsFile, "-P/dev/null", "-D/dev/null", "-x", "-f", mountpoint, "-c", command)

	data, err := cmd.Output()
	if err != nil {
		return "", err
	}
	klog.V(4).Infof("runXFSQuotaCommand output %q", string(data))
	return string(data), nil
}

// Extract the mountpoint we care about into a temporary mounts file so that xfs_quota does
// not attempt to scan every mount on the filesystem, which could hang if e. g.
// a stuck NFS mount is present.
// See https://bugzilla.redhat.com/show_bug.cgi?id=237120 for an example
// of the problem that could be caused if this were to happen.
func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
	tmpMounts, err := ioutil.TempFile("", "mounts")
	if err != nil {
		return "", fmt.Errorf("Cannot create temporary mount file: %v", err)
	}
	tmpMountsFileName := tmpMounts.Name()
	defer tmpMounts.Close()
	defer os.Remove(tmpMountsFileName)

	mounts, err := os.Open(MountsFile)
	if err != nil {
		return "", fmt.Errorf("Cannot open mounts file %s: %v", MountsFile, err)
	}
	defer mounts.Close()

	scanner := bufio.NewScanner(mounts)
	for scanner.Scan() {
		match := MountParseRegexp.FindStringSubmatch(scanner.Text())
		if match != nil {
			mount := match[2]
			if mount == mountpoint {
				if _, err := tmpMounts.WriteString(fmt.Sprintf("%s\n", scanner.Text())); err != nil {
					return "", fmt.Errorf("Cannot write temporary mounts file: %v", err)
				}
				if err := tmpMounts.Sync(); err != nil {
					return "", fmt.Errorf("Cannot sync temporary mounts file: %v", err)
				}
				return doRunXFSQuotaCommand(mountpoint, tmpMountsFileName, command)
			}
		}
	}
	return "", fmt.Errorf("Cannot run xfs_quota: cannot find mount point %s in %s", mountpoint, MountsFile)
}

// SupportsQuotas determines whether the filesystem supports quotas.
func SupportsQuotas(mountpoint string, qType QuotaType) (bool, error) {
	data, err := runXFSQuotaCommand(mountpoint, "state -p")
	if err != nil {
		return false, err
	}
	if qType == FSQuotaEnforcing {
		return strings.Contains(data, "Enforcement: ON"), nil
	}
	return strings.Contains(data, "Accounting: ON"), nil
}

func isFilesystemOfType(mountpoint string, backingDev string, typeMagic int64) bool {
	var buf syscall.Statfs_t
	err := syscall.Statfs(mountpoint, &buf)
	if err != nil {
		klog.Warningf("Warning: Unable to statfs %s: %v", mountpoint, err)
		return false
	}
	if int64(buf.Type) != typeMagic {
		return false
	}
	if answer, _ := SupportsQuotas(mountpoint, FSQuotaAccounting); answer {
		return true
	}
	return false
}

// GetQuotaOnDir retrieves the quota ID (if any) associated with the specified directory
// If we can't make system calls, all we can say is that we don't know whether
// it has a quota, and higher levels have to make the call.
func (v linuxVolumeQuotaApplier) GetQuotaOnDir(path string) (QuotaID, error) {
	cmd := exec.Command(lsattrCmd, "-pd", path)
	data, err := cmd.Output()
	if err != nil {
		return BadQuotaID, fmt.Errorf("cannot run lsattr: %v", err)
	}
	match := lsattrParseRegexp.FindStringSubmatch(string(data))
	if match == nil {
		return BadQuotaID, fmt.Errorf("Unable to parse lsattr -pd %s output %s", path, string(data))
	}
	if match[2] != path {
		return BadQuotaID, fmt.Errorf("Mismatch between supplied and returned path (%s != %s)", path, match[2])
	}
	projid, err := strconv.ParseInt(match[1], 10, 32)
	if err != nil {
		return BadQuotaID, fmt.Errorf("Unable to parse project ID from %s (%v)", match[1], err)
	}
	return QuotaID(projid), nil
}

// SetQuotaOnDir applies a quota to the specified directory under the specified mountpoint.
func (v linuxVolumeQuotaApplier) SetQuotaOnDir(path string, id QuotaID, bytes int64) error {
	if bytes < 0 || bytes > v.maxQuota {
		bytes = v.maxQuota
	}
	_, err := runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("limit -p bhard=%v bsoft=%v %v", bytes, bytes, id))
	if err != nil {
		return err
	}

	_, err = runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("project -s -p %s %v", path, id))
	return err
}

func getQuantity(mountpoint string, id QuotaID, xfsQuotaArg string, multiplier int64, allowEmptyOutput bool) (int64, error) {
	data, err := runXFSQuotaCommand(mountpoint, fmt.Sprintf("quota -p -N -n -v %s %v", xfsQuotaArg, id))
	if err != nil {
		return 0, fmt.Errorf("Unable to run xfs_quota: %v", err)
	}
	if data == "" && allowEmptyOutput {
		return 0, nil
	}
	match := quotaParseRegexp.FindStringSubmatch(data)
	if match == nil {
		return 0, fmt.Errorf("Unable to parse quota output '%s'", data)
	}
	size, err := strconv.ParseInt(match[1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("Unable to parse data size '%s' from '%s': %v", match[1], data, err)
	}
	klog.V(4).Infof("getQuantity %s %d %s %d => %d %v", mountpoint, id, xfsQuotaArg, multiplier, size, err)
	return size * multiplier, nil
}

// GetConsumption returns the consumption in bytes if available via quotas
func (v linuxVolumeQuotaApplier) GetConsumption(_ string, id QuotaID) (int64, error) {
	return getQuantity(v.mountpoint, id, "-b", 1024, v.allowEmptyOutput)
}

// GetInodes returns the inodes in use if available via quotas
func (v linuxVolumeQuotaApplier) GetInodes(_ string, id QuotaID) (int64, error) {
	return getQuantity(v.mountpoint, id, "-i", 1, v.allowEmptyOutput)
}

// QuotaIDIsInUse checks whether the specified quota ID is in use on the specified
// filesystem
func (v linuxVolumeQuotaApplier) QuotaIDIsInUse(id QuotaID) (bool, error) {
	bytes, err := v.GetConsumption(v.mountpoint, id)
	if err != nil {
		return false, err
	}
	if bytes > 0 {
		return true, nil
	}
	inodes, err := v.GetInodes(v.mountpoint, id)
	return inodes > 0, err
}
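The bitsPerWord expression above deserves a note: ^uint(0) is all ones, and shifting it right by 63 yields 1 on a 64-bit platform and 0 on a 32-bit one, so the constant evaluates to 64 or 32 without build tags. A worked check:

package main

import "fmt"

func main() {
	const bitsPerWord = 32 << (^uint(0) >> 63) // 64 on 64-bit, 32 on 32-bit
	// 64-bit platform: ^uint(0)>>63 == 1, so 32<<1 == 64.
	// 32-bit platform: ^uint(0)>>63 == 0, so 32<<0 == 32.
	fmt.Println(bitsPerWord, int64(1)<<(bitsPerWord-1)-1) // word size, max signed value
}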
vendor/k8s.io/kubernetes/pkg/volume/util/quota/project.go (generated, vendored, new file, 357 lines)
@ -0,0 +1,357 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"sync"

	"golang.org/x/sys/unix"
	"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

var projectsFile = "/etc/projects"
var projidFile = "/etc/projid"

var projectsParseRegexp = regexp.MustCompilePOSIX("^([[:digit:]]+):(.*)$")
var projidParseRegexp = regexp.MustCompilePOSIX("^([^#][^:]*):([[:digit:]]+)$")

var quotaIDLock sync.RWMutex

const maxUnusedQuotasToSearch = 128 // Don't go into an infinite loop searching for an unused quota

type projectType struct {
	isValid bool // False if we need to remove this line
	id      common.QuotaID
	data    string // Project name (projid) or directory (projects)
	line    string
}

type projectsList struct {
	projects []projectType
	projid   []projectType
}
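A standalone sketch (not vendored code) of the line formats the two regexps above accept; the project ID and path are made up:

// Illustrative only: sample /etc/projects and /etc/projid entries.
package main

import (
	"fmt"
	"regexp"
)

var (
	projectsParseRegexp = regexp.MustCompilePOSIX("^([[:digit:]]+):(.*)$")
	projidParseRegexp   = regexp.MustCompilePOSIX("^([^#][^:]*):([[:digit:]]+)$")
)

func main() {
	// /etc/projects maps project ID -> directory
	fmt.Println(projectsParseRegexp.FindStringSubmatch("1048577:/var/lib/kubelet/pods/volume1"))
	// /etc/projid maps project name -> project ID; '#' starts a comment line
	fmt.Println(projidParseRegexp.FindStringSubmatch("volume1048577:1048577"))
	fmt.Println(projidParseRegexp.FindStringSubmatch("# a comment") == nil) // true
}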
func projFilesAreOK() error {
	if sf, err := os.Lstat(projectsFile); err != nil || sf.Mode().IsRegular() {
		if sf, err := os.Lstat(projidFile); err != nil || sf.Mode().IsRegular() {
			return nil
		}
		return fmt.Errorf("%s exists but is not a plain file, cannot continue", projidFile)
	}
	return fmt.Errorf("%s exists but is not a plain file, cannot continue", projectsFile)
}

func lockFile(file *os.File) error {
	return unix.Flock(int(file.Fd()), unix.LOCK_EX)
}

func unlockFile(file *os.File) error {
	return unix.Flock(int(file.Fd()), unix.LOCK_UN)
}

// openAndLockProjectFiles opens /etc/projects and /etc/projid locked.
// Creates them if they don't exist
func openAndLockProjectFiles() (*os.File, *os.File, error) {
	// Make sure neither project-related file is a symlink!
	if err := projFilesAreOK(); err != nil {
		return nil, nil, fmt.Errorf("system project files failed verification: %v", err)
	}
	// We don't actually modify the original files; we create temporaries and
	// move them over the originals
	fProjects, err := os.OpenFile(projectsFile, os.O_RDONLY|os.O_CREATE, 0644)
	if err != nil {
		err = fmt.Errorf("unable to open %s: %v", projectsFile, err)
		return nil, nil, err
	}
	fProjid, err := os.OpenFile(projidFile, os.O_RDONLY|os.O_CREATE, 0644)
	if err == nil {
		// Check once more, to ensure nothing got changed out from under us
		if err := projFilesAreOK(); err == nil {
			err = lockFile(fProjects)
			if err == nil {
				err = lockFile(fProjid)
				if err == nil {
					return fProjects, fProjid, nil
				}
				// Nothing useful we can do if we get an error here
				err = fmt.Errorf("unable to lock %s: %v", projidFile, err)
				unlockFile(fProjects)
			} else {
				err = fmt.Errorf("unable to lock %s: %v", projectsFile, err)
			}
		} else {
			err = fmt.Errorf("system project files failed re-verification: %v", err)
		}
		fProjid.Close()
	} else {
		err = fmt.Errorf("unable to open %s: %v", projidFile, err)
	}
	fProjects.Close()
	return nil, nil, err
}

func closeProjectFiles(fProjects *os.File, fProjid *os.File) error {
	// Nothing useful we can do if either of these fail,
	// but we have to close (and thereby unlock) the files anyway.
	var err error
	var err1 error
	if fProjid != nil {
		err = fProjid.Close()
	}
	if fProjects != nil {
		err1 = fProjects.Close()
	}
	if err == nil {
		return err1
	}
	return err
}
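As a minimal illustration of the flock-based advisory locking used above (standalone sketch; the file path is invented and this is Linux/unix-only):

// Illustrative only: the locking pattern, not the vendored helpers.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/projects-demo", os.O_RDONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close() // closing the descriptor also drops the flock

	// LOCK_EX blocks until no other process holds the lock on this file.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("exclusive lock held; safe to read and rewrite the file")
	_ = unix.Flock(int(f.Fd()), unix.LOCK_UN)
}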
func parseProject(l string) projectType {
	if match := projectsParseRegexp.FindStringSubmatch(l); match != nil {
		i, err := strconv.Atoi(match[1])
		if err == nil {
			return projectType{true, common.QuotaID(i), match[2], l}
		}
	}
	return projectType{true, common.BadQuotaID, "", l}
}

func parseProjid(l string) projectType {
	if match := projidParseRegexp.FindStringSubmatch(l); match != nil {
		i, err := strconv.Atoi(match[2])
		if err == nil {
			return projectType{true, common.QuotaID(i), match[1], l}
		}
	}
	return projectType{true, common.BadQuotaID, "", l}
}

func parseProjFile(f *os.File, parser func(l string) projectType) []projectType {
	var answer []projectType
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		answer = append(answer, parser(scanner.Text()))
	}
	return answer
}

func readProjectFiles(projects *os.File, projid *os.File) projectsList {
	return projectsList{parseProjFile(projects, parseProject), parseProjFile(projid, parseProjid)}
}

func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
	unusedQuotasSearched := 0
	for id := common.FirstQuota; id == id; id++ {
		if _, ok := idMap[id]; !ok {
			isInUse, err := getApplier(path).QuotaIDIsInUse(id)
			if err != nil {
				return common.BadQuotaID, err
			} else if !isInUse {
				return id, nil
			}
			unusedQuotasSearched++
			if unusedQuotasSearched > maxUnusedQuotasToSearch {
				break
			}
		}
	}
	return common.BadQuotaID, fmt.Errorf("Cannot find available quota ID")
}
func addDirToProject(path string, id common.QuotaID, list *projectsList) (common.QuotaID, bool, error) {
	idMap := make(map[common.QuotaID]bool)
	for _, project := range list.projects {
		if project.data == path {
			if id != project.id {
				return common.BadQuotaID, false, fmt.Errorf("Attempt to reassign project ID for %s", path)
			}
			// Trying to reassign a directory to the project it's
			// already in. Maybe this should be an error, but for
			// now treat it as an idempotent operation
			return id, false, nil
		}
		idMap[project.id] = true
	}
	var needToAddProjid = true
	for _, projid := range list.projid {
		idMap[projid.id] = true
		if projid.id == id && id != common.BadQuotaID {
			needToAddProjid = false
		}
	}
	var err error
	if id == common.BadQuotaID {
		id, err = findAvailableQuota(path, idMap)
		if err != nil {
			return common.BadQuotaID, false, err
		}
		needToAddProjid = true
	}
	if needToAddProjid {
		name := fmt.Sprintf("volume%v", id)
		line := fmt.Sprintf("%s:%v", name, id)
		list.projid = append(list.projid, projectType{true, id, name, line})
	}
	line := fmt.Sprintf("%v:%s", id, path)
	list.projects = append(list.projects, projectType{true, id, path, line})
	return id, needToAddProjid, nil
}

func removeDirFromProject(path string, id common.QuotaID, list *projectsList) (bool, error) {
	if id == common.BadQuotaID {
		return false, fmt.Errorf("Attempt to remove invalid quota ID from %s", path)
	}
	foundAt := -1
	countByID := make(map[common.QuotaID]int)
	for i, project := range list.projects {
		if project.data == path {
			if id != project.id {
				return false, fmt.Errorf("Attempting to remove quota ID %v from path %s, but expecting ID %v", id, path, project.id)
			} else if foundAt != -1 {
				return false, fmt.Errorf("Found multiple quota IDs for path %s", path)
			}
			// Faster and easier than deleting an element
			list.projects[i].isValid = false
			foundAt = i
		}
		countByID[project.id]++
	}
	if foundAt == -1 {
		return false, fmt.Errorf("Cannot find quota associated with path %s", path)
	}
	if countByID[id] <= 1 {
		// Removing the last entry means that we're no longer using
		// the quota ID, so remove that as well
		for i, projid := range list.projid {
			if projid.id == id {
				list.projid[i].isValid = false
			}
		}
		return true, nil
	}
	return false, nil
}
func writeProjectFile(base *os.File, projects []projectType) (string, error) {
	oname := base.Name()
	stat, err := base.Stat()
	if err != nil {
		return "", err
	}
	mode := stat.Mode() & os.ModePerm
	f, err := ioutil.TempFile(filepath.Dir(oname), filepath.Base(oname))
	if err != nil {
		return "", err
	}
	filename := f.Name()
	if err := os.Chmod(filename, mode); err != nil {
		return "", err
	}
	for _, proj := range projects {
		if proj.isValid {
			if _, err := f.WriteString(fmt.Sprintf("%s\n", proj.line)); err != nil {
				f.Close()
				os.Remove(filename)
				return "", err
			}
		}
	}
	if err := f.Close(); err != nil {
		os.Remove(filename)
		return "", err
	}
	return filename, nil
}
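A standalone sketch of the write-temp-then-rename pattern writeProjectFile relies on; the target path and content are invented:

// Illustrative only: atomic replacement of a config file.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	target := "/tmp/projid-demo"
	f, err := ioutil.TempFile(filepath.Dir(target), filepath.Base(target))
	if err != nil {
		fmt.Println(err)
		return
	}
	tmp := f.Name()
	if _, err := f.WriteString("volume1:1\n"); err != nil {
		f.Close()
		os.Remove(tmp)
		return
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return
	}
	// rename(2) is atomic within a filesystem, so readers see either the old
	// file or the complete new one, never a partial write.
	if err := os.Rename(tmp, target); err != nil {
		os.Remove(tmp)
	}
}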
func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, list projectsList) error {
	tmpProjects, err := writeProjectFile(fProjects, list.projects)
	if err == nil {
		// Ensure that both files are written before we try to rename either.
		if writeProjid {
			tmpProjid, err := writeProjectFile(fProjid, list.projid)
			if err == nil {
				err = os.Rename(tmpProjid, fProjid.Name())
				if err != nil {
					os.Remove(tmpProjid)
				}
			}
		}
		if err == nil {
			err = os.Rename(tmpProjects, fProjects.Name())
			if err == nil {
				return nil
			}
			// We're in a bit of trouble here; at this
			// point we've successfully renamed tmpProjid
			// to the real thing, but renaming tmpProject
			// to the real file failed. There's not much we
			// can do in this position. Anything we could do
			// to try to undo it would itself be likely to fail.
		}
		os.Remove(tmpProjects)
	}
	return fmt.Errorf("Unable to write project files: %v", err)
}

func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
	quotaIDLock.Lock()
	defer quotaIDLock.Unlock()
	fProjects, fProjid, err := openAndLockProjectFiles()
	if err == nil {
		defer closeProjectFiles(fProjects, fProjid)
		list := readProjectFiles(fProjects, fProjid)
		writeProjid := true
		ID, writeProjid, err = addDirToProject(path, ID, &list)
		if err == nil && ID != common.BadQuotaID {
			if err = writeProjectFiles(fProjects, fProjid, writeProjid, list); err == nil {
				return ID, nil
			}
		}
	}
	return common.BadQuotaID, fmt.Errorf("createProjectID %s %v failed %v", path, ID, err)
}

func removeProjectID(path string, ID common.QuotaID) error {
	if ID == common.BadQuotaID {
		return fmt.Errorf("attempting to remove invalid quota ID %v", ID)
	}
	quotaIDLock.Lock()
	defer quotaIDLock.Unlock()
	fProjects, fProjid, err := openAndLockProjectFiles()
	if err == nil {
		defer closeProjectFiles(fProjects, fProjid)
		list := readProjectFiles(fProjects, fProjid)
		writeProjid := true
		writeProjid, err = removeDirFromProject(path, ID, &list)
		if err == nil {
			if err = writeProjectFiles(fProjects, fProjid, writeProjid, list); err == nil {
				return nil
			}
		}
	}
	return fmt.Errorf("removeProjectID %s %v failed %v", path, ID, err)
}
48 vendor/k8s.io/kubernetes/pkg/volume/util/quota/quota.go generated vendored Normal file
@@ -0,0 +1,48 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"k8s.io/apimachinery/pkg/api/resource"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
)

// Interface -- quota interface
type Interface interface {
	// Does the path provided support quotas, and if so, what types
	SupportsQuotas(m mount.Interface, path string) (bool, error)
	// Assign a quota (picked by the quota mechanism) to a path,
	// and return it.
	AssignQuota(m mount.Interface, path string, poduid string, bytes *resource.Quantity) error

	// Get the quota-based storage consumption for the path
	GetConsumption(path string) (*resource.Quantity, error)

	// Get the quota-based inode consumption for the path
	GetInodes(path string) (*resource.Quantity, error)

	// Remove the quota from a path
	// Implementations may assume that any data covered by the
	// quota has already been removed.
	ClearQuota(m mount.Interface, path string, poduid string) error
}

func enabledQuotasForMonitoring() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring)
}
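A hypothetical consumer of this interface might look like the sketch below. The function name, the size, and the idea of receiving an Interface value are all invented for illustration; in-tree callers use the package-level functions defined in quota_linux.go rather than an instance.

// Hypothetical sketch only; compiles against the Kubernetes tree's vendored deps.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume/util/quota"
)

// reserve asks a quota implementation to cap a volume directory at 1 GiB.
func reserve(impl quota.Interface, m mount.Interface, dir, poduid string) error {
	supported, err := impl.SupportsQuotas(m, dir)
	if err != nil || !supported {
		return fmt.Errorf("no quota support on %s: %v", dir, err)
	}
	size := resource.NewQuantity(1<<30, resource.BinarySI) // 1 GiB
	return impl.AssignQuota(m, dir, poduid, size)
}

func main() {}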
440 vendor/k8s.io/kubernetes/pkg/volume/util/quota/quota_linux.go generated vendored Normal file
@@ -0,0 +1,440 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

// Pod -> ID
var podQuotaMap = make(map[string]common.QuotaID)

// Dir -> ID (for convenience)
var dirQuotaMap = make(map[string]common.QuotaID)

// ID -> pod
var quotaPodMap = make(map[common.QuotaID]string)

// Directory -> pod
var dirPodMap = make(map[string]string)

// Backing device -> applier
// This is *not* cleaned up; its size will be bounded.
var devApplierMap = make(map[string]common.LinuxVolumeQuotaApplier)

// Directory -> applier
var dirApplierMap = make(map[string]common.LinuxVolumeQuotaApplier)
var dirApplierLock sync.RWMutex

// Pod -> refcount
var podDirCountMap = make(map[string]int)

// ID -> size
var quotaSizeMap = make(map[common.QuotaID]int64)
var quotaLock sync.RWMutex

var supportsQuotasMap = make(map[string]bool)
var supportsQuotasLock sync.RWMutex

// Directory -> backingDev
var backingDevMap = make(map[string]string)
var backingDevLock sync.RWMutex

var mountpointMap = make(map[string]string)
var mountpointLock sync.RWMutex

var providers = []common.LinuxVolumeQuotaProvider{
	&common.VolumeProvider{},
}
// Separate the innards for ease of testing
func detectBackingDevInternal(mountpoint string, mounts string) (string, error) {
	file, err := os.Open(mounts)
	if err != nil {
		return "", err
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		match := common.MountParseRegexp.FindStringSubmatch(scanner.Text())
		if match != nil {
			device := match[1]
			mount := match[2]
			if mount == mountpoint {
				return device, nil
			}
		}
	}
	return "", fmt.Errorf("couldn't find backing device for %s", mountpoint)
}

// detectBackingDev assumes that the mount point provided is valid
func detectBackingDev(_ mount.Interface, mountpoint string) (string, error) {
	return detectBackingDevInternal(mountpoint, common.MountsFile)
}
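A standalone sketch of the mounts-file scan above. The regexp is a simplified stand-in for common.MountParseRegexp (defined in the common package, not shown here), and the sample line is made up:

// Illustrative only: parsing device and mountpoint out of a mounts line.
package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

var mountParseRegexp = regexp.MustCompilePOSIX("^([^ ]+) ([^ ]+)") // device, mountpoint

func main() {
	mounts := "/dev/sdb1 /var/lib/kubelet xfs rw,prjquota 0 0\n"
	scanner := bufio.NewScanner(strings.NewReader(mounts))
	for scanner.Scan() {
		if match := mountParseRegexp.FindStringSubmatch(scanner.Text()); match != nil {
			fmt.Printf("device=%s mountpoint=%s\n", match[1], match[2])
		}
	}
}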
func clearBackingDev(path string) {
	backingDevLock.Lock()
	defer backingDevLock.Unlock()
	delete(backingDevMap, path)
}

// Assumes that the path has been fully canonicalized
// Breaking this up helps with testing
func detectMountpointInternal(m mount.Interface, path string) (string, error) {
	for path != "" && path != "/" {
		// per pkg/util/mount/mount_linux this detects all but
		// a bind mount from one part of a mount to another.
		// For our purposes that's fine; we simply want the "true"
		// mount point
		//
		// IsNotMountPoint proved much more troublesome; it actually
		// scans the mounts, and when a lot of mount/unmount
		// activity takes place, it is not able to get a consistent
		// view of /proc/self/mounts, causing it to time out and
		// report incorrectly.
		isNotMount, err := m.IsLikelyNotMountPoint(path)
		if err != nil {
			return "/", err
		}
		if !isNotMount {
			return path, nil
		}
		path = filepath.Dir(path)
	}
	return "/", nil
}

func detectMountpoint(m mount.Interface, path string) (string, error) {
	xpath, err := filepath.Abs(path)
	if err != nil {
		return "/", err
	}
	xpath, err = filepath.EvalSymlinks(xpath)
	if err != nil {
		return "/", err
	}
	if xpath, err = detectMountpointInternal(m, xpath); err == nil {
		return xpath, nil
	}
	return "/", err
}

func clearMountpoint(path string) {
	mountpointLock.Lock()
	defer mountpointLock.Unlock()
	delete(mountpointMap, path)
}
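For background, a standalone sketch of the device-number heuristic that IsLikelyNotMountPoint is based on: a directory whose st_dev differs from its parent's is a mount point. This is an illustration of the idea, not the vendored implementation; Linux-only, minimal error handling:

// Illustrative only: the st_dev comparison behind the "likely not a
// mount point" check.
package main

import (
	"fmt"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func likelyNotMountPoint(path string) (bool, error) {
	var st, parentSt unix.Stat_t
	if err := unix.Stat(path, &st); err != nil {
		return true, err
	}
	if err := unix.Stat(filepath.Dir(path), &parentSt); err != nil {
		return true, err
	}
	// Same device as the parent => probably not a mount point.
	return st.Dev == parentSt.Dev, nil
}

func main() {
	notMount, err := likelyNotMountPoint("/proc")
	fmt.Println(notMount, err) // typically "false <nil>": /proc is a mount
}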
// getFSInfo Returns mountpoint and backing device
// getFSInfo should cache the mountpoint and backing device for the
// path.
func getFSInfo(m mount.Interface, path string) (string, string, error) {
	mountpointLock.Lock()
	defer mountpointLock.Unlock()

	backingDevLock.Lock()
	defer backingDevLock.Unlock()

	var err error

	mountpoint, okMountpoint := mountpointMap[path]
	if !okMountpoint {
		mountpoint, err = detectMountpoint(m, path)
		if err != nil {
			return "", "", fmt.Errorf("Cannot determine mountpoint for %s: %v", path, err)
		}
	}

	backingDev, okBackingDev := backingDevMap[path]
	if !okBackingDev {
		backingDev, err = detectBackingDev(m, mountpoint)
		if err != nil {
			return "", "", fmt.Errorf("Cannot determine backing device for %s: %v", path, err)
		}
	}
	mountpointMap[path] = mountpoint
	backingDevMap[path] = backingDev
	return mountpoint, backingDev, nil
}

func clearFSInfo(path string) {
	clearMountpoint(path)
	clearBackingDev(path)
}

func getApplier(path string) common.LinuxVolumeQuotaApplier {
	dirApplierLock.Lock()
	defer dirApplierLock.Unlock()
	return dirApplierMap[path]
}

func setApplier(path string, applier common.LinuxVolumeQuotaApplier) {
	dirApplierLock.Lock()
	defer dirApplierLock.Unlock()
	dirApplierMap[path] = applier
}

func clearApplier(path string) {
	dirApplierLock.Lock()
	defer dirApplierLock.Unlock()
	delete(dirApplierMap, path)
}
func setQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
	return getApplier(path).SetQuotaOnDir(path, id, bytes)
}

func getQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
	_, _, err := getFSInfo(m, path)
	if err != nil {
		return common.BadQuotaID, err
	}
	return getApplier(path).GetQuotaOnDir(path)
}

func clearQuotaOnDir(m mount.Interface, path string) error {
	// Since we may be called without path being in the map,
	// we explicitly have to check in this case.
	klog.V(4).Infof("clearQuotaOnDir %s", path)
	supportsQuotas, err := SupportsQuotas(m, path)
	if !supportsQuotas {
		return nil
	}
	projid, err := getQuotaOnDir(m, path)
	if err == nil && projid != common.BadQuotaID {
		// This means that we have a quota on the directory but
		// we can't clear it. That's not good.
		err = setQuotaOnDir(path, projid, 0)
		if err != nil {
			klog.V(3).Infof("Attempt to clear quota failed: %v", err)
		}
		// Even if clearing the quota failed, we still need to
		// try to remove the project ID, or that may be left dangling.
		err1 := removeProjectID(path, projid)
		if err1 != nil {
			klog.V(3).Infof("Attempt to remove quota ID from system files failed: %v", err1)
		}
		clearFSInfo(path)
		if err != nil {
			return err
		}
		return err1
	}
	// If we couldn't get a quota, that's fine -- there may
	// never have been one, and we have no way to know otherwise
	klog.V(3).Infof("clearQuotaOnDir fails %v", err)
	return nil
}
// SupportsQuotas -- Does the path support quotas
// Cache the applier for paths that support quotas. For paths that don't,
// don't cache the result because nothing will clean it up.
// However, do cache the device->applier map; the number of devices
// is bounded.
func SupportsQuotas(m mount.Interface, path string) (bool, error) {
	if !enabledQuotasForMonitoring() {
		klog.V(3).Info("SupportsQuotas called, but quotas disabled")
		return false, nil
	}
	supportsQuotasLock.Lock()
	defer supportsQuotasLock.Unlock()
	if supportsQuotas, ok := supportsQuotasMap[path]; ok {
		return supportsQuotas, nil
	}
	mount, dev, err := getFSInfo(m, path)
	if err != nil {
		return false, err
	}
	// Do we know about this device?
	applier, ok := devApplierMap[mount]
	if !ok {
		for _, provider := range providers {
			if applier = provider.GetQuotaApplier(mount, dev); applier != nil {
				devApplierMap[mount] = applier
				break
			}
		}
	}
	if applier != nil {
		supportsQuotasMap[path] = true
		setApplier(path, applier)
		return true, nil
	}
	delete(backingDevMap, path)
	delete(mountpointMap, path)
	return false, nil
}
// AssignQuota -- assign a quota to the specified directory.
// AssignQuota chooses the quota ID based on the pod UID and path.
// If the pod UID is identical to another one known, it may (but presently
// doesn't) choose the same quota ID as other volumes in the pod.
func AssignQuota(m mount.Interface, path string, poduid string, bytes *resource.Quantity) error {
	if bytes == nil {
		return fmt.Errorf("Attempting to assign null quota to %s", path)
	}
	ibytes := bytes.Value()
	if ok, err := SupportsQuotas(m, path); !ok {
		return fmt.Errorf("Quotas not supported on %s: %v", path, err)
	}
	quotaLock.Lock()
	defer quotaLock.Unlock()
	// Current policy is to set individual quotas on each volume.
	// If we decide later that we want to assign one quota for all
	// volumes in a pod, we can simply remove this line of code.
	// If and when we decide permanently that we're going to adopt
	// one quota per volume, we can rip all of the pod code out.
	poduid = string(uuid.NewUUID())
	if pod, ok := dirPodMap[path]; ok && pod != poduid {
		return fmt.Errorf("Requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
	}
	oid, ok := podQuotaMap[poduid]
	if ok {
		if quotaSizeMap[oid] != ibytes {
			return fmt.Errorf("Requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
		}
	} else {
		oid = common.BadQuotaID
	}
	id, err := createProjectID(path, oid)
	if err == nil {
		if oid != common.BadQuotaID && oid != id {
			return fmt.Errorf("Attempt to reassign quota %v to %v", oid, id)
		}
		// When enforcing quotas are enabled, we'll condition this
		// on their being disabled also.
		if ibytes > 0 {
			ibytes = -1
		}
		if err = setQuotaOnDir(path, id, ibytes); err == nil {
			quotaPodMap[id] = poduid
			quotaSizeMap[id] = ibytes
			podQuotaMap[poduid] = id
			dirQuotaMap[path] = id
			dirPodMap[path] = poduid
			podDirCountMap[poduid]++
			klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path)
			return nil
		}
		removeProjectID(path, id)
	}
	return fmt.Errorf("Assign quota FAILED %v", err)
}
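A hypothetical end-to-end sketch of the package-level API above. The directory, pod UID, and size are invented; mount.New("") is the default mounter constructor from pkg/util/mount, and on a real system the calls only succeed when the feature gate and filesystem support are present:

// Hypothetical usage sketch; compiles against the Kubernetes tree's deps.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume/util/quota"
)

func main() {
	m := mount.New("")
	dir := "/var/lib/kubelet/pods/pod-uid/volumes/kubernetes.io~empty-dir/scratch"
	limit := resource.NewQuantity(512*1024*1024, resource.BinarySI) // 512 MiB

	if err := quota.AssignQuota(m, dir, "pod-uid", limit); err != nil {
		fmt.Println(err)
		return
	}
	used, _ := quota.GetConsumption(dir) // nil if dir is not under quota management
	fmt.Println("bytes used:", used)
	_ = quota.ClearQuota(m, dir)
}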
// GetConsumption -- retrieve the consumption (in bytes) of the directory
func GetConsumption(path string) (*resource.Quantity, error) {
	// Note that we actually need to hold the lock at least through
	// running the quota command, so it can't get recycled behind our back
	quotaLock.Lock()
	defer quotaLock.Unlock()
	applier := getApplier(path)
	// No applier means directory is not under quota management
	if applier == nil {
		return nil, nil
	}
	ibytes, err := applier.GetConsumption(path, dirQuotaMap[path])
	if err != nil {
		return nil, err
	}
	return resource.NewQuantity(ibytes, resource.DecimalSI), nil
}

// GetInodes -- retrieve the number of inodes in use under the directory
func GetInodes(path string) (*resource.Quantity, error) {
	// Note that we actually need to hold the lock at least through
	// running the quota command, so it can't get recycled behind our back
	quotaLock.Lock()
	defer quotaLock.Unlock()
	applier := getApplier(path)
	// No applier means directory is not under quota management
	if applier == nil {
		return nil, nil
	}
	inodes, err := applier.GetInodes(path, dirQuotaMap[path])
	if err != nil {
		return nil, err
	}
	return resource.NewQuantity(inodes, resource.DecimalSI), nil
}

// ClearQuota -- remove the quota assigned to a directory
func ClearQuota(m mount.Interface, path string) error {
	klog.V(3).Infof("ClearQuota %s", path)
	if !enabledQuotasForMonitoring() {
		return fmt.Errorf("ClearQuota called, but quotas disabled")
	}
	quotaLock.Lock()
	defer quotaLock.Unlock()
	poduid, ok := dirPodMap[path]
	if !ok {
		// Nothing in the map either means that there was no
		// quota to begin with or that we're clearing a
		// stale directory, so if we find a quota, just remove it.
		// The process of clearing the quota requires that an applier
		// be found, which needs to be cleaned up.
		defer delete(supportsQuotasMap, path)
		defer clearApplier(path)
		return clearQuotaOnDir(m, path)
	}
	_, ok = podQuotaMap[poduid]
	if !ok {
		return fmt.Errorf("ClearQuota: No quota available for %s", path)
	}
	var err error
	projid, err := getQuotaOnDir(m, path)
	if projid != dirQuotaMap[path] {
		return fmt.Errorf("Expected quota ID %v on dir %s does not match actual %v", dirQuotaMap[path], path, projid)
	}
	count, ok := podDirCountMap[poduid]
	if count <= 1 || !ok {
		err = clearQuotaOnDir(m, path)
		// This error should be noted; we still need to clean up
		// and otherwise handle in the same way.
		if err != nil {
			klog.V(3).Infof("Unable to clear quota %v %s: %v", dirQuotaMap[path], path, err)
		}
		delete(quotaSizeMap, podQuotaMap[poduid])
		delete(quotaPodMap, podQuotaMap[poduid])
		delete(podDirCountMap, poduid)
		delete(podQuotaMap, poduid)
	} else {
		err = removeProjectID(path, projid)
		podDirCountMap[poduid]--
		klog.V(4).Infof("Not clearing quota for pod %s; still %v dirs outstanding", poduid, podDirCountMap[poduid])
	}
	delete(dirPodMap, path)
	delete(dirQuotaMap, path)
	delete(supportsQuotasMap, path)
	clearApplier(path)
	if err != nil {
		return fmt.Errorf("Unable to clear quota for %s: %v", path, err)
	}
	return nil
}
55 vendor/k8s.io/kubernetes/pkg/volume/util/quota/quota_unsupported.go generated vendored Normal file
@@ -0,0 +1,55 @@
// +build !linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"errors"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/util/mount"
)

// Dummy quota implementation for systems that do not implement support
// for volume quotas

var errNotImplemented = errors.New("not implemented")

// SupportsQuotas -- dummy implementation
func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
	return false, errNotImplemented
}

// AssignQuota -- dummy implementation
func AssignQuota(_ mount.Interface, _ string, _ string, _ *resource.Quantity) error {
	return errNotImplemented
}

// GetConsumption -- dummy implementation
func GetConsumption(_ string) (*resource.Quantity, error) {
	return nil, errNotImplemented
}

// GetInodes -- dummy implementation
func GetInodes(_ string) (*resource.Quantity, error) {
	return nil, errNotImplemented
}

// ClearQuota -- dummy implementation
func ClearQuota(_ mount.Interface, _ string) error {
	return errNotImplemented
}
10 vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go generated vendored
@@ -29,6 +29,7 @@ import (
	"k8s.io/klog"
)

+// RecycleEventRecorder is a func that defines how to record RecycleEvent.
type RecycleEventRecorder func(eventtype, message string)

// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
@@ -127,9 +128,8 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
		if pod.Status.Phase == v1.PodFailed {
			if pod.Status.Message != "" {
				return fmt.Errorf(pod.Status.Message)
-			} else {
-				return fmt.Errorf("pod failed, pod.Status.Message unknown.")
			}
+			return fmt.Errorf("pod failed, pod.Status.Message unknown")
		}

	case watch.Deleted:
@@ -238,9 +238,8 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
			case eventEvent, ok := <-eventWatch.ResultChan():
				if !ok {
					return
-				} else {
-					eventCh <- eventEvent
				}
+				eventCh <- eventEvent
			}
		}
	}()
@@ -256,9 +255,8 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
			case podEvent, ok := <-podWatch.ResultChan():
				if !ok {
					return
-				} else {
-					eventCh <- podEvent
				}
+				eventCh <- podEvent
			}
		}
	}()
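For background, a standalone sketch of the fan-in pattern WatchPod uses: two result channels merged into one. The vendored code runs one goroutine per source; the nil-channel idiom below is an alternative way to express the same merge, with invented channel names:

// Illustrative only: merging two channels until both close.
package main

import "fmt"

func main() {
	podCh := make(chan string, 2)
	eventCh := make(chan string, 2)
	merged := make(chan string, 4)
	podCh <- "pod: Running"
	eventCh <- "event: Scheduled"
	close(podCh)
	close(eventCh)

	for podCh != nil || eventCh != nil {
		select {
		case v, ok := <-podCh:
			if !ok {
				podCh = nil // a nil channel is never selected again
				continue
			}
			merged <- v
		case v, ok := <-eventCh:
			if !ok {
				eventCh = nil
				continue
			}
			merged <- v
		}
	}
	close(merged)
	for v := range merged {
		fmt.Println(v)
	}
}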
157 vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go generated vendored
@@ -21,12 +21,17 @@ import (
	"fmt"

	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/util/resizefs"
+	"k8s.io/kubernetes/pkg/volume"
	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

var (
@@ -46,45 +51,175 @@ func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
	return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}

+// UpdatePVSize updates just pv size after cloudprovider resizing is successful
+func UpdatePVSize(
+	pv *v1.PersistentVolume,
+	newSize resource.Quantity,
+	kubeClient clientset.Interface) error {
+	pvClone := pv.DeepCopy()
+
+	oldData, err := json.Marshal(pvClone)
+	if err != nil {
+		return fmt.Errorf("unexpected error marshaling old PV %q with error : %v", pvClone.Name, err)
+	}
+
+	pvClone.Spec.Capacity[v1.ResourceStorage] = newSize
+
+	newData, err := json.Marshal(pvClone)
+	if err != nil {
+		return fmt.Errorf("unexpected error marshaling new PV %q with error : %v", pvClone.Name, err)
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, pvClone)
+	if err != nil {
+		return fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err)
+	}
+
+	_, err = kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, types.StrategicMergePatchType, patchBytes)
+	if err != nil {
+		return fmt.Errorf("error Patching PV %q with error : %v", pvClone.Name, err)
+	}
+	return nil
+}
+
+// MarkResizeInProgressWithResizer marks cloudprovider resizing as in progress
+// and also annotates the PVC with the name of the resizer.
+func MarkResizeInProgressWithResizer(
+	pvc *v1.PersistentVolumeClaim,
+	resizerName string,
+	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+	// Mark PVC as Resize Started
+	progressCondition := v1.PersistentVolumeClaimCondition{
+		Type:               v1.PersistentVolumeClaimResizing,
+		Status:             v1.ConditionTrue,
+		LastTransitionTime: metav1.Now(),
+	}
+	conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
+	newPVC := pvc.DeepCopy()
+	newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
+	newPVC = setResizer(newPVC, resizerName)
+	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
+}
+
+// SetClaimResizer sets resizer annotation on PVC
+func SetClaimResizer(
+	pvc *v1.PersistentVolumeClaim,
+	resizerName string,
+	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+	newPVC := pvc.DeepCopy()
+	newPVC = setResizer(newPVC, resizerName)
+	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
+}
+
+func setResizer(pvc *v1.PersistentVolumeClaim, resizerName string) *v1.PersistentVolumeClaim {
+	if val, ok := pvc.Annotations[volumetypes.VolumeResizerKey]; ok && val == resizerName {
+		return pvc
+	}
+	metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volumetypes.VolumeResizerKey, resizerName)
+	return pvc
+}

// MarkForFSResize marks file system resizing as pending
func MarkForFSResize(
	pvc *v1.PersistentVolumeClaim,
	kubeClient clientset.Interface) error {
	pvcCondition := v1.PersistentVolumeClaimCondition{
		Type:               v1.PersistentVolumeClaimFileSystemResizePending,
		Status:             v1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Message:            "Waiting for user to (re-)start a pod to finish file system resize of volume on node.",
	}
	conditions := []v1.PersistentVolumeClaimCondition{pvcCondition}
	newPVC := pvc.DeepCopy()
	newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
	_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
	return err
}

// MarkResizeFinished marks all resizing as done
func MarkResizeFinished(
	pvc *v1.PersistentVolumeClaim,
	newSize resource.Quantity,
	kubeClient clientset.Interface) error {
	return MarkFSResizeFinished(pvc, newSize, kubeClient)
}

// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
	pvc *v1.PersistentVolumeClaim,
-	capacity v1.ResourceList,
+	newSize resource.Quantity,
	kubeClient clientset.Interface) error {
	newPVC := pvc.DeepCopy()
-	newPVC.Status.Capacity = capacity
+	newPVC.Status.Capacity[v1.ResourceStorage] = newSize
	newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
	_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
	return err
}

// PatchPVCStatus updates PVC status using PATCH verb
+// Don't use Update because this can be called from kubelet and if kubelet has an older client its
+// Updates will overwrite new fields. And to avoid writing to a stale object, add ResourceVersion
+// to the patch so that Patch will fail if the patch's RV != actual up-to-date RV like Update would
func PatchPVCStatus(
	oldPVC *v1.PersistentVolumeClaim,
	newPVC *v1.PersistentVolumeClaim,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
-	pvcName := oldPVC.Name
+	patchBytes, err := createPVCPatch(oldPVC, newPVC)
	if err != nil {
		return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, err)
	}

	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
		Patch(oldPVC.Name, types.StrategicMergePatchType, patchBytes, "status")
	if updateErr != nil {
		return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr)
	}
	return updatedClaim, nil
}

+func createPVCPatch(
+	oldPVC *v1.PersistentVolumeClaim,
+	newPVC *v1.PersistentVolumeClaim) ([]byte, error) {
	oldData, err := json.Marshal(oldPVC)
	if err != nil {
-		return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal oldData for pvc %q with %v", pvcName, err)
+		return nil, fmt.Errorf("failed to marshal old data: %v", err)
	}

	newData, err := json.Marshal(newPVC)
	if err != nil {
-		return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal newData for pvc %q with %v", pvcName, err)
+		return nil, fmt.Errorf("failed to marshal new data: %v", err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
	if err != nil {
-		return nil, fmt.Errorf("PatchPVCStatus.Failed to CreateTwoWayMergePatch for pvc %q with %v ", pvcName, err)
+		return nil, fmt.Errorf("failed to create 2 way merge patch: %v", err)
	}
-	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
-		Patch(pvcName, types.StrategicMergePatchType, patchBytes, "status")
-	if updateErr != nil {
-		return nil, fmt.Errorf("PatchPVCStatus.Failed to patch PVC %q with %v", pvcName, updateErr)
+
+	patchBytes, err = addResourceVersion(patchBytes, oldPVC.ResourceVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to add resource version: %v", err)
	}
-	return updatedClaim, nil
+
+	return patchBytes, nil
}

+func addResourceVersion(patchBytes []byte, resourceVersion string) ([]byte, error) {
+	var patchMap map[string]interface{}
+	err := json.Unmarshal(patchBytes, &patchMap)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling patch: %v", err)
+	}
+	u := unstructured.Unstructured{Object: patchMap}
+	a, err := meta.Accessor(&u)
+	if err != nil {
+		return nil, fmt.Errorf("error creating accessor: %v", err)
+	}
+	a.SetResourceVersion(resourceVersion)
+	versionBytes, err := json.Marshal(patchMap)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling json patch: %v", err)
+	}
+	return versionBytes, nil
+}

// MergeResizeConditionOnPVC updates pvc with requested resize conditions
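A standalone sketch of what addResourceVersion accomplishes: embedding metadata.resourceVersion in the patch turns the PATCH into a compare-and-swap, because the API server rejects a patch whose resourceVersion no longer matches the live object. The vendored helper goes through meta.Accessor on an unstructured object; this stdlib-only replica is equivalent in spirit:

// Illustrative only: inject a resourceVersion into a JSON merge patch.
package main

import (
	"encoding/json"
	"fmt"
)

func addResourceVersion(patchBytes []byte, resourceVersion string) ([]byte, error) {
	var patchMap map[string]interface{}
	if err := json.Unmarshal(patchBytes, &patchMap); err != nil {
		return nil, err
	}
	meta, ok := patchMap["metadata"].(map[string]interface{})
	if !ok {
		meta = map[string]interface{}{}
		patchMap["metadata"] = meta
	}
	meta["resourceVersion"] = resourceVersion
	return json.Marshal(patchMap)
}

func main() {
	patch := []byte(`{"status":{"capacity":{"storage":"2Gi"}}}`)
	out, err := addResourceVersion(patch, "12345")
	fmt.Println(string(out), err)
}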
2 vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go generated vendored
@@ -101,7 +101,7 @@ func safeOpenSubPath(mounter mount.Interface, subpath Subpath) (int, error) {
func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, string, error) {
	// Early check for already bind-mounted subpath.
	bindPathTarget := getSubpathBindTarget(subpath)
-	notMount, err := mounter.IsNotMountPoint(bindPathTarget)
+	notMount, err := mount.IsNotMountPoint(mounter, bindPathTarget)
	if err != nil {
		if !os.IsNotExist(err) {
			return false, "", fmt.Errorf("error checking path %s for mount: %s", bindPathTarget, err)
27 vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go generated vendored
@@ -17,7 +17,10 @@ limitations under the License.
// Package types defines types used only by volume components
package types

-import "k8s.io/apimachinery/pkg/types"
+import (
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/runtime"
+)

// UniquePodName defines the type to key pods off of
type UniquePodName types.UID
@@ -28,7 +31,29 @@
// GeneratedOperations contains the operation that is created as well as
// supporting functions required for the operation executor
type GeneratedOperations struct {
+	// Name of operation - could be used for resetting shared exponential backoff
+	OperationName     string
	OperationFunc     func() (eventErr error, detailedErr error)
	EventRecorderFunc func(*error)
	CompleteFunc      func(*error)
}
+
+// Run executes the operations and its supporting functions
+func (o *GeneratedOperations) Run() (eventErr, detailedErr error) {
+	if o.CompleteFunc != nil {
+		defer o.CompleteFunc(&detailedErr)
+	}
+	if o.EventRecorderFunc != nil {
+		defer o.EventRecorderFunc(&eventErr)
+	}
+	// Handle panic, if any, from operationFunc()
+	defer runtime.RecoverFromPanic(&detailedErr)
+	return o.OperationFunc()
+}
+
+const (
+	// VolumeResizerKey is key that will be used to store resizer used
+	// for resizing PVC. The generated key/value pair will be added
+	// as an annotation to the PVC.
+	VolumeResizerKey = "volume.kubernetes.io/storage-resizer"
+)
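A standalone sketch of how GeneratedOperations sequences its hooks; this is a local replica of the struct for illustration (the panic-recovery defer is omitted), not the vendored type:

// Illustrative only: deferred hooks observe the final error values because
// Run uses named return parameters.
package main

import "fmt"

type generatedOperations struct {
	OperationFunc     func() (eventErr error, detailedErr error)
	EventRecorderFunc func(*error)
	CompleteFunc      func(*error)
}

func (o *generatedOperations) Run() (eventErr, detailedErr error) {
	if o.CompleteFunc != nil {
		defer o.CompleteFunc(&detailedErr) // deferred first, so it runs last
	}
	if o.EventRecorderFunc != nil {
		defer o.EventRecorderFunc(&eventErr)
	}
	return o.OperationFunc()
}

func main() {
	op := &generatedOperations{
		OperationFunc:     func() (error, error) { return nil, nil },
		EventRecorderFunc: func(err *error) { fmt.Println("record event, err =", *err) },
		CompleteFunc:      func(err *error) { fmt.Println("complete, err =", *err) },
	}
	op.Run()
}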
24 vendor/k8s.io/kubernetes/pkg/volume/util/util.go generated vendored
@@ -20,7 +20,6 @@ import (
	"fmt"
	"io/ioutil"
	"os"
-	"path"
	"path/filepath"
	"reflect"
	"strings"
@@ -58,6 +57,10 @@ const (
	// that decides if pod volumes are unmounted when pod is terminated
	KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"

+	// MountsInGlobalPDPath is name of the directory appended to a volume plugin
+	// name to create the place for volume mounts in the global PD path.
+	MountsInGlobalPDPath = "mounts"
+
	// VolumeGidAnnotationKey is the name of the annotation on the PersistentVolume
	// object that specifies a supplemental GID.
	VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
@@ -71,7 +74,7 @@ const (
	// called 'ready' in the given directory and returns
	// true if that file exists.
func IsReady(dir string) bool {
-	readyFile := path.Join(dir, readyFileName)
+	readyFile := filepath.Join(dir, readyFileName)
	s, err := os.Stat(readyFile)
	if err != nil {
		return false
@@ -94,7 +97,7 @@ func SetReady(dir string) {
		return
	}

-	readyFile := path.Join(dir, readyFileName)
+	readyFile := filepath.Join(dir, readyFileName)
	file, err := os.Create(readyFile)
	if err != nil {
		klog.Errorf("Can't touch %s: %v", readyFile, err)
@@ -530,3 +533,18 @@ func MapBlockVolume(

	return nil
}

+// GetPluginMountDir returns the global mount directory name appended
+// to the given plugin name's plugin directory
+func GetPluginMountDir(host volume.VolumeHost, name string) string {
+	mntDir := filepath.Join(host.GetPluginDir(name), MountsInGlobalPDPath)
+	return mntDir
+}
+
+// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
+// volume vs. some other type
+func IsLocalEphemeralVolume(volume v1.Volume) bool {
+	return volume.GitRepo != nil ||
+		(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
+		volume.ConfigMap != nil || volume.DownwardAPI != nil
+}
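A standalone note on the path to filepath migration in this hunk: package path always uses forward slashes (it is meant for URL-style paths), while path/filepath uses the host OS separator, which is what filesystem code wants:

// Illustrative only: the observable difference between the two packages.
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	fmt.Println(path.Join("dir", "ready"))     // always "dir/ready"
	fmt.Println(filepath.Join("dir", "ready")) // "dir\\ready" on Windows
}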
13 vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go generated vendored
@@ -20,7 +20,6 @@ import (
	"fmt"
	"io/ioutil"
	"os"
-	"path"
	"path/filepath"

	"k8s.io/klog"
@@ -102,7 +101,7 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName
	// Remove old symbolic link(or file) then create new one.
	// This should be done because current symbolic link is
	// stale across node reboot.
-	linkPath := path.Join(mapPath, string(linkName))
+	linkPath := filepath.Join(mapPath, string(linkName))
	if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return err
	}
@@ -119,7 +118,7 @@ func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
	klog.V(5).Infof("UnmapDevice: linkName %s", linkName)

	// Check symbolic link exists
-	linkPath := path.Join(mapPath, string(linkName))
+	linkPath := filepath.Join(mapPath, string(linkName))
	if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
		return checkErr
	} else if !islinkExist {
@@ -176,13 +175,13 @@ func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string)
			continue
		}
		filename := file.Name()
-		filepath, err := os.Readlink(path.Join(mapPath, filename))
+		fp, err := os.Readlink(filepath.Join(mapPath, filename))
		if err != nil {
			return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
		}
-		klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
-		if filepath == devPath {
-			refs = append(refs, path.Join(mapPath, filename))
+		klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", fp, devPath)
+		if fp == devPath {
+			refs = append(refs, filepath.Join(mapPath, filename))
		}
	}
	klog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
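For background on the rename to fp in the last hunk: a local variable named filepath shadows the imported package for the rest of the scope, which is why the old code had to keep using package path. A standalone sketch of the shadowing:

// Illustrative only: variable shadowing of an imported package name.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	link := filepath.Join("/dev", "sdb") // package still visible here
	filepath := link                     // from here on, filepath is a string
	// filepath.Join(...) would no longer compile in this scope, which is
	// why the vendored code renames the variable to fp.
	fmt.Println(filepath)
}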
11 vendor/k8s.io/kubernetes/pkg/volume/volume.go generated vendored
@@ -101,6 +101,13 @@ type Attributes struct {
	SupportsSELinux bool
}

+// MounterArgs provides more easily extensible arguments to Mounter
+type MounterArgs struct {
+	FsGroup     *int64
+	DesiredSize *resource.Quantity
+	PodUID      string
+}
+
// Mounter interface provides methods to set up/mount the volume.
type Mounter interface {
	// Uses Interface to provide the path for Docker binds.
@@ -122,14 +129,14 @@ type Mounter interface {
	// content should be owned by 'fsGroup' so that it can be
	// accessed by the pod. This may be called more than once, so
	// implementations must be idempotent.
-	SetUp(fsGroup *int64) error
+	SetUp(mounterArgs MounterArgs) error
	// SetUpAt prepares and mounts/unpacks the volume to the
	// specified directory path, which may or may not exist yet.
	// The mount point and its content should be owned by
	// 'fsGroup' so that it can be accessed by the pod. This may
	// be called more than once, so implementations must be
	// idempotent.
-	SetUpAt(dir string, fsGroup *int64) error
+	SetUpAt(dir string, mounterArgs MounterArgs) error
	// GetAttributes returns the attributes of the mounter.
	// This function is called after SetUp()/SetUpAt().
	GetAttributes() Attributes
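A hypothetical sketch of a plugin adapting to the new signatures above: SetUp now receives a MounterArgs struct instead of a bare *int64 fsGroup, so new fields (DesiredSize, PodUID) can be added without breaking every implementer again. The mounter type, its body, and the local stand-in struct are invented; only the method shapes mirror the interface:

// Hypothetical sketch only.
package main

import "fmt"

type mounterArgs struct { // local stand-in for volume.MounterArgs
	FsGroup *int64
	PodUID  string
}

type demoMounter struct{ dir string }

// SetUp delegates to SetUpAt with the volume's own directory, the common
// pattern among in-tree plugins.
func (m *demoMounter) SetUp(args mounterArgs) error {
	return m.SetUpAt(m.dir, args)
}

func (m *demoMounter) SetUpAt(dir string, args mounterArgs) error {
	fmt.Printf("mount at %s for pod %s\n", dir, args.PodUID)
	return nil
}

func main() {
	m := &demoMounter{dir: "/var/lib/kubelet/pods/x/volumes/demo"}
	_ = m.SetUp(mounterArgs{PodUID: "x"})
}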
Block a user