Mirror of https://github.com/ceph/ceph-csi.git
Remove mount cache for cephfs
PR #282 introduced the mount cache to work around a CephFS FUSE mount issue that occurs when the cephfs plugin pod restarts. The cache does not work as intended, so this PR removes the code to improve maintainability.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
commit 034b123478
parent 669dc4536f
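The two cmd/cephcsi hunks below keep the `mountcachedir` flag registered but turn it into a deprecated no-op: setting it only logs a warning. A minimal sketch of the resulting behaviour, assuming a trimmed-down `Config` struct and `main` (everything except the flag handling is elided, and the real driver start-up is only hinted at):

// Sketch only: Config, MountCacheDir and the klog warning come from the diff
// below; the rest of the cephcsi command is simplified away.
package main

import (
    "flag"

    "k8s.io/klog"
)

type Config struct {
    MountCacheDir string
}

var conf Config

func init() {
    // The flag stays registered so existing manifests still parse,
    // but it is now documented as deprecated.
    flag.StringVar(&conf.MountCacheDir, "mountcachedir", "", "mount info cache save dir")
}

func main() {
    flag.Parse()
    // Instead of initializing and replaying a mount cache, the driver
    // only warns when the deprecated option is still set.
    if conf.MountCacheDir != "" {
        klog.Warning("mountcachedir option is deprecated")
    }
    // ... construct and run the cephfs driver as before ...
}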
@@ -85,7 +85,6 @@ spec:
             - "--v=5"
             - "--drivername=$(DRIVER_NAME)"
             - "--metadatastorage=k8s_configmap"
-            - "--mountcachedir=/mount-cache-dir"
           env:
             - name: POD_IP
               valueFrom:
@@ -109,8 +108,6 @@ spec:
               add: ["SYS_ADMIN"]
             allowPrivilegeEscalation: true
           volumeMounts:
-            - name: mount-cache-dir
-              mountPath: /mount-cache-dir
             - name: socket-dir
               mountPath: /csi
             - name: mountpoint-dir
@@ -161,8 +158,6 @@ spec:
 {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
 {{- end }}
       volumes:
-        - name: mount-cache-dir
-          emptyDir: {}
         - name: socket-dir
          hostPath:
            path: {{ .Values.socketDir }}
@@ -60,6 +60,7 @@ func init() {
     flag.BoolVar(&conf.IsNodeServer, "nodeserver", false, "start cephcsi node server")
 
     // cephfs related flags
+    // marking this as deprecated, remove it in next major release
     flag.StringVar(&conf.MountCacheDir, "mountcachedir", "", "mount info cache save dir")
     flag.BoolVar(&conf.ForceKernelCephFS, "forcecephkernelclient", false, "enable Ceph Kernel clients on kernel < 4.17 which support quotas")
 
@@ -172,6 +173,9 @@ func main() {
         driver.Run(&conf, cp)
 
     case cephfsType:
+        if conf.MountCacheDir != "" {
+            klog.Warning("mountcachedir option is deprecated")
+        }
         driver := cephfs.NewDriver()
         driver.Run(&conf, cp)
 
@@ -63,7 +63,6 @@ spec:
             - "--v=5"
             - "--drivername=cephfs.csi.ceph.com"
             - "--metadatastorage=k8s_configmap"
-            - "--mountcachedir=/mount-cache-dir"
             - "--metricsport=8090"
             - "--metricspath=/metrics"
             - "--enablegrpcmetrics=false"
@@ -84,8 +83,6 @@ spec:
               value: unix:///csi/csi.sock
           imagePullPolicy: "IfNotPresent"
           volumeMounts:
-            - name: mount-cache-dir
-              mountPath: /mount-cache-dir
             - name: socket-dir
               mountPath: /csi
             - name: mountpoint-dir
@@ -130,8 +127,6 @@ spec:
             mountPath: /csi
           imagePullPolicy: "IfNotPresent"
       volumes:
-        - name: mount-cache-dir
-          emptyDir: {}
         - name: socket-dir
           hostPath:
             path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
@@ -43,24 +43,23 @@ that should be resolved in v14.2.3.
 
 **Available command line arguments:**
 
 | Option | Default value | Description |
-| -------------------------------- | --------------------------- | --------------------------------------------- |
+| ------------------------- | --------------------------- | --------------------------------------------- |
 | `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
 | `--drivername` | `cephfs.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
 | `--nodeid` | _empty_ | This node's ID |
 | `--type` | _empty_ | Driver type `[rbd | cephfs]` If the driver type is set to `rbd` it will act as a `rbd plugin` or if it's set to `cephfs` will act as a `cephfs plugin` |
-| `--mountcachedir` | _empty_ | Volume mount cache info save dir. If left unspecified, the dirver will not record mount info, or it will save mount info and when driver restart it will remount volume it cached. |
 | `--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning |
 | `--pluginpath` | "/var/lib/kubelet/plugins/" | The location of cephcsi plugin on host |
 | `--metadatastorage` | _empty_ | Points to where older (1.0.0 or older plugin versions) metadata about provisioned volumes are kept, as file or in as k8s configmap (`node` or `k8s_configmap` respectively) |
 | `--pidlimit` | _0_ | Configure the PID limit in cgroups. The container runtime can restrict the number of processes/tasks which can cause problems while provisioning (or deleting) a large number of volumes. A value of `-1` configures the limit to the maximum, `0` does not configure limits at all. |
 | `--metricsport` | `8080` | TCP port for /grpc metrics requests |
 | `--metricspath` | `/metrics` | Path of prometheus endpoint where metrics will be available |
 | `--enablegrpcmetrics` | `false` | Enable grpc metrics collection and start prometheus server |
 | `--polltime` | `60s` | Time interval in between each poll |
 | `--timeout` | `3s` | Probe timeout in seconds |
 | `--histogramoption` | `0.5,2,6` | Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
 | `--forcecephkernelclient` | `false` | Force enabling Ceph Kernel clients for mounting on kernels < 4.17 |
 
 **NOTE:** The parameter `-forcecephkernelclient` enables the Kernel
 CephFS mounter on kernels < 4.17.
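The `--histogramoption` entry above gives its value as comma separated start, factor and count for the gRPC metrics histogram. Assuming those three numbers feed an exponential-bucket helper such as Prometheus' `ExponentialBuckets` (an assumption made for illustration; the table does not say which helper is used), the default `0.5,2,6` expands as follows:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Hypothetical illustration of "--histogramoption=0.5,2,6":
    // start=0.5, factor=2, count=6.
    buckets := prometheus.ExponentialBuckets(0.5, 2, 6)
    fmt.Println(buckets) // [0.5 1 2 4 8 16]
}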
@@ -115,14 +115,6 @@ func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
     // Update namespace for storing keys into a specific namespace on RADOS, in the CephFS
     // metadata pool
     volJournal.SetNamespace(radosNamespace)
 
-    initVolumeMountCache(conf.DriverName, conf.MountCacheDir)
-    if conf.MountCacheDir != "" {
-        if err := remountCachedVolumes(); err != nil {
-            klog.Warningf("failed to remount cached volumes: %v", err)
-            // ignore remount fail
-        }
-    }
     // Initialize default library driver
-
     fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
@@ -1,330 +0,0 @@
-/*
-Copyright 2019 The Ceph-CSI Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cephfs
-
-import (
-    "context"
-    "encoding/base64"
-    "os"
-    "sync"
-    "syscall"
-    "time"
-
-    "github.com/ceph/ceph-csi/pkg/util"
-    "github.com/pkg/errors"
-    "k8s.io/klog"
-)
-
-type volumeMountCacheEntry struct {
-    DriverVersion string `json:"driverVersion"`
-
-    VolumeID    string            `json:"volumeID"`
-    Mounter     string            `json:"mounter"`
-    Secrets     map[string]string `json:"secrets"`
-    StagingPath string            `json:"stagingPath"`
-    TargetPaths map[string]bool   `json:"targetPaths"`
-    CreateTime  time.Time         `json:"createTime"`
-}
-
-type volumeMountCacheMap struct {
-    volumes        map[string]volumeMountCacheEntry
-    nodeCacheStore util.NodeCache
-}
-
-var (
-    volumeMountCachePrefix = "cephfs-mount-cache-"
-    volumeMountCache       volumeMountCacheMap
-    volumeMountCacheMtx    sync.Mutex
-)
-
-func initVolumeMountCache(driverName, mountCacheDir string) {
-    volumeMountCache.volumes = make(map[string]volumeMountCacheEntry)
-
-    volumeMountCache.nodeCacheStore.BasePath = mountCacheDir
-    volumeMountCache.nodeCacheStore.CacheDir = driverName
-    klog.Infof("mount-cache: name: %s, version: %s, mountCacheDir: %s", driverName, util.DriverVersion, mountCacheDir)
-}
-
-func remountCachedVolumes() error {
-    if err := util.CreateMountPoint(volumeMountCache.nodeCacheStore.BasePath); err != nil {
-        klog.Errorf("mount-cache: failed to create %s: %v", volumeMountCache.nodeCacheStore.BasePath, err)
-        return err
-    }
-    var remountFailCount, remountSuccCount int64
-    me := &volumeMountCacheEntry{}
-    err := volumeMountCache.nodeCacheStore.ForAll(volumeMountCachePrefix, me, func(identifier string) error {
-        volID := me.VolumeID
-        if volOpts, vid, err := newVolumeOptionsFromVolID(context.TODO(), me.VolumeID, nil, decodeCredentials(me.Secrets)); err != nil {
-            if err, ok := err.(util.ErrKeyNotFound); ok {
-                klog.Infof("mount-cache: image key not found, assuming the volume %s to be already deleted (%v)", volID, err)
-                if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
-                    klog.Infof("mount-cache: metadata not found, delete volume cache entry for volume %s", volID)
-                }
-            }
-        } else {
-            // update Mounter from mount cache
-            volOpts.Mounter = me.Mounter
-            if err := mountOneCacheEntry(volOpts, vid, me); err == nil {
-                remountSuccCount++
-                volumeMountCache.volumes[me.VolumeID] = *me
-                klog.Infof("mount-cache: successfully remounted volume %s", volID)
-            } else {
-                remountFailCount++
-                klog.Errorf("mount-cache: failed to remount volume %s", volID)
-            }
-        }
-        return nil
-    })
-    if err != nil {
-        klog.Infof("mount-cache: metastore list cache fail %v", err)
-        return err
-    }
-    if remountFailCount > 0 {
-        klog.Infof("mount-cache: successfully remounted %d volumes, failed to remount %d volumes", remountSuccCount, remountFailCount)
-    } else {
-        klog.Infof("mount-cache: successfully remounted %d volumes", remountSuccCount)
-    }
-    return nil
-}
-
-func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *volumeMountCacheEntry) error {
-    volumeMountCacheMtx.Lock()
-    defer volumeMountCacheMtx.Unlock()
-
-    var (
-        err error
-        cr  *util.Credentials
-    )
-    volID := vid.VolumeID
-
-    if volOptions.ProvisionVolume {
-        cr, err = util.NewAdminCredentials(decodeCredentials(me.Secrets))
-        if err != nil {
-            return err
-        }
-        defer cr.DeleteCredentials()
-
-        volOptions.RootPath, err = getVolumeRootPathCeph(context.TODO(), volOptions, cr, volumeID(vid.FsSubvolName))
-        if err != nil {
-            return err
-        }
-    } else {
-        cr, err = util.NewUserCredentials(decodeCredentials(me.Secrets))
-        if err != nil {
-            return err
-        }
-        defer cr.DeleteCredentials()
-    }
-
-    err = cleanupMountPoint(me.StagingPath)
-    if err != nil {
-        klog.Infof("mount-cache: failed to cleanup volume mount point %s, remove it: %s %v", volID, me.StagingPath, err)
-        return err
-    }
-
-    isMnt, err := util.IsMountPoint(me.StagingPath)
-    if err != nil {
-        isMnt = false
-        klog.Infof("mount-cache: failed to check volume mounted %s: %s %v", volID, me.StagingPath, err)
-    }
-
-    if !isMnt {
-        m, err := newMounter(volOptions)
-        if err != nil {
-            klog.Errorf("mount-cache: failed to create mounter for volume %s: %v", volID, err)
-            return err
-        }
-        if err := m.mount(context.TODO(), me.StagingPath, cr, volOptions); err != nil {
-            klog.Errorf("mount-cache: failed to mount volume %s: %v", volID, err)
-            return err
-        }
-    }
-
-    mountOptions := []string{"bind"}
-    for targetPath, readOnly := range me.TargetPaths {
-        if err := cleanupMountPoint(targetPath); err == nil {
-            if err := bindMount(context.TODO(), me.StagingPath, targetPath, readOnly, mountOptions); err != nil {
-                klog.Errorf("mount-cache: failed to bind-mount volume %s: %s %s %v %v",
-                    volID, me.StagingPath, targetPath, readOnly, err)
-            } else {
-                klog.Infof("mount-cache: successfully bind-mounted volume %s: %s %s %v",
-                    volID, me.StagingPath, targetPath, readOnly)
-            }
-        }
-    }
-    return nil
-}
-
-func cleanupMountPoint(mountPoint string) error {
-    if _, err := os.Stat(mountPoint); err != nil {
-        if isCorruptedMnt(err) {
-            klog.Infof("mount-cache: corrupted mount point %s, need unmount", mountPoint)
-            err := execCommandErr(context.TODO(), "umount", mountPoint)
-            if err != nil {
-                klog.Infof("mount-cache: failed to umount %s %v", mountPoint, err)
-                // ignore error return err
-            }
-        }
-    }
-    if _, err := os.Stat(mountPoint); err != nil {
-        klog.Errorf("mount-cache: failed to stat mount point %s %v", mountPoint, err)
-        return err
-    }
-    return nil
-}
-
-func isCorruptedMnt(err error) bool {
-    var underlyingError error
-    switch pe := err.(type) {
-    case nil:
-        return false
-    case *os.PathError:
-        underlyingError = pe.Err
-    case *os.LinkError:
-        underlyingError = pe.Err
-    case *os.SyscallError:
-        underlyingError = pe.Err
-    default:
-        return false
-    }
-
-    CorruptedErrors := []error{
-        syscall.ENOTCONN, syscall.ESTALE, syscall.EIO, syscall.EACCES}
-
-    for _, v := range CorruptedErrors {
-        if underlyingError == v {
-            return true
-        }
-    }
-    return false
-}
-
-func genVolumeMountCacheFileName(volID string) string {
-    cachePath := volumeMountCachePrefix + volID
-    return cachePath
-}
-func (mc *volumeMountCacheMap) isEnable() bool {
-    // if mount cache dir unset, disable state
-    return mc.nodeCacheStore.BasePath != ""
-}
-
-func (mc *volumeMountCacheMap) nodeStageVolume(ctx context.Context, volID, stagingTargetPath, mounter string, secrets map[string]string) error {
-    if !mc.isEnable() {
-        return nil
-    }
-    volumeMountCacheMtx.Lock()
-    defer volumeMountCacheMtx.Unlock()
-
-    lastTargetPaths := make(map[string]bool)
-    me, ok := volumeMountCache.volumes[volID]
-    if ok {
-        if me.StagingPath == stagingTargetPath {
-            klog.Warningf(util.Log(ctx, "mount-cache: node unexpected restage volume for volume %s"), volID)
-            return nil
-        }
-        lastTargetPaths = me.TargetPaths
-        klog.Warningf(util.Log(ctx, "mount-cache: node stage volume ignore last cache entry for volume %s"), volID)
-    }
-
-    me = volumeMountCacheEntry{DriverVersion: util.DriverVersion}
-
-    me.VolumeID = volID
-    me.Secrets = encodeCredentials(secrets)
-    me.StagingPath = stagingTargetPath
-    me.TargetPaths = lastTargetPaths
-    me.Mounter = mounter
-
-    me.CreateTime = time.Now()
-    volumeMountCache.volumes[volID] = me
-    return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
-}
-
-func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error {
-    if !mc.isEnable() {
-        return nil
-    }
-    volumeMountCacheMtx.Lock()
-    defer volumeMountCacheMtx.Unlock()
-    delete(volumeMountCache.volumes, volID)
-    return mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID))
-}
-
-func (mc *volumeMountCacheMap) nodePublishVolume(ctx context.Context, volID, targetPath string, readOnly bool) error {
-    if !mc.isEnable() {
-        return nil
-    }
-    volumeMountCacheMtx.Lock()
-    defer volumeMountCacheMtx.Unlock()
-
-    _, ok := volumeMountCache.volumes[volID]
-    if !ok {
-        return errors.New("mount-cache: node publish volume failed to find cache entry for volume")
-    }
-    volumeMountCache.volumes[volID].TargetPaths[targetPath] = readOnly
-    return mc.updateNodeCache(ctx, volID)
-}
-
-func (mc *volumeMountCacheMap) nodeUnPublishVolume(ctx context.Context, volID, targetPath string) error {
-    if !mc.isEnable() {
-        return nil
-    }
-    volumeMountCacheMtx.Lock()
-    defer volumeMountCacheMtx.Unlock()
-
-    _, ok := volumeMountCache.volumes[volID]
-    if !ok {
-        return errors.New("mount-cache: node unpublish volume failed to find cache entry for volume")
-    }
-    delete(volumeMountCache.volumes[volID].TargetPaths, targetPath)
-    return mc.updateNodeCache(ctx, volID)
-}
-
-func (mc *volumeMountCacheMap) updateNodeCache(ctx context.Context, volID string) error {
-    me := volumeMountCache.volumes[volID]
-    if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
-        klog.Infof(util.Log(ctx, "mount-cache: metadata not found, delete mount cache failed for volume %s"), volID)
-    }
-    return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
-}
-
-func encodeCredentials(input map[string]string) (output map[string]string) {
-    output = make(map[string]string)
-    for key, value := range input {
-        nKey := base64.StdEncoding.EncodeToString([]byte(key))
-        nValue := base64.StdEncoding.EncodeToString([]byte(value))
-        output[nKey] = nValue
-    }
-    return output
-}
-
-func decodeCredentials(input map[string]string) (output map[string]string) {
-    output = make(map[string]string)
-    for key, value := range input {
-        nKey, err := base64.StdEncoding.DecodeString(key)
-        if err != nil {
-            klog.Errorf("mount-cache: decode secret fail")
-            continue
-        }
-        nValue, err := base64.StdEncoding.DecodeString(value)
-        if err != nil {
-            klog.Errorf("mount-cache: decode secret fail")
-            continue
-        }
-        output[string(nKey)] = string(nValue)
-    }
-    return output
-}
@@ -1,38 +0,0 @@
-package cephfs
-
-import (
-    "testing"
-)
-
-func init() {
-}
-
-func TestMountOneCacheEntry(t *testing.T) {
-}
-
-func TestRemountHisMountedPath(t *testing.T) {
-}
-
-func TestNodeStageVolume(t *testing.T) {
-}
-
-func TestNodeUnStageVolume(t *testing.T) {
-}
-
-func TestNodePublishVolume(t *testing.T) {
-}
-
-func TestNodeUnpublishVolume(t *testing.T) {
-}
-
-func TestEncodeDecodeCredentials(t *testing.T) {
-    secrets := make(map[string]string)
-    secrets["user_1"] = "value_1"
-    enSecrets := encodeCredentials(secrets)
-    deSecrets := decodeCredentials(enSecrets)
-    for key, value := range secrets {
-        if deSecrets[key] != value {
-            t.Errorf("key %s of credentials's value %s change after decode %s ", key, value, deSecrets[key])
-        }
-    }
-}
@@ -154,9 +154,6 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
         klog.Errorf(util.Log(ctx, "failed to mount volume %s: %v"), volID, err)
         return status.Error(codes.Internal, err.Error())
     }
-    if err := volumeMountCache.nodeStageVolume(ctx, req.GetVolumeId(), stagingTargetPath, volOptions.Mounter, req.GetSecrets()); err != nil {
-        klog.Warningf(util.Log(ctx, "mount-cache: failed to stage volume %s %s: %v"), volID, stagingTargetPath, err)
-    }
     return nil
 }
 
@@ -209,10 +206,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
         return nil, status.Error(codes.Internal, err.Error())
     }
 
-    if err = volumeMountCache.nodePublishVolume(ctx, volID, targetPath, req.GetReadonly()); err != nil {
-        klog.Warningf(util.Log(ctx, "mount-cache: failed to publish volume %s %s: %v"), volID, targetPath, err)
-    }
-
     klog.Infof(util.Log(ctx, "cephfs: successfully bind-mounted volume %s to %s"), volID, targetPath)
 
     // #nosec - allow anyone to write inside the target path
@@ -241,10 +234,6 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
     }
     defer ns.VolumeLocks.Release(volID)
 
-    if err = volumeMountCache.nodeUnPublishVolume(ctx, volID, targetPath); err != nil {
-        klog.Warningf(util.Log(ctx, "mount-cache: failed to unpublish volume %s %s: %v"), volID, targetPath, err)
-    }
-
     // Unmount the bind-mount
     if err = unmountVolume(ctx, targetPath); err != nil {
         return nil, status.Error(codes.Internal, err.Error())
@@ -275,11 +264,6 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
     defer ns.VolumeLocks.Release(volID)
 
     stagingTargetPath := req.GetStagingTargetPath()
-
-    if err = volumeMountCache.nodeUnStageVolume(volID); err != nil {
-        klog.Warningf(util.Log(ctx, "mount-cache: failed to unstage volume %s %s: %v"), volID, stagingTargetPath, err)
-    }
-
     // Unmount the volume
     if err = unmountVolume(ctx, stagingTargetPath); err != nil {
         return nil, status.Error(codes.Internal, err.Error())