mirror of https://github.com/ceph/ceph-csi.git, synced 2024-11-26 16:20:28 +00:00
b318964af5
issue #217

Goal: when the CSI plugin exits unexpectedly, a pod using a CephFS PV cannot recover automatically, because the mount relation is lost until the pod is killed and rescheduled to another node. The plugin can do more here: by remounting the old paths on restart, the previously staged mount points become usable again and the pod recovers without being rescheduled.

Non-goal: making the pod exit and restart when the CSI plugin pod exits and the mount point is lost. If the pod does not exit, it will see "transport endpoint is not connected" errors.

Implemented logic

csi-plugin start:
1. load all MountCacheEntry records from the node-local dir
2. check whether volID still exists in the cluster; if not, ignore the entry, otherwise continue
3. check whether stagingPath exists; if yes, mount the path
4. check whether all targetPaths exist; if yes, bind-mount them to the staging path

NodeServer:
1. NodeStageVolume: add a MountCacheEntry to the local dir, including the readonly attr and the Ceph secret
2. NodePublishVolume: add the pod bind-mount path to the MountCacheEntry and persist it to the local dir
3. NodeUnpublishVolume: remove the pod bind-mount path from the MountCacheEntry and persist it to the local dir
4. NodeUnstageVolume: remove the MountCacheEntry from the local dir
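A minimal, self-contained sketch of the start-up recovery pass the commit describes. The MountCacheEntry fields and the volumeExists/mountStaging/bindMount helpers are hypothetical stand-ins for the driver's real types and mount calls, not its actual API:

package main

import (
	"log"
	"os"
)

// MountCacheEntry is an illustrative version of the persisted record:
// the staged path plus every pod bind-mount target for one volume.
type MountCacheEntry struct {
	VolID       string
	StagingPath string
	TargetPaths []string
	ReadOnly    bool
}

// pathExists reports whether a path is still present on the node.
func pathExists(p string) bool {
	_, err := os.Stat(p)
	return err == nil
}

// remountCachedVolumes replays cached mount state at plugin start:
// entries whose volume no longer exists are skipped, the staging path
// is remounted, and each pod bind mount is re-created from it.
func remountCachedVolumes(
	entries []MountCacheEntry,
	volumeExists func(volID string) bool, // hypothetical cluster lookup
	mountStaging func(e MountCacheEntry) error, // hypothetical ceph mount
	bindMount func(staging, target string, ro bool) error, // hypothetical bind mount
) {
	for _, e := range entries {
		if !volumeExists(e.VolID) {
			continue // volume is gone from the cluster; drop the stale entry
		}
		if pathExists(e.StagingPath) {
			if err := mountStaging(e); err != nil {
				log.Printf("remount of %s failed: %v", e.VolID, err)
				continue
			}
		}
		for _, target := range e.TargetPaths {
			if pathExists(target) {
				if err := bindMount(e.StagingPath, target, e.ReadOnly); err != nil {
					log.Printf("bind mount %s failed: %v", target, err)
				}
			}
		}
	}
}

func main() {
	// No cached entries in this demo; a real plugin would first load
	// them from the node-local cache dir.
	remountCachedVolumes(nil,
		func(string) bool { return false },
		func(MountCacheEntry) error { return nil },
		func(string, string, bool) error { return nil },
	)
}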
64 lines
1.9 KiB
Go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util
import (
	"errors"

	"k8s.io/klog"
)

const (
	// PluginFolder defines location of plugins
	PluginFolder = "/var/lib/kubelet/plugins"
)
// ForAllFunc is a unary predicate for visiting all cache entries
// matching the `pattern' in CachePersister's ForAll function.
type ForAllFunc func(identifier string) error

// CacheEntryNotFound is an error type for "Not Found" cache errors
type CacheEntryNotFound struct {
	error
}
// CachePersister is the interface implemented by the metadata cache stores
type CachePersister interface {
	Create(identifier string, data interface{}) error
	Get(identifier string, data interface{}) error
	ForAll(pattern string, destObj interface{}, f ForAllFunc) error
	Delete(identifier string) error
}
// NewCachePersister returns a CachePersister for the configured metadata store
func NewCachePersister(metadataStore, driverName string) (CachePersister, error) {
	if metadataStore == "k8s_configmap" {
		klog.Infof("cache-persister: using kubernetes configmap as metadata cache persister")
		k8scm := &K8sCMCache{}
		k8scm.Client = NewK8sClient()
		k8scm.Namespace = GetK8sNamespace()
		return k8scm, nil
	} else if metadataStore == "node" {
		klog.Infof("cache-persister: using node as metadata cache persister")
		nc := &NodeCache{}
		nc.BasePath = PluginFolder + "/" + driverName
		nc.CacheDir = "controller"
		return nc, nil
	}
	return nil, errors.New("cache-persister: couldn't parse metadatastorage flag")
}
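A short usage sketch of the API above, as a driver might call it from the same package. The volumeEntry type and all identifiers are illustrative, and the not-found check assumes the stores report a miss as a *CacheEntryNotFound (as the store implementations do, not this file):

// volumeEntry is an illustrative payload; real callers persist their
// own volume metadata structs.
type volumeEntry struct {
	VolName string `json:"volName"`
	Pool    string `json:"pool"`
}

func exampleUsage() error {
	cp, err := NewCachePersister("node", "csi-cephfsplugin")
	if err != nil {
		return err
	}

	// Persist one entry under a driver-chosen identifier.
	if err := cp.Create("csi-cephfs-vol-1234", volumeEntry{VolName: "pvc-1234", Pool: "cephfs-data"}); err != nil {
		return err
	}

	// A miss surfaces as CacheEntryNotFound, which callers can treat as
	// "absent" rather than a hard failure (pointer return is assumed here).
	var e volumeEntry
	if err := cp.Get("csi-cephfs-vol-9999", &e); err != nil {
		if _, ok := err.(*CacheEntryNotFound); !ok {
			return err
		}
	}

	// Visit every persisted entry whose identifier matches the pattern;
	// destObj is populated before each callback invocation.
	dest := &volumeEntry{}
	return cp.ForAll("csi-cephfs-vol-", dest, func(identifier string) error {
		klog.Infof("found cached entry %s: %+v", identifier, *dest)
		return nil
	})
}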