vendor update for CSI 0.3.0

commit 8ea659f0d5
parent 6f484f92fc
Author: gman
Date: 2018-07-18 16:47:22 +02:00

6810 changed files with 438061 additions and 193861 deletions

View File

@@ -40,7 +40,7 @@ $ sudo ./_output/cinderplugin --endpoint tcp://127.0.0.1:10000 --cloud-config /e
```
### Test using csc
Get the ```csc``` tool from https://github.com/thecodeteam/gocsi/tree/master/csc
Get the ```csc``` tool from https://github.com/rexray/gocsi/tree/master/csc
#### Get plugin info
```
@@ -48,12 +48,6 @@ $ csc identity plugin-info --endpoint tcp://127.0.0.1:10000
"csi-cinderplugin" "0.1.0"
```
#### Get supported versions
```
$ csc identity supported-versions --endpoint tcp://127.0.0.1:10000
0.1.0
```
#### Create a volume
```
$ csc controller new --endpoint tcp://127.0.0.1:10000 CSIVolumeName

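The removed supported-versions step is expected: the CSI spec has dropped the GetSupportedVersions RPC, so 0.3.0 plugins no longer answer it. For scripting against the plugin without csc, here is a minimal sketch of the same plugin-info call, assuming the CSI 0.3.0 Go bindings and the tcp://127.0.0.1:10000 endpoint used throughout this README:

```
package main

import (
	"context"
	"fmt"
	"log"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"google.golang.org/grpc"
)

func main() {
	// Dial the plugin endpoint from the README examples.
	conn, err := grpc.Dial("127.0.0.1:10000", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial plugin: %v", err)
	}
	defer conn.Close()

	// GetPluginInfo is the 0.3.0 identity call; version negotiation
	// no longer happens over a dedicated RPC.
	client := csi.NewIdentityClient(conn)
	resp, err := client.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{})
	if err != nil {
		log.Fatalf("GetPluginInfo failed: %v", err)
	}
	fmt.Printf("%q %q\n", resp.GetName(), resp.GetVendorVersion())
}
```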
View File

@@ -30,7 +30,7 @@ spec:
serviceAccount: csi-attacher
containers:
- name: csi-attacher
image: docker.io/k8scsi/csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@@ -42,7 +42,7 @@ spec:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: cinder
image: docker.io/k8scsi/cinderplugin
image: quay.io/k8scsi/cinderplugin
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
@@ -53,13 +53,13 @@ spec:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /plugin
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true

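A detail that is easy to miss in this hunk: the sidecar's ADDRESS and the plugin's CSI_ENDPOINT must point at the same file inside the shared socket-dir volume, which is why plugin/csi.sock and /plugin move to csi/csi.sock and /csi together. A rough, self-contained sketch of how an endpoint string like this is split into network and address (illustrative only; the repo's csicommon package carries its own parsing helper):

```
package main

import (
	"fmt"
	"strings"
)

// parseEndpoint splits "unix://csi/csi.sock" into ("unix", "csi/csi.sock").
// Hypothetical helper for illustration, not the repo's exact code.
func parseEndpoint(ep string) (string, string, error) {
	for _, prefix := range []string{"unix://", "tcp://"} {
		if strings.HasPrefix(ep, prefix) {
			return strings.TrimSuffix(prefix, "://"), strings.TrimPrefix(ep, prefix), nil
		}
	}
	return "", "", fmt.Errorf("invalid endpoint: %s", ep)
}

func main() {
	network, addr, err := parseEndpoint("unix://csi/csi.sock")
	if err != nil {
		panic(err)
	}
	fmt.Println(network, addr) // unix csi/csi.sock
}
```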
View File

@@ -18,27 +18,27 @@ spec:
hostNetwork: true
containers:
- name: driver-registrar
image: docker.io/k8scsi/driver-registrar
image: quay.io/k8scsi/driver-registrar:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /plugin/csi.sock
value: /csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: socket-dir
mountPath: /csi
- name: cinder
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: docker.io/k8scsi/cinderplugin
image: quay.io/k8scsi/cinderplugin
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
@@ -49,13 +49,13 @@ spec:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
@@ -69,7 +69,7 @@ spec:
mountPath: /etc/config
readOnly: true
volumes:
- name: plugin-dir
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-cinderplugin
type: DirectoryOrCreate

View File

@@ -30,7 +30,7 @@ spec:
serviceAccount: csi-provisioner
containers:
- name: csi-provisioner
image: docker.io/k8scsi/csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.3.0
args:
- "--provisioner=csi-cinderplugin"
- "--csi-address=$(ADDRESS)"
@@ -42,7 +42,7 @@ spec:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: cinder
image: docker.io/k8scsi/cinderplugin
image: quay.io/k8scsi/cinderplugin
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
@@ -53,13 +53,13 @@ spec:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /plugin
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true

View File

@@ -42,7 +42,7 @@ const (
)
var (
version = "0.2.0"
version = "0.3.0"
)
func NewDriver(nodeID, endpoint string, cloudconfig string) *driver {

View File

@@ -33,16 +33,8 @@ type nodeServer struct {
func (ns *nodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest) (*csi.NodeGetIdResponse, error) {
// Get Mount Provider
m, err := mount.GetMountProvider()
nodeID, err := getNodeID()
if err != nil {
glog.V(3).Infof("Failed to GetMountProvider: %v", err)
return nil, err
}
nodeID, err := m.GetInstanceID()
if err != nil {
glog.V(3).Infof("Failed to GetInstanceID: %v", err)
return nil, err
}
@@ -56,6 +48,41 @@ func (ns *nodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest)
return ns.DefaultNodeServer.NodeGetId(ctx, req)
}
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
nodeID, err := getNodeID()
if err != nil {
return nil, err
}
if len(nodeID) > 0 {
return &csi.NodeGetInfoResponse{
NodeId: nodeID,
}, nil
}
// Using default function
return ns.DefaultNodeServer.NodeGetInfo(ctx, req)
}
func getNodeID() (string, error) {
// Get Mount Provider
m, err := mount.GetMountProvider()
if err != nil {
glog.V(3).Infof("Failed to GetMountProvider: %v", err)
return "", err
}
nodeID, err := m.GetInstanceID()
if err != nil {
glog.V(3).Infof("Failed to GetInstanceID: %v", err)
return "", err
}
return nodeID, nil
}
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
targetPath := req.GetTargetPath()

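The refactor funnels NodeGetId and NodeGetInfo through a single getNodeID helper, and the package-level mount.MInstance hook behind mount.GetMountProvider is what lets the test below substitute a MountMock. A generic, self-contained sketch of that test seam (names are illustrative, not the repo's exact API):

```
package main

import "fmt"

// Provider is the seam: production code asks the package-level Instance,
// tests overwrite it with a mock, mirroring mount.MInstance above.
type Provider interface {
	GetInstanceID() (string, error)
}

type realProvider struct{}

func (realProvider) GetInstanceID() (string, error) { return "real-node", nil }

var Instance Provider = realProvider{}

func getNodeID() (string, error) {
	return Instance.GetInstanceID()
}

func main() {
	id, _ := getNodeID()
	fmt.Println(id) // real-node; a test would swap Instance for a mock first
}
```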
View File

@@ -65,6 +65,36 @@ func TestNodeGetId(t *testing.T) {
assert.Equal(expectedRes, actualRes)
}
// Test NodeGetInfo
func TestNodeGetInfo(t *testing.T) {
// mock MountMock
mmock := new(mount.MountMock)
// GetInstanceID() (string, error)
mmock.On("GetInstanceID").Return(fakeNodeID, nil)
mount.MInstance = mmock
// Init assert
assert := assert.New(t)
// Expected Result
expectedRes := &csi.NodeGetInfoResponse{
NodeId: fakeNodeID,
}
// Fake request
fakeReq := &csi.NodeGetInfoRequest{}
// Invoke NodeGetInfo
actualRes, err := fakeNs.NodeGetInfo(fakeCtx, fakeReq)
if err != nil {
t.Errorf("failed to NodeGetInfo: %v", err)
}
// Assert
assert.Equal(expectedRes, actualRes)
}
// Test NodePublishVolume
func TestNodePublishVolume(t *testing.T) {

View File

@@ -85,3 +85,15 @@ func (cs *DefaultControllerServer) ControllerGetCapabilities(ctx context.Context
Capabilities: cs.Driver.cap,
}, nil
}
func (cs *DefaultControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (cs *DefaultControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (cs *DefaultControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

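These stubs give every driver built on csicommon a complete ControllerServer surface for the snapshot RPCs that CSI 0.3.0 introduces; a driver with real snapshot support overrides only what it needs. A hypothetical sketch of that embedding pattern (the response body is a placeholder, not working snapshot logic):

```
package mydriver

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
	csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)

// controllerServer embeds the defaults; DeleteSnapshot and ListSnapshots
// keep returning codes.Unimplemented until they are overridden too.
type controllerServer struct {
	*csicommon.DefaultControllerServer
}

func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
	// A real driver would cut a snapshot of req.GetSourceVolumeId() here.
	return &csi.CreateSnapshotResponse{}, nil
}
```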
View File

@@ -31,7 +31,7 @@ const (
)
var (
vendorVersion = "0.2.0"
vendorVersion = "0.3.0"
)
func NewFakeDriver() *CSIDriver {

View File

@@ -29,7 +29,7 @@ type DefaultIdentityServer struct {
}
func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
glog.V(5).Infof("Using default GetPluginInnfo")
glog.V(5).Infof("Using default GetPluginInfo")
if ids.Driver.name == "" {
return nil, status.Error(codes.Unavailable, "Driver name not configured")
@@ -56,7 +56,7 @@ func (ids *DefaultIdentityServer) GetPluginCapabilities(ctx context.Context, req
{
Type: &csi.PluginCapability_Service_{
Service: &csi.PluginCapability_Service{
Type: csi.PluginCapability_Service_UNKNOWN,
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
},
},
},

View File

@@ -44,6 +44,14 @@ func (ns *DefaultNodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRe
}, nil
}
func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
glog.V(5).Infof("Using default NodeGetInfo")
return &csi.NodeGetInfoResponse{
NodeId: ns.Driver.nodeID,
}, nil
}
func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
glog.V(5).Infof("Using default NodeGetCapabilities")

View File

@@ -38,6 +38,18 @@ func TestNodeGetId(t *testing.T) {
assert.Equal(t, resp.GetNodeId(), fakeNodeID)
}
func TestNodeGetInfo(t *testing.T) {
d := NewFakeDriver()
ns := NewDefaultNodeServer(d)
// Test valid request
req := csi.NodeGetInfoRequest{}
resp, err := ns.NodeGetInfo(context.Background(), &req)
assert.NoError(t, err)
assert.Equal(t, resp.GetNodeId(), fakeNodeID)
}
func TestNodeGetCapabilities(t *testing.T) {
d := NewFakeDriver()

View File

@@ -8,7 +8,7 @@ $ sudo ./_output/flexadapter --endpoint tcp://127.0.0.1:10000 --drivername simpl
```
### Test using csc
Get the ```csc``` tool from https://github.com/thecodeteam/gocsi/tree/master/csc
Get the ```csc``` tool from https://github.com/rexray/gocsi/tree/master/csc
#### Get plugin info
```
@@ -16,12 +16,6 @@ $ csc identity plugin-info --endpoint tcp://127.0.0.1:10000
"simplenfs" "0.1.0"
```
### Get supported versions
```
$ csc identity supported-versions --endpoint tcp://127.0.0.1:10000
0.1.0
```
#### NodePublish a volume
```
$ csc node publish --endpoint tcp://127.0.0.1:10000 --target-path /mnt/nfs --attrib server=a.b.c.d --attrib share=nfs_share nfstestvol

View File

@@ -72,7 +72,7 @@ spec:
serviceAccount: csi-attacher
containers:
- name: csi-attacher
image: docker.io/k8scsi/csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

View File

@@ -15,7 +15,7 @@ spec:
hostNetwork: true
containers:
- name: driver-registrar
image: docker.io/k8scsi/driver-registrar
image: quay.io/k8scsi/driver-registrar:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

View File

@@ -38,7 +38,7 @@ type flexAdapter struct {
}
var (
version = "0.2.0"
version = "0.3.0"
)
func New() *flexAdapter {

View File

@@ -13,7 +13,7 @@ $ sudo ./_output/hostpathplugin --endpoint tcp://127.0.0.1:10000 --nodeid CSINod
```
### Test using csc
Get the ```csc``` tool from https://github.com/thecodeteam/gocsi/tree/master/csc
Get the ```csc``` tool from https://github.com/rexray/gocsi/tree/master/csc
#### Get plugin info
```
@@ -21,12 +21,6 @@ $ csc identity plugin-info --endpoint tcp://127.0.0.1:10000
"csi-hostpath" "0.1.0"
```
#### Get supported versions
```
$ csc identity supported-versions --endpoint tcp://127.0.0.1:10000
0.1.0
```
#### Create a volume
```
$ csc controller new --endpoint tcp://127.0.0.1:10000 --cap 1,block CSIVolumeName

View File

@@ -17,6 +17,7 @@ limitations under the License.
package hostpath
import (
"fmt"
"os"
"github.com/golang/glog"
@@ -30,8 +31,9 @@ import (
)
const (
deviceID = "deviceID"
provisionRoot = "/tmp/"
deviceID = "deviceID"
provisionRoot = "/tmp/"
maxStorageCapacity = tib
)
type controllerServer struct {
@@ -39,6 +41,10 @@ type controllerServer struct {
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(req.GetName()) == 0 {
@@ -47,23 +53,49 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check whether the size of the existing volume satisfies
// the new request
if exVol.VolSize >= int64(req.GetCapacityRange().GetRequiredBytes()) {
// existing volume is compatible with the new request and should be reused.
// TODO (sbezverk) Do I need to make sure that RBD volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
Id: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
Attributes: req.GetParameters(),
},
}, nil
}
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Volume with the same name: %s but with different size already exists", req.GetName()))
}
volumeId := uuid.NewUUID().String()
path := provisionRoot + volumeId
// Check against the maximum allowed storage capacity
capacity := int64(req.GetCapacityRange().GetRequiredBytes())
if capacity >= maxStorageCapacity {
return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, maxStorageCapacity)
}
volumeID := uuid.NewUUID().String()
path := provisionRoot + volumeID
err := os.MkdirAll(path, 0777)
if err != nil {
glog.V(3).Infof("failed to create volume: %v", err)
return nil, err
}
glog.V(4).Infof("create volume %s", path)
hostPathVol := hostPathVolume{}
hostPathVol.VolName = req.GetName()
hostPathVol.VolID = volumeID
hostPathVol.VolSize = capacity
hostPathVol.VolPath = path
hostPathVolumes[volumeID] = hostPathVol
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
Id: volumeId,
Id: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
Attributes: req.GetParameters(),
},
}, nil
}
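The net effect of the new pre-create check is an idempotent CreateVolume: a second request with the same name and a size the existing volume already covers gets the original volume back, while a size mismatch is rejected with AlreadyExists. A self-contained toy model of just that rule (illustration only, not the driver's actual code path):

```
package main

import (
	"errors"
	"fmt"
)

type vol struct {
	id   string
	size int64
}

// volumes models the in-memory hostPathVolumes registry, keyed by name
// here to keep the lookup trivial.
var volumes = map[string]vol{}

func create(name string, size int64) (vol, error) {
	if v, ok := volumes[name]; ok {
		if v.size >= size {
			return v, nil // compatible: reuse the existing volume
		}
		return vol{}, errors.New("AlreadyExists: same name, different size")
	}
	v := vol{id: "id-" + name, size: size}
	volumes[name] = v
	return v, nil
}

func main() {
	fmt.Println(create("vol-a", 10)) // creates {id-vol-a 10}
	fmt.Println(create("vol-a", 10)) // reuses the same volume
	fmt.Println(create("vol-a", 20)) // AlreadyExists
}
```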
@@ -79,11 +111,11 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
glog.V(3).Infof("invalid delete volume req: %v", req)
return nil, err
}
volumeId := req.VolumeId
glog.V(4).Infof("deleting volume %s", volumeId)
path := provisionRoot + volumeId
volumeID := req.VolumeId
glog.V(4).Infof("deleting volume %s", volumeID)
path := provisionRoot + volumeID
os.RemoveAll(path)
delete(hostPathVolumes, volumeID)
return &csi.DeleteVolumeResponse{}, nil
}
@@ -96,6 +128,9 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
if _, ok := hostPathVolumes[req.GetVolumeId()]; !ok {
return nil, status.Error(codes.NotFound, "Volume does not exist")
}
for _, cap := range req.VolumeCapabilities {
if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {

View File

@@ -1,6 +0,0 @@
FROM alpine
LABEL maintainers="Kubernetes Authors"
LABEL description="HostPath CSI Plugin"
COPY hostpathplugin /hostpathplugin
ENTRYPOINT ["/hostpathplugin"]

View File

@@ -1,13 +0,0 @@
FROM golang:alpine
LABEL maintainers="Kubernetes Authors"
LABEL description="HostPath CSI Plugin"
RUN apk add --no-cache git make wget
RUN wget https://github.com/golang/dep/releases/download/v0.3.2/dep-linux-amd64 && \
chmod +x dep-linux-amd64 && \
mv dep-linux-amd64 /usr/bin/dep
RUN go get -d github.com/kubernetes-csi/drivers/app/hostpathplugin
RUN cd /go/src/github.com/kubernetes-csi/drivers && \
dep ensure && \
make hostpath && \
cp _output/hostpathplugin /hostpathplugin

View File

@@ -1,10 +0,0 @@
#!/bin/sh
PROG=hostpathplugin
docker build --rm -f Dockerfile.builder -t ${PROG}:builder .
docker run --rm --privileged -v $PWD:/host ${PROG}:builder cp /${PROG} /host/${PROG}
sudo chown $USER ${PROG}
docker build --rm -t docker.io/k8scsi/${PROG} .
docker rmi ${PROG}:builder
rm -f ${PROG}

View File

@@ -17,12 +17,23 @@ limitations under the License.
package hostpath
import (
"fmt"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
const (
kib int64 = 1024
mib int64 = kib * 1024
gib int64 = mib * 1024
gib100 int64 = gib * 100
tib int64 = gib * 1024
tib100 int64 = tib * 100
)
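These are binary (IEC) units, so the maxStorageCapacity cap of one tib used by CreateVolume works out to 1024⁴ bytes; a quick sanity check of the constants above:

```
package main

import "fmt"

func main() {
	const kib int64 = 1024
	const mib = kib * 1024
	const gib = mib * 1024
	const tib = gib * 1024
	fmt.Println(tib) // 1099511627776, i.e. 1 TiB
}
```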
type hostPath struct {
driver *csicommon.CSIDriver
@@ -34,11 +45,24 @@
cscap []*csi.ControllerServiceCapability
}
type hostPathVolume struct {
VolName string `json:"volName"`
VolID string `json:"volID"`
VolSize int64 `json:"volSize"`
VolPath string `json:"volPath"`
}
var hostPathVolumes map[string]hostPathVolume
var (
hostPathDriver *hostPath
vendorVersion = "0.2.0"
vendorVersion = "0.3.0"
)
func init() {
hostPathVolumes = map[string]hostPathVolume{}
}
func GetHostPathDriver() *hostPath {
return &hostPath{}
}
@@ -81,3 +105,19 @@ func (hp *hostPath) Run(driverName, nodeID, endpoint string) {
s.Start(endpoint, hp.ids, hp.cs, hp.ns)
s.Wait()
}
func getVolumeByID(volumeID string) (hostPathVolume, error) {
if hostPathVol, ok := hostPathVolumes[volumeID]; ok {
return hostPathVol, nil
}
return hostPathVolume{}, fmt.Errorf("volume id %s does not exit in the volumes list", volumeID)
}
func getVolumeByName(volName string) (hostPathVolume, error) {
for _, hostPathVol := range hostPathVolumes {
if hostPathVol.VolName == volName {
return hostPathVol, nil
}
}
return hostPathVolume{}, fmt.Errorf("volume name %s does not exit in the volumes list", volName)
}

View File

@@ -76,7 +76,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
attrib := req.GetVolumeAttributes()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v\nfstype %v\ndevice %v\nreadonly %v\nattributes %v\n mountflags %v\n",
glog.V(4).Infof("target %v\nfstype %v\ndevice %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",
targetPath, fsType, deviceId, readOnly, volumeId, attrib, mountFlags)
options := []string{"bind"}

View File

@@ -8,7 +8,7 @@ $ sudo ./_output/iscsidriver --endpoint tcp://127.0.0.1:10000 --nodeid CSINode
```
### Test using csc
Get the ```csc``` tool from https://github.com/thecodeteam/gocsi/tree/master/csc
Get the ```csc``` tool from https://github.com/rexray/gocsi/tree/master/csc
#### Get plugin info
```
@@ -16,12 +16,6 @@ $ csc identity plugin-info --endpoint tcp://127.0.0.1:10000
"ISCSI" "0.1.0"
```
### Get supported versions
```
$ csc identity supported-versions --endpoint tcp://127.0.0.1:10000
0.1.0
```
#### NodePublish a volume
```
$ export ISCSI_TARGET="iSCSI Target Server IP (Ex: 10.10.10.10)"

View File

@@ -39,7 +39,7 @@ const (
)
var (
version = "0.2.0"
version = "0.3.0"
)
func NewDriver(nodeID, endpoint string) *driver {

View File

@@ -268,7 +268,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
devicePath = devicePaths[0]
// Mount device
mntPath := path.Join(b.targetPath, b.VolName)
mntPath := b.targetPath
notMnt, err := b.mounter.IsLikelyNotMountPoint(mntPath)
if err != nil && !os.IsNotExist(err) {
return "", fmt.Errorf("Heuristic determination of mount point failed:%v", err)
@@ -320,21 +320,20 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
}
func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, targetPath string) error {
mntPath := path.Join(targetPath, c.VolName)
_, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
_, cnt, err := mount.GetDeviceNameFromMount(c.mounter, targetPath)
if err != nil {
glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", targetPath, err)
return err
}
if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil {
if pathExists, pathErr := volumeutil.PathExists(targetPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath)
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", targetPath)
return nil
}
if err = c.mounter.Unmount(mntPath); err != nil {
glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
if err = c.mounter.Unmount(targetPath); err != nil {
glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", targetPath, err)
return err
}
cnt--

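The substance of this hunk: 0.2.x mounted the disk at a VolName subdirectory of the target path, while 0.3.0 treats the CO-provided target path as the mount point itself, so DetachDisk now resolves and unmounts targetPath directly. A toy illustration of the two layouts (paths made up for the example):

```
package main

import (
	"fmt"
	"path"
)

func main() {
	targetPath := "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~csi/pv/mount"
	volName := "iscsi-vol"

	// Old 0.2.x layout: a per-volume subdirectory under the target path.
	fmt.Println(path.Join(targetPath, volName))
	// New 0.3.0 layout: the target path is the mount point itself.
	fmt.Println(targetPath)
}
```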
View File

@@ -39,7 +39,7 @@ $ sudo ./_output/nfsplugin --endpoint tcp://127.0.0.1:10000 --nodeid CSINode -v=
```
## Test
Get the ```csc``` tool from https://github.com/thecodeteam/gocsi/tree/master/csc
Get the ```csc``` tool from https://github.com/rexray/gocsi/tree/master/csc
#### Get plugin info
```
@@ -47,12 +47,6 @@ $ csc identity plugin-info --endpoint tcp://127.0.0.1:10000
"NFS" "0.1.0"
```
### Get supported versions
```
$ csc identity supported-versions --endpoint tcp://127.0.0.1:10000
0.1.0
```
#### NodePublish a volume
```
$ export NFS_SERVER="Your Server IP (Ex: 10.10.10.10)"

View File

@@ -30,7 +30,7 @@ spec:
serviceAccount: csi-attacher
containers:
- name: csi-attacher
image: docker.io/k8scsi/csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@@ -43,7 +43,7 @@ spec:
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: nfs
image: docker.io/k8scsi/nfsplugin:v0.1
image: quay.io/k8scsi/nfsplugin:v0.3.0
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"

View File

@@ -17,7 +17,7 @@ spec:
hostNetwork: true
containers:
- name: driver-registrar
image: docker.io/k8scsi/driver-registrar
image: quay.io/k8scsi/driver-registrar:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@@ -37,7 +37,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: docker.io/k8scsi/nfsplugin:v0.1
image: quay.io/k8scsi/nfsplugin:v0.3.0
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"

View File

@@ -39,7 +39,7 @@ const (
)
var (
version = "0.2.0"
version = "0.3.0"
)
func NewDriver(nodeID, endpoint string) *driver {
@@ -51,6 +51,10 @@ func NewDriver(nodeID, endpoint string) *driver {
csiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)
csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
// NFS plugin does not support ControllerServiceCapability now.
// If support is added, this should be set to the appropriate
// ControllerServiceCapability RPC types.
csiDriver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_UNKNOWN})
d.csiDriver = csiDriver
@@ -67,7 +71,8 @@ func (d *driver) Run() {
s := csicommon.NewNonBlockingGRPCServer()
s.Start(d.endpoint,
csicommon.NewDefaultIdentityServer(d.csiDriver),
csicommon.NewDefaultControllerServer(d.csiDriver),
// NFS plugin has not implemented ControllerServer.
nil,
NewNodeServer(d))
s.Wait()
}

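Passing nil here means the Controller service is never registered, so Controller RPCs against the NFS plugin fail at the gRPC layer with Unimplemented instead of reaching stub handlers. A sketch of that registration behavior (modeled on, but not copied from, the csicommon server):

```
package main

import (
	"log"
	"net"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"google.golang.org/grpc"
)

func serve(ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) error {
	lis, err := net.Listen("tcp", "127.0.0.1:10000")
	if err != nil {
		return err
	}
	srv := grpc.NewServer()
	// Register only what the driver implements; an unregistered service
	// makes gRPC answer its RPCs with codes.Unimplemented.
	if ids != nil {
		csi.RegisterIdentityServer(srv, ids)
	}
	if cs != nil {
		csi.RegisterControllerServer(srv, cs)
	}
	if ns != nil {
		csi.RegisterNodeServer(srv, ns)
	}
	log.Printf("serving CSI on %s", lis.Addr())
	return srv.Serve(lis)
}

func main() {
	log.Fatal(serve(nil, nil, nil)) // illustration: serves with nothing registered
}
```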
View File

@@ -4,16 +4,17 @@ metadata:
name: data-nfsplugin
labels:
name: data-nfsplugin
annotations:
csi.volume.kubernetes.io/volume-attributes: '{"server": "10.10.10.10", "share": "share"}'
spec:
accessModes:
- ReadWriteOnce
- ReadWriteMany
capacity:
storage: 100Gi
csi:
driver: csi-nfsplugin
volumeHandle: data-id
volumeAttributes:
server: 127.0.0.1
share: /export
---
apiVersion: v1
kind: PersistentVolumeClaim
@@ -21,7 +22,7 @@ metadata:
name: data-nfsplugin
spec:
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 100Gi