Fresh dep ensure

This commit is contained in:
Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -1,91 +0,0 @@
# CSI Cinder driver
## Kubernetes
### Requirements
The following feature gates and runtime config have to be enabled to deploy the driver.
```
FEATURE_GATES=CSIPersistentVolume=true,MountPropagation=true
RUNTIME_CONFIG="storage.k8s.io/v1alpha1=true"
```
Mount propagation requires support for privileged containers, so make sure privileged containers are enabled in the cluster.
### Example local-up-cluster.sh
```ALLOW_PRIVILEGED=true FEATURE_GATES=CSIPersistentVolume=true,MountPropagation=true RUNTIME_CONFIG="storage.k8s.io/v1alpha1=true" LOG_LEVEL=5 hack/local-up-cluster.sh```
### Deploy
Encode your ```cloud.conf``` file content using base64.
```base64 -w 0 cloud.conf```
Update ```cloud.conf``` configuration in ```deploy/kubernetes/csi-secret-cinderplugin.yaml``` file
by using the result of the above command.
```kubectl -f deploy/kubernetes create```
### Example Nginx application
```kubectl -f examples/kubernetes/nginx.yaml create```
## Using CSC tool
### Start Cinder driver
```
$ sudo ./_output/cinderplugin --endpoint tcp://127.0.0.1:10000 --cloud-config /etc/cloud.conf --nodeid CSINodeID
```
### Test using csc
Get ```csc``` tool from https://github.com/rexray/gocsi/tree/master/csc
#### Get plugin info
```
$ csc identity plugin-info --endpoint tcp://127.0.0.1:10000
"csi-cinderplugin" "0.1.0"
```
#### Create a volume
```
$ csc controller new --endpoint tcp://127.0.0.1:10000 CSIVolumeName
CSIVolumeID
```
#### Delete a volume
```
$ csc controller del --endpoint tcp://127.0.0.1:10000 CSIVolumeID
CSIVolumeID
```
#### ControllerPublish a volume
```
$ csc controller publish --endpoint tcp://127.0.0.1:10000 --node-id=CSINodeID CSIVolumeID
CSIVolumeID "DevicePath"="/dev/xxx"
```
#### ControllerUnpublish a volume
```
$ csc controller unpublish --endpoint tcp://127.0.0.1:10000 --node-id=CSINodeID CSIVolumeID
CSIVolumeID
```
#### NodePublish a volume
```
$ csc node publish --endpoint tcp://127.0.0.1:10000 --target-path /mnt/cinder --pub-info DevicePath="/dev/xxx" CSIVolumeID
CSIVolumeID
```
#### NodeUnpublish a volume
```
$ csc node unpublish --endpoint tcp://127.0.0.1:10000 --target-path /mnt/cinder CSIVolumeID
CSIVolumeID
```
#### Get NodeID
```
$ csc node get-id --endpoint tcp://127.0.0.1:10000
CSINodeID
```

View File

@ -1,172 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/cinder/openstack"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
"github.com/pborman/uuid"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/volume/util"
)
// controllerServer implements the CSI controller service for Cinder.
// It embeds the csi-common default controller server for any RPCs it
// does not override below.
type controllerServer struct {
	*csicommon.DefaultControllerServer
}
// CreateVolume provisions a new Cinder volume and returns its ID together
// with the availability zone it landed in (exposed to the CO through the
// "availability" volume attribute, which the external provisioner can use
// for topology).
//
// Defaults: a freshly generated UUID when req.Name is empty, and 1 GiB when
// no capacity range is supplied; a supplied RequiredBytes value is rounded
// up to whole GiB since Cinder sizes volumes in GiB. The "type" and
// "availability" request parameters select the Cinder volume type and
// availability zone (zone defaulting is handled by the cloud layer —
// presumably "nova"; confirm against openstack.CreateVolume).
// Errors from the cloud provider are returned to the caller unwrapped.
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	// Volume Name: default to a random UUID when the CO did not name it.
	volName := req.GetName()
	if len(volName) == 0 {
		volName = uuid.NewUUID().String()
	}

	// Volume Size - Default is 1 GiB
	volSizeBytes := int64(1 * 1024 * 1024 * 1024)
	if req.GetCapacityRange() != nil {
		volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes())
	}
	// Round up to whole GiB for Cinder.
	volSizeGB := int(util.RoundUpSize(volSizeBytes, 1024*1024*1024))

	// Volume Type
	volType := req.GetParameters()["type"]

	// Volume Availability - Default is nova
	volAvailability := req.GetParameters()["availability"]

	// Get OpenStack Provider
	cloud, err := openstack.GetOpenStackProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetOpenStackProvider: %v", err)
		return nil, err
	}

	// Volume Create (the trailing nil is the optional tags map).
	resID, resAvailability, err := cloud.CreateVolume(volName, volSizeGB, volType, volAvailability, nil)
	if err != nil {
		glog.V(3).Infof("Failed to CreateVolume: %v", err)
		return nil, err
	}

	glog.V(4).Infof("Create volume %s in Availability Zone: %s", resID, resAvailability)

	return &csi.CreateVolumeResponse{
		Volume: &csi.Volume{
			Id: resID,
			Attributes: map[string]string{
				"availability": resAvailability,
			},
		},
	}, nil
}
// DeleteVolume removes the Cinder volume named by the request's volume ID.
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
	volumeID := req.GetVolumeId()

	// Resolve the OpenStack provider first; nothing to undo on failure.
	provider, err := openstack.GetOpenStackProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetOpenStackProvider: %v", err)
		return nil, err
	}

	if err = provider.DeleteVolume(volumeID); err != nil {
		glog.V(3).Infof("Failed to DeleteVolume: %v", err)
		return nil, err
	}

	glog.V(4).Infof("Delete volume %s", volumeID)
	return &csi.DeleteVolumeResponse{}, nil
}
// ControllerPublishVolume attaches the volume to the requested node, waits
// for the cloud to report the disk attached, and returns the resulting
// device path in the publish info under the "DevicePath" key (which
// NodePublishVolume later reads to locate the block device).
func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
	// Get OpenStack Provider
	cloud, err := openstack.GetOpenStackProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetOpenStackProvider: %v", err)
		return nil, err
	}

	// Volume Attach: the attach call is asynchronous on the cloud side, so
	// the order below matters — request, wait for completion, then resolve
	// the device path of the finished attachment.
	instanceID := req.GetNodeId()
	volumeID := req.GetVolumeId()
	_, err = cloud.AttachVolume(instanceID, volumeID)
	if err != nil {
		glog.V(3).Infof("Failed to AttachVolume: %v", err)
		return nil, err
	}

	err = cloud.WaitDiskAttached(instanceID, volumeID)
	if err != nil {
		glog.V(3).Infof("Failed to WaitDiskAttached: %v", err)
		return nil, err
	}

	devicePath, err := cloud.GetAttachmentDiskPath(instanceID, volumeID)
	if err != nil {
		glog.V(3).Infof("Failed to GetAttachmentDiskPath: %v", err)
		return nil, err
	}

	glog.V(4).Infof("ControllerPublishVolume %s on %s", volumeID, instanceID)

	// Publish Volume Info
	pvInfo := map[string]string{}
	pvInfo["DevicePath"] = devicePath

	return &csi.ControllerPublishVolumeResponse{
		PublishInfo: pvInfo,
	}, nil
}
// ControllerUnpublishVolume detaches the volume from the given node and
// waits until the cloud reports the disk fully detached.
func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
	volumeID := req.GetVolumeId()
	instanceID := req.GetNodeId()

	cloud, err := openstack.GetOpenStackProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetOpenStackProvider: %v", err)
		return nil, err
	}

	// Detach, then block until the detachment is complete.
	if err = cloud.DetachVolume(instanceID, volumeID); err != nil {
		glog.V(3).Infof("Failed to DetachVolume: %v", err)
		return nil, err
	}
	if err = cloud.WaitDiskDetached(instanceID, volumeID); err != nil {
		glog.V(3).Infof("Failed to WaitDiskDetached: %v", err)
		return nil, err
	}

	glog.V(4).Infof("ControllerUnpublishVolume %s on %s", volumeID, instanceID)
	return &csi.ControllerUnpublishVolumeResponse{}, nil
}

View File

@ -1,172 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"testing"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/kubernetes-csi/drivers/pkg/cinder/openstack"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// fakeCs is the controller server under test, shared by every test below.
var fakeCs *controllerServer

// Init Controller Server
//
// init builds a driver from the fake_* fixtures (see fake.go) and wires up
// the package-level controller server so each test can call fakeCs directly.
func init() {
	if fakeCs == nil {
		d := NewDriver(fakeNodeID, fakeEndpoint, fakeConfig)
		fakeCs = NewControllerServer(d)
	}
}
// Test CreateVolume
//
// TestCreateVolume stubs the OpenStack layer so volume creation succeeds,
// then checks the response carries a non-empty volume ID and echoes the
// availability zone back through the "availability" attribute.
func TestCreateVolume(t *testing.T) {
	// mock OpenStack
	osmock := new(openstack.OpenStackMock)
	// CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error)
	osmock.On("CreateVolume", fakeVolName, mock.AnythingOfType("int"), fakeVolType, fakeAvailability, (*map[string]string)(nil)).Return(fakeVolID, fakeAvailability, nil)
	openstack.OsInstance = osmock

	// Init assert
	assert := assert.New(t)

	// Fake request
	fakeReq := &csi.CreateVolumeRequest{
		Name:               fakeVolName,
		VolumeCapabilities: nil,
	}

	// Invoke CreateVolume
	actualRes, err := fakeCs.CreateVolume(fakeCtx, fakeReq)
	if err != nil {
		t.Errorf("failed to CreateVolume: %v", err)
	}

	// Assert
	assert.NotNil(actualRes.Volume)
	assert.NotEqual(0, len(actualRes.Volume.Id), "Volume Id is nil")
	assert.Equal(fakeAvailability, actualRes.Volume.Attributes["availability"])
}
// TestDeleteVolume verifies that DeleteVolume returns an empty response
// when the mocked OpenStack layer deletes the volume without error.
func TestDeleteVolume(t *testing.T) {
	// Arrange: DeleteVolume(volumeID string) error succeeds.
	osmock := new(openstack.OpenStackMock)
	osmock.On("DeleteVolume", fakeVolID).Return(nil)
	openstack.OsInstance = osmock

	req := &csi.DeleteVolumeRequest{
		VolumeId: fakeVolID,
	}

	// Act.
	res, err := fakeCs.DeleteVolume(fakeCtx, req)
	if err != nil {
		t.Errorf("failed to DeleteVolume: %v", err)
	}

	// Assert.
	assert.Equal(t, &csi.DeleteVolumeResponse{}, res)
}
// Test ControllerPublishVolume
//
// TestControllerPublishVolume stubs the attach, wait, and device-path
// lookups so the attach flow succeeds, then checks that the device path is
// surfaced in the publish info under "DevicePath".
func TestControllerPublishVolume(t *testing.T) {
	// mock OpenStack
	osmock := new(openstack.OpenStackMock)
	// AttachVolume(instanceID, volumeID string) (string, error)
	osmock.On("AttachVolume", fakeNodeID, fakeVolID).Return(fakeVolID, nil)
	// WaitDiskAttached(instanceID string, volumeID string) error
	osmock.On("WaitDiskAttached", fakeNodeID, fakeVolID).Return(nil)
	// GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
	osmock.On("GetAttachmentDiskPath", fakeNodeID, fakeVolID).Return(fakeDevicePath, nil)
	openstack.OsInstance = osmock

	// Init assert
	assert := assert.New(t)

	// Fake request
	fakeReq := &csi.ControllerPublishVolumeRequest{
		VolumeId:         fakeVolID,
		NodeId:           fakeNodeID,
		VolumeCapability: nil,
		Readonly:         false,
	}

	// Expected Result
	expectedRes := &csi.ControllerPublishVolumeResponse{
		PublishInfo: map[string]string{
			"DevicePath": fakeDevicePath,
		},
	}

	// Invoke ControllerPublishVolume
	actualRes, err := fakeCs.ControllerPublishVolume(fakeCtx, fakeReq)
	if err != nil {
		t.Errorf("failed to ControllerPublishVolume: %v", err)
	}

	// Assert
	assert.Equal(expectedRes, actualRes)
}
// TestControllerUnpublishVolume checks that a successful detach through the
// mocked OpenStack layer yields an empty ControllerUnpublishVolumeResponse.
func TestControllerUnpublishVolume(t *testing.T) {
	// Arrange: detach and wait-for-detach both succeed.
	osmock := new(openstack.OpenStackMock)
	osmock.On("DetachVolume", fakeNodeID, fakeVolID).Return(nil)
	osmock.On("WaitDiskDetached", fakeNodeID, fakeVolID).Return(nil)
	openstack.OsInstance = osmock

	req := &csi.ControllerUnpublishVolumeRequest{
		VolumeId: fakeVolID,
		NodeId:   fakeNodeID,
	}

	// Act.
	res, err := fakeCs.ControllerUnpublishVolume(fakeCtx, req)
	if err != nil {
		t.Errorf("failed to ControllerUnpublishVolume: %v", err)
	}

	// Assert.
	assert.Equal(t, &csi.ControllerUnpublishVolumeResponse{}, res)
}

View File

@ -1,71 +0,0 @@
# This YAML file contains attacher & csi driver API objects,
# which are necessary to run external csi attacher for cinder.
kind: Service
apiVersion: v1
metadata:
name: csi-attacher-cinderplugin
labels:
app: csi-attacher-cinderplugin
spec:
selector:
app: csi-attacher-cinderplugin
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-attacher-cinderplugin
spec:
serviceName: "csi-attacher-cinderplugin"
replicas: 1
template:
metadata:
labels:
app: csi-attacher-cinderplugin
spec:
serviceAccount: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: cinder
image: quay.io/k8scsi/cinderplugin
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
volumes:
- name: socket-dir
emptyDir:
- name: secret-cinderplugin
secret:
secretName: csi-secret-cinderplugin

View File

@ -1,37 +0,0 @@
# This YAML file contains RBAC API objects,
# which are necessary to run external csi attacher for cinder.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io

View File

@ -1,90 +0,0 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects,
# which are necessary to run csi nodeplugin for cinder.
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
name: csi-nodeplugin-cinderplugin
spec:
selector:
matchLabels:
app: csi-nodeplugin-cinderplugin
template:
metadata:
labels:
app: csi-nodeplugin-cinderplugin
spec:
serviceAccount: csi-nodeplugin
hostNetwork: true
containers:
- name: driver-registrar
image: quay.io/k8scsi/driver-registrar:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cinder
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/k8scsi/cinderplugin
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: pods-cloud-data
mountPath: /var/lib/cloud/data
readOnly: true
- name: pods-probe-dir
mountPath: /dev
mountPropagation: "HostToContainer"
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-cinderplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: pods-cloud-data
hostPath:
path: /var/lib/cloud/data
type: Directory
- name: pods-probe-dir
hostPath:
path: /dev
type: Directory
- name: secret-cinderplugin
secret:
secretName: csi-secret-cinderplugin

View File

@ -1,35 +0,0 @@
# This YAML defines all API objects to create RBAC roles for csi node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
subjects:
- kind: ServiceAccount
name: csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-nodeplugin
apiGroup: rbac.authorization.k8s.io

View File

@ -1,71 +0,0 @@
# This YAML file contains attacher & csi driver API objects,
# which are necessary to run external csi provisioner for cinder.
kind: Service
apiVersion: v1
metadata:
name: csi-provisioner-cinderplugin
labels:
app: csi-provisioner-cinderplugin
spec:
selector:
app: csi-provisioner-cinderplugin
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-provisioner-cinderplugin
spec:
serviceName: "csi-provisioner-cinderplugin"
replicas: 1
template:
metadata:
labels:
app: csi-provisioner-cinderplugin
spec:
serviceAccount: csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v0.3.0
args:
- "--provisioner=csi-cinderplugin"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: cinder
image: quay.io/k8scsi/cinderplugin
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
volumes:
- name: socket-dir
emptyDir:
- name: secret-cinderplugin
secret:
secretName: csi-secret-cinderplugin

View File

@ -1,41 +0,0 @@
# This YAML file contains RBAC API objects,
# which are necessary to run external csi provisioner for cinder.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io

View File

@ -1,9 +0,0 @@
# This YAML file contains secret objects,
# which are necessary to run csi cinder plugin.
kind: Secret
apiVersion: v1
metadata:
name: csi-secret-cinderplugin
data:
cloud.conf: W0dsb2JhbF0KdXNlcm5hbWU9dXNlcgpwYXNzd29yZD1wYXNzCmF1dGgtdXJsPWh0dHBzOi8vPGtleXN0b25lX2lwPi9pZGVudGl0eS92Mwp0ZW5hbnQtaWQ9Yzg2OTE2OGE4Mjg4NDdmMzlmN2YwNmVkZDczMDU2MzcKZG9tYWluLWlkPTJhNzNiOGY1OTdjMDQ1NTFhMGZkYzhlOTU1NDRiZThhCg==

View File

@ -1,13 +0,0 @@
# Based on centos
FROM centos:7.4.1708
LABEL maintainers="Kubernetes Authors"
LABEL description="Cinder CSI Plugin"
# Copy cinderplugin from build directory
COPY cinderplugin /cinderplugin
# Install e4fsprogs for format
RUN yum -y install e4fsprogs
# Define default command
ENTRYPOINT ["/cinderplugin"]

View File

@ -1,84 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/cinder/openstack"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
// driver ties together the CSI identity, controller and node services with
// the csi-common plumbing and the OpenStack cloud configuration.
type driver struct {
	csiDriver   *csicommon.CSIDriver
	endpoint    string // gRPC endpoint served by Run
	cloudconfig string // path passed to openstack.InitOpenStackProvider

	ids *csicommon.DefaultIdentityServer
	cs  *controllerServer
	ns  *nodeServer

	cap   []*csi.VolumeCapability_AccessMode
	cscap []*csi.ControllerServiceCapability
}
const (
	// driverName is the plugin name handed to csicommon.NewCSIDriver.
	driverName = "csi-cinderplugin"
)

var (
	// version is the driver version handed to csicommon.NewCSIDriver.
	version = "0.3.0"
)
// NewDriver builds a Cinder CSI driver instance for the given node ID,
// gRPC endpoint and cloud-config path, registering the create/delete and
// publish/unpublish controller capabilities plus the single-node-writer
// access mode.
func NewDriver(nodeID, endpoint string, cloudconfig string) *driver {
	glog.Infof("Driver: %v version: %v", driverName, version)

	cd := csicommon.NewCSIDriver(driverName, version, nodeID)
	cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
		csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
	})
	cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
		csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
	})

	return &driver{
		endpoint:    endpoint,
		cloudconfig: cloudconfig,
		csiDriver:   cd,
	}
}
// NewControllerServer wraps the driver's CSIDriver in a controller service.
func NewControllerServer(d *driver) *controllerServer {
	server := &controllerServer{}
	server.DefaultControllerServer = csicommon.NewDefaultControllerServer(d.csiDriver)
	return server
}
// NewNodeServer wraps the driver's CSIDriver in a node service.
func NewNodeServer(d *driver) *nodeServer {
	server := &nodeServer{}
	server.DefaultNodeServer = csicommon.NewDefaultNodeServer(d.csiDriver)
	return server
}
// Run initializes the global OpenStack provider from the driver's cloud
// config and starts the combined controller/node/identity gRPC server on
// d.endpoint.
// NOTE(review): any failure from InitOpenStackProvider is not surfaced
// here — confirm its signature in the openstack package.
func (d *driver) Run() {
	openstack.InitOpenStackProvider(d.cloudconfig)
	csicommon.RunControllerandNodePublishServer(d.endpoint, d.csiDriver, NewControllerServer(d), NewNodeServer(d))
}

View File

@ -1,6 +0,0 @@
[Global]
username=user
password=pass
auth-url=https://<keystone_ip>/identity/v3
tenant-id=c869168a828847f39f7f06edd7305637
domain-id=2a73b8f597c04551a0fdc8e95544be8a

View File

@ -1,44 +0,0 @@
# This YAML file contains nginx & csi cinder driver objects,
# which are necessary to run nginx with csi cinder driver.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-sc-cinderplugin
provisioner: csi-cinderplugin
parameters:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: csi-pvc-cinderplugin
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: csi-sc-cinderplugin
---
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- image: nginx
imagePullPolicy: IfNotPresent
name: nginx
ports:
- containerPort: 80
protocol: TCP
volumeMounts:
- mountPath: /var/lib/www/html
name: csi-data-cinderplugin
volumes:
- name: csi-data-cinderplugin
persistentVolumeClaim:
claimName: csi-pvc-cinderplugin
readOnly: false

View File

@ -1,32 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"golang.org/x/net/context"
)
// Shared fixtures for the cinder package tests.
var fakeNodeID = "CSINodeID"               // node/instance ID used by publish tests
var fakeEndpoint = "tcp://127.0.0.1:10000" // endpoint handed to NewDriver in init
var fakeConfig = "/etc/cloud.conf"         // cloud-config path handed to NewDriver
var fakeCtx = context.Background()         // context passed to every RPC under test
var fakeVolName = "CSIVolumeName"
var fakeVolID = "CSIVolumeID"
var fakeVolType = ""      // empty: no explicit Cinder volume type
var fakeAvailability = "" // empty: no explicit availability zone parameter
var fakeDevicePath = "/dev/xxx"
var fakeTargetPath = "/mnt/cinder"

View File

@ -1,161 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"io/ioutil"
"os"
"strings"
"time"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util"
utilexec "k8s.io/utils/exec"
"github.com/golang/glog"
)
const (
	// probeVolumeDuration is the interval between attach-probe attempts.
	probeVolumeDuration = 1 * time.Second
	// probeVolumeTimeout bounds how long ScanForAttach waits overall.
	probeVolumeTimeout = 60 * time.Second
	// instanceIDFile is where cloud-init records this host's instance ID.
	instanceIDFile = "/var/lib/cloud/data/instance-id"
)
// IMount abstracts the host mount/attach operations used by the node
// service so tests can substitute a mock (see MountMock).
type IMount interface {
	// ScanForAttach polls until devicePath appears or a timeout elapses.
	ScanForAttach(devicePath string) error
	// IsLikelyNotMountPointAttach checks (and creates) targetpath before a mount.
	IsLikelyNotMountPointAttach(targetpath string) (bool, error)
	// FormatAndMount mounts source at target via the Kubernetes helper.
	FormatAndMount(source string, target string, fstype string, options []string) error
	// IsLikelyNotMountPointDetach checks targetpath before an unmount.
	IsLikelyNotMountPointDetach(targetpath string) (bool, error)
	// UnmountPath unmounts mountPath.
	UnmountPath(mountPath string) error
	// GetInstanceID returns this host's cloud instance ID.
	GetInstanceID() (string, error)
}

// Mount is the production IMount implementation.
type Mount struct {
}

// MInstance is the process-wide IMount; GetMountProvider fills it lazily.
var MInstance IMount = nil
// GetMountProvider returns the process-wide IMount implementation,
// lazily creating the default Mount on first use.
func GetMountProvider() (IMount, error) {
	if MInstance != nil {
		return MInstance, nil
	}
	MInstance = &Mount{}
	return MInstance, nil
}
// probeVolume forces the kernel to discover newly attached block devices:
// it writes a wildcard scan request to every SCSI host and then runs
// "udevadm trigger" so udev re-processes device events. Only the udevadm
// failure is returned; per-host scan writes are best effort.
func probeVolume() error {
	// Rescan all SCSI buses; "- - -" means any channel/target/LUN.
	// (Renamed scsi_path -> scsiPath: Go uses MixedCaps, not underscores.)
	scsiPath := "/sys/class/scsi_host/"
	if dirs, err := ioutil.ReadDir(scsiPath); err == nil {
		for _, f := range dirs {
			name := scsiPath + f.Name() + "/scan"
			data := []byte("- - -")
			// Deliberately best effort: one host refusing the scan should
			// not abort probing of the remaining hosts.
			ioutil.WriteFile(name, data, 0666)
		}
	}

	executor := utilexec.New()
	cmd := executor.Command("udevadm", "trigger")
	if _, err := cmd.CombinedOutput(); err != nil {
		glog.V(3).Infof("error running udevadm trigger %v\n", err)
		return err
	}

	glog.V(4).Infof("Successfully probed all attachments")
	return nil
}
// ScanForAttach polls once per probeVolumeDuration (1s), up to
// probeVolumeTimeout (60s), until devicePath exists on the host,
// triggering a SCSI/udev rescan before each check. It returns nil as soon
// as the path appears and an error on timeout.
func (m *Mount) ScanForAttach(devicePath string) error {
	ticker := time.NewTicker(probeVolumeDuration)
	defer ticker.Stop()
	timer := time.NewTimer(probeVolumeTimeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			glog.V(5).Infof("Checking Cinder disk %q is attached.", devicePath)
			probeVolume()
			// Early return instead of else-after-return (Go idiom).
			if exists, err := util.PathExists(devicePath); exists && err == nil {
				return nil
			}
			glog.V(3).Infof("Could not find attached Cinder disk %s", devicePath)
		case <-timer.C:
			return fmt.Errorf("Could not find attached Cinder disk %s. Timeout waiting for mount paths to be created.", devicePath)
		}
	}
}
// FormatAndMount delegates to Kubernetes' SafeFormatAndMount helper to
// mount source at target (formatting with fstype first when needed, per
// the upstream helper's contract).
func (m *Mount) FormatAndMount(source string, target string, fstype string, options []string) error {
	diskMounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: mount.NewOsExec()}
	return diskMounter.FormatAndMount(source, target, fstype, options)
}
// IsLikelyNotMountPointAttach reports whether targetpath is not yet a
// mount point, creating the directory (mode 0750) when it does not exist.
func (m *Mount) IsLikelyNotMountPointAttach(targetpath string) (bool, error) {
	notMnt, err := mount.New("").IsLikelyNotMountPoint(targetpath)
	if err == nil || !os.IsNotExist(err) {
		return notMnt, err
	}

	// Target is missing: create it so the caller can mount onto it.
	if err = os.MkdirAll(targetpath, 0750); err == nil {
		notMnt = true
	}
	return notMnt, err
}
// IsLikelyNotMountPointDetach reports whether targetpath is not a mount
// point; a missing target path is surfaced as an explicit error.
func (m *Mount) IsLikelyNotMountPointDetach(targetpath string) (bool, error) {
	notMnt, err := mount.New("").IsLikelyNotMountPoint(targetpath)
	switch {
	case err == nil:
		return notMnt, nil
	case os.IsNotExist(err):
		return notMnt, fmt.Errorf("targetpath not found")
	default:
		return notMnt, err
	}
}
// UnmountPath unmounts mountPath via the Kubernetes volume util helper.
func (m *Mount) UnmountPath(mountPath string) error {
	return util.UnmountPath(mountPath, mount.New(""))
}
// GetInstanceID reads the cloud instance ID that cloud-init recorded at
// instanceIDFile on the local filesystem.
//
// Fix: the original returned ("", nil) when the file existed but was empty
// or whitespace-only — a silent, ambiguous "success". An empty ID is now
// reported as an explicit error.
func (m *Mount) GetInstanceID() (string, error) {
	// Try to find instance ID on the local filesystem (created by cloud-init)
	idBytes, err := ioutil.ReadFile(instanceIDFile)
	if err != nil {
		return "", err
	}

	instanceID := strings.TrimSpace(string(idBytes))
	if instanceID == "" {
		return "", fmt.Errorf("instance ID file %s is empty", instanceIDFile)
	}

	glog.V(3).Infof("Got instance id from %s: %s", instanceIDFile, instanceID)
	return instanceID, nil
}

View File

@ -1,130 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import mock "github.com/stretchr/testify/mock"
// MountMock is an autogenerated mock type for the IMount type
// ORIGINALLY GENERATED BY mockery with hand edits.
// Expectations are registered through the embedded mock.Mock's On(...) API.
type MountMock struct {
	mock.Mock
}
// FormatAndMount provides a mock function with given fields: source, target, fstype, options
func (_m *MountMock) FormatAndMount(source string, target string, fstype string, options []string) error {
	ret := _m.Called(source, target, fstype, options)

	// Prefer a registered return-function; otherwise use the canned error.
	if rf, ok := ret.Get(0).(func(string, string, string, []string) error); ok {
		return rf(source, target, fstype, options)
	}
	return ret.Error(0)
}
// GetInstanceID provides a mock function with given fields:
func (_m *MountMock) GetInstanceID() (string, error) {
	ret := _m.Called()

	var id string
	switch fn := ret.Get(0).(type) {
	case func() string:
		id = fn()
	default:
		id = ret.Get(0).(string)
	}

	var err error
	if fn, ok := ret.Get(1).(func() error); ok {
		err = fn()
	} else {
		err = ret.Error(1)
	}

	return id, err
}
// IsLikelyNotMountPointAttach provides a mock function with given fields: targetpath
func (_m *MountMock) IsLikelyNotMountPointAttach(targetpath string) (bool, error) {
	ret := _m.Called(targetpath)

	var notMnt bool
	switch fn := ret.Get(0).(type) {
	case func(string) bool:
		notMnt = fn(targetpath)
	default:
		notMnt = ret.Get(0).(bool)
	}

	var err error
	if fn, ok := ret.Get(1).(func(string) error); ok {
		err = fn(targetpath)
	} else {
		err = ret.Error(1)
	}

	return notMnt, err
}
// IsLikelyNotMountPointDetach provides a mock function with given fields: targetpath
func (_m *MountMock) IsLikelyNotMountPointDetach(targetpath string) (bool, error) {
	ret := _m.Called(targetpath)

	var notMnt bool
	switch fn := ret.Get(0).(type) {
	case func(string) bool:
		notMnt = fn(targetpath)
	default:
		notMnt = ret.Get(0).(bool)
	}

	var err error
	if fn, ok := ret.Get(1).(func(string) error); ok {
		err = fn(targetpath)
	} else {
		err = ret.Error(1)
	}

	return notMnt, err
}
// ScanForAttach provides a mock function with given fields: devicePath
func (_m *MountMock) ScanForAttach(devicePath string) error {
	ret := _m.Called(devicePath)

	// Prefer a registered return-function; otherwise use the canned error.
	if rf, ok := ret.Get(0).(func(string) error); ok {
		return rf(devicePath)
	}
	return ret.Error(0)
}
// UnmountPath provides a mock function with given fields: mountPath
func (_m *MountMock) UnmountPath(mountPath string) error {
	ret := _m.Called(mountPath)

	// Prefer a registered return-function; otherwise use the canned error.
	if rf, ok := ret.Get(0).(func(string) error); ok {
		return rf(mountPath)
	}
	return ret.Error(0)
}

View File

@ -1,167 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/kubernetes-csi/drivers/pkg/cinder/mount"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)
// nodeServer implements the CSI Node service for the Cinder driver; it
// embeds csicommon.DefaultNodeServer to inherit default behavior for any
// RPC it does not override.
type nodeServer struct {
	*csicommon.DefaultNodeServer
}
// NodeGetId reports this node's ID. It prefers the instance ID obtained
// from the mount provider and only falls back to the embedded default
// implementation when no provider ID is available.
func (ns *nodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest) (*csi.NodeGetIdResponse, error) {
	id, err := getNodeID()
	if err != nil {
		return nil, err
	}
	if id == "" {
		// No provider-supplied ID; defer to the default node server.
		return ns.DefaultNodeServer.NodeGetId(ctx, req)
	}
	return &csi.NodeGetIdResponse{NodeId: id}, nil
}
// NodeGetInfo reports node information. Like NodeGetId, it prefers the
// instance ID from the mount provider and falls back to the embedded
// default implementation when none is available.
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	id, err := getNodeID()
	if err != nil {
		return nil, err
	}
	if id == "" {
		// No provider-supplied ID; defer to the default node server.
		return ns.DefaultNodeServer.NodeGetInfo(ctx, req)
	}
	return &csi.NodeGetInfoResponse{NodeId: id}, nil
}
// getNodeID asks the mount provider for this node's instance ID.
// Failures from either the provider lookup or the ID query are logged at
// V(3) and returned to the caller with an empty ID.
func getNodeID() (string, error) {
	provider, err := mount.GetMountProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetMountProvider: %v", err)
		return "", err
	}
	id, err := provider.GetInstanceID()
	if err != nil {
		glog.V(3).Infof("Failed to GetInstanceID: %v", err)
		return "", err
	}
	return id, nil
}
// NodePublishVolume makes the volume available at the request's target
// path: it waits for the attached device (taken from the controller's
// "DevicePath" publish-info entry) to appear, then mounts it unless the
// target is already a mount point.
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	targetPath := req.GetTargetPath()
	fsType := req.GetVolumeCapability().GetMount().GetFsType()
	devicePath := req.GetPublishInfo()["DevicePath"]
	// Get Mount Provider
	m, err := mount.GetMountProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetMountProvider: %v", err)
		return nil, err
	}
	// Device Scan — ensure the attached device is visible on this node
	// before trying to mount it.
	err = m.ScanForAttach(devicePath)
	if err != nil {
		glog.V(3).Infof("Failed to ScanForAttach: %v", err)
		return nil, err
	}
	// Verify whether mounted; notMnt == true means we still need to mount.
	notMnt, err := m.IsLikelyNotMountPointAttach(targetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	// Volume Mount — skipped when already mounted, making the call idempotent.
	if notMnt {
		// Get Options: read-only vs read-write, plus any extra mount flags
		// from the volume capability.
		var options []string
		if req.GetReadonly() {
			options = append(options, "ro")
		} else {
			options = append(options, "rw")
		}
		mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
		options = append(options, mountFlags...)
		// Mount (FormatAndMount presumably formats an unformatted device
		// first — confirm against the mount provider's contract).
		err = m.FormatAndMount(devicePath, targetPath, fsType, options)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
	}
	return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume unmounts the volume from the request's target path.
// Returns codes.NotFound when the target is not currently a mount point,
// and codes.Internal for mount-provider failures.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	targetPath := req.GetTargetPath()
	// Get Mount Provider
	m, err := mount.GetMountProvider()
	if err != nil {
		glog.V(3).Infof("Failed to GetMountProvider: %v", err)
		return nil, err
	}
	notMnt, err := m.IsLikelyNotMountPointDetach(targetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if notMnt {
		// Nothing is mounted at the target path.
		return nil, status.Error(codes.NotFound, "Volume not mounted")
	}
	err = m.UnmountPath(req.GetTargetPath())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeUnstageVolume is a no-op for this driver: it unconditionally returns
// an empty success response (all mount work happens in NodePublishVolume).
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeStageVolume is a no-op for this driver: it unconditionally returns
// an empty success response (all mount work happens in NodePublishVolume).
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	return &csi.NodeStageVolumeResponse{}, nil
}

View File

@ -1,168 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"testing"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/kubernetes-csi/drivers/pkg/cinder/mount"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// fakeNs is the node server under test, shared by every test in this file.
var fakeNs *nodeServer

// Init Node Server
// init builds the shared node server once, from the package's fake
// constants (fakeNodeID, fakeEndpoint, fakeConfig).
func init() {
	if fakeNs == nil {
		d := NewDriver(fakeNodeID, fakeEndpoint, fakeConfig)
		fakeNs = NewNodeServer(d)
	}
}
// TestNodeGetId verifies that NodeGetId returns the instance ID supplied
// by the (mocked) mount provider.
func TestNodeGetId(t *testing.T) {
	// Wire a mock provider that reports fakeNodeID.
	mmock := new(mount.MountMock)
	mmock.On("GetInstanceID").Return(fakeNodeID, nil)
	mount.MInstance = mmock

	want := &csi.NodeGetIdResponse{
		NodeId: fakeNodeID,
	}

	got, err := fakeNs.NodeGetId(fakeCtx, &csi.NodeGetIdRequest{})
	if err != nil {
		t.Errorf("failed to NodeGetId: %v", err)
	}
	assert.New(t).Equal(want, got)
}
// TestNodeGetInfo verifies that NodeGetInfo returns the instance ID
// supplied by the (mocked) mount provider.
func TestNodeGetInfo(t *testing.T) {
	// Wire a mock provider that reports fakeNodeID.
	mmock := new(mount.MountMock)
	mmock.On("GetInstanceID").Return(fakeNodeID, nil)
	mount.MInstance = mmock

	want := &csi.NodeGetInfoResponse{
		NodeId: fakeNodeID,
	}

	got, err := fakeNs.NodeGetInfo(fakeCtx, &csi.NodeGetInfoRequest{})
	if err != nil {
		t.Errorf("failed to NodeGetInfo: %v", err)
	}
	assert.New(t).Equal(want, got)
}
// TestNodePublishVolume exercises the happy path: the device is scanned
// for, the target is not yet a mount point, and the volume is mounted rw.
func TestNodePublishVolume(t *testing.T) {
	// Mock every mount-provider call the handler makes.
	mmock := new(mount.MountMock)
	mmock.On("ScanForAttach", fakeDevicePath).Return(nil)
	mmock.On("IsLikelyNotMountPointAttach", fakeTargetPath).Return(true, nil)
	mmock.On("FormatAndMount", fakeDevicePath, fakeTargetPath, mock.AnythingOfType("string"), []string{"rw"}).Return(nil)
	mount.MInstance = mmock

	req := &csi.NodePublishVolumeRequest{
		VolumeId:         fakeVolID,
		PublishInfo:      map[string]string{"DevicePath": fakeDevicePath},
		TargetPath:       fakeTargetPath,
		VolumeCapability: nil,
		Readonly:         false,
	}

	got, err := fakeNs.NodePublishVolume(fakeCtx, req)
	if err != nil {
		t.Errorf("failed to NodePublishVolume: %v", err)
	}
	assert.New(t).Equal(&csi.NodePublishVolumeResponse{}, got)
}
// TestNodeUnpublishVolume exercises the happy path: the target is a mount
// point (notMnt == false) and the unmount succeeds.
func TestNodeUnpublishVolume(t *testing.T) {
	// Mock every mount-provider call the handler makes.
	mmock := new(mount.MountMock)
	mmock.On("IsLikelyNotMountPointDetach", fakeTargetPath).Return(false, nil)
	mmock.On("UnmountPath", fakeTargetPath).Return(nil)
	mount.MInstance = mmock

	req := &csi.NodeUnpublishVolumeRequest{
		VolumeId:   fakeVolID,
		TargetPath: fakeTargetPath,
	}

	got, err := fakeNs.NodeUnpublishVolume(fakeCtx, req)
	if err != nil {
		t.Errorf("failed to NodeUnpublishVolume: %v", err)
	}
	assert.New(t).Equal(&csi.NodeUnpublishVolumeResponse{}, got)
}

View File

@ -1,163 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"os"
"github.com/golang/glog"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"gopkg.in/gcfg.v1"
)
// IOpenStack abstracts the OpenStack compute and block-storage operations
// the Cinder driver needs, so tests can substitute OpenStackMock.
type IOpenStack interface {
	// CreateVolume returns the new volume's ID and availability zone.
	CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error)
	DeleteVolume(volumeID string) error
	// AttachVolume attaches the volume to the instance and returns the volume ID.
	AttachVolume(instanceID, volumeID string) (string, error)
	WaitDiskAttached(instanceID string, volumeID string) error
	DetachVolume(instanceID, volumeID string) error
	WaitDiskDetached(instanceID string, volumeID string) error
	// GetAttachmentDiskPath returns the device path of the volume's attachment.
	GetAttachmentDiskPath(instanceID, volumeID string) (string, error)
}
// OpenStack is the concrete IOpenStack implementation, backed by
// gophercloud service clients for compute (Nova) and blockstorage (Cinder).
type OpenStack struct {
	compute      *gophercloud.ServiceClient
	blockstorage *gophercloud.ServiceClient
}
// Config mirrors the [Global] section of the cloud config file read by
// gcfg; the tags map the file's kebab-case keys onto struct fields.
type Config struct {
	Global struct {
		AuthUrl    string `gcfg:"auth-url"`
		Username   string
		UserId     string `gcfg:"user-id"`
		Password   string
		TenantId   string `gcfg:"tenant-id"`
		TenantName string `gcfg:"tenant-name"`
		DomainId   string `gcfg:"domain-id"`
		DomainName string `gcfg:"domain-name"`
		Region     string
	}
}
// toAuthOptions converts the parsed [Global] config section into
// gophercloud authentication options. AllowReauth is always enabled
// because this is a persistent service that must renew tokens.
func (cfg Config) toAuthOptions() gophercloud.AuthOptions {
	var opts gophercloud.AuthOptions
	opts.IdentityEndpoint = cfg.Global.AuthUrl
	opts.Username = cfg.Global.Username
	opts.UserID = cfg.Global.UserId
	opts.Password = cfg.Global.Password
	opts.TenantID = cfg.Global.TenantId
	opts.TenantName = cfg.Global.TenantName
	opts.DomainID = cfg.Global.DomainId
	opts.DomainName = cfg.Global.DomainName
	// Persistent service, so we need to be able to renew tokens.
	opts.AllowReauth = true
	return opts
}
// GetConfigFromFile reads the cloud config file at configFilePath and
// returns the derived auth and endpoint options. Open/parse failures are
// logged at V(3) and returned with zero-valued options.
func GetConfigFromFile(configFilePath string) (gophercloud.AuthOptions, gophercloud.EndpointOpts, error) {
	var (
		authOpts gophercloud.AuthOptions
		epOpts   gophercloud.EndpointOpts
	)

	f, err := os.Open(configFilePath)
	if err != nil {
		glog.V(3).Infof("Failed to open OpenStack configuration file: %v", err)
		return authOpts, epOpts, err
	}
	defer f.Close()

	var cfg Config
	if err = gcfg.ReadInto(&cfg, f); err != nil {
		glog.V(3).Infof("Failed to read OpenStack configuration file: %v", err)
		return authOpts, epOpts, err
	}

	authOpts = cfg.toAuthOptions()
	epOpts = gophercloud.EndpointOpts{Region: cfg.Global.Region}
	return authOpts, epOpts, nil
}
// GetConfigFromEnv builds auth options from the standard OS_* environment
// variables and the endpoint region from OS_REGION_NAME. Failures are
// logged at V(3) and returned with zero-valued endpoint options.
func GetConfigFromEnv() (gophercloud.AuthOptions, gophercloud.EndpointOpts, error) {
	authOpts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		glog.V(3).Infof("Failed to read OpenStack configuration from env: %v", err)
		return authOpts, gophercloud.EndpointOpts{}, err
	}
	epOpts := gophercloud.EndpointOpts{
		Region: os.Getenv("OS_REGION_NAME"),
	}
	return authOpts, epOpts, nil
}
// OsInstance is the lazily-created singleton handed out by
// GetOpenStackProvider; nil until the first successful initialization.
var OsInstance IOpenStack = nil

// configFile is the cloud config path read by GetOpenStackProvider;
// overridable via InitOpenStackProvider.
var configFile string = "/etc/cloud.conf"
// InitOpenStackProvider overrides the default cloud config path
// ("/etc/cloud.conf") used by GetOpenStackProvider.
func InitOpenStackProvider(cfg string) {
	configFile = cfg
	glog.V(2).Infof("InitOpenStackProvider configFile: %s", configFile)
}
// GetOpenStackProvider returns the process-wide IOpenStack singleton,
// creating and authenticating it on first use. Configuration is read from
// configFile first, then from OS_* environment variables as a fallback.
//
// NOTE(review): access to OsInstance is unsynchronized; concurrent first
// calls could race and initialize twice — confirm callers are serialized.
func GetOpenStackProvider() (IOpenStack, error) {
	if OsInstance == nil {
		// Get config from file
		authOpts, epOpts, err := GetConfigFromFile(configFile)
		if err != nil {
			// Get config from env
			authOpts, epOpts, err = GetConfigFromEnv()
			if err != nil {
				return nil, err
			}
		}
		// Authenticate Client
		provider, err := openstack.AuthenticatedClient(authOpts)
		if err != nil {
			return nil, err
		}
		// Init Nova ServiceClient
		computeclient, err := openstack.NewComputeV2(provider, epOpts)
		if err != nil {
			return nil, err
		}
		// Init Cinder ServiceClient (block storage v3 API)
		blockstorageclient, err := openstack.NewBlockStorageV3(provider, epOpts)
		if err != nil {
			return nil, err
		}
		// Init OpenStack
		OsInstance = &OpenStack{
			compute:      computeclient,
			blockstorage: blockstorageclient,
		}
	}
	return OsInstance, nil
}

View File

@ -1,151 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import "github.com/stretchr/testify/mock"
// OpenStackMock is an autogenerated mock type for the IOpenStack type
// ORIGINALLY GENERATED BY mockery with hand edits
// Each method replays the return values registered with On(...).Return(...),
// or computes them through a registered function of matching signature.
type OpenStackMock struct {
	mock.Mock
}

// AttachVolume provides a mock function with given fields: instanceID, volumeID
func (_m *OpenStackMock) AttachVolume(instanceID string, volumeID string) (string, error) {
	ret := _m.Called(instanceID, volumeID)
	var r0 string
	if rf, ok := ret.Get(0).(func(string, string) string); ok {
		r0 = rf(instanceID, volumeID)
	} else {
		r0 = ret.Get(0).(string)
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(string, string) error); ok {
		r1 = rf(instanceID, volumeID)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// CreateVolume provides a mock function with given fields: name, size, vtype, availability, tags
func (_m *OpenStackMock) CreateVolume(name string, size int, vtype string, availability string, tags *map[string]string) (string, string, error) {
	ret := _m.Called(name, size, vtype, availability, tags)
	var r0 string
	if rf, ok := ret.Get(0).(func(string, int, string, string, *map[string]string) string); ok {
		r0 = rf(name, size, vtype, availability, tags)
	} else {
		r0 = ret.Get(0).(string)
	}
	var r1 string
	if rf, ok := ret.Get(1).(func(string, int, string, string, *map[string]string) string); ok {
		r1 = rf(name, size, vtype, availability, tags)
	} else {
		r1 = ret.Get(1).(string)
	}
	var r2 error
	if rf, ok := ret.Get(2).(func(string, int, string, string, *map[string]string) error); ok {
		r2 = rf(name, size, vtype, availability, tags)
	} else {
		r2 = ret.Error(2)
	}
	return r0, r1, r2
}

// DeleteVolume provides a mock function with given fields: volumeID
func (_m *OpenStackMock) DeleteVolume(volumeID string) error {
	ret := _m.Called(volumeID)
	var r0 error
	if rf, ok := ret.Get(0).(func(string) error); ok {
		r0 = rf(volumeID)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// DetachVolume provides a mock function with given fields: instanceID, volumeID
func (_m *OpenStackMock) DetachVolume(instanceID string, volumeID string) error {
	ret := _m.Called(instanceID, volumeID)
	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(instanceID, volumeID)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// GetAttachmentDiskPath provides a mock function with given fields: instanceID, volumeID
func (_m *OpenStackMock) GetAttachmentDiskPath(instanceID string, volumeID string) (string, error) {
	ret := _m.Called(instanceID, volumeID)
	var r0 string
	if rf, ok := ret.Get(0).(func(string, string) string); ok {
		r0 = rf(instanceID, volumeID)
	} else {
		r0 = ret.Get(0).(string)
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(string, string) error); ok {
		r1 = rf(instanceID, volumeID)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}

// WaitDiskAttached provides a mock function with given fields: instanceID, volumeID
func (_m *OpenStackMock) WaitDiskAttached(instanceID string, volumeID string) error {
	ret := _m.Called(instanceID, volumeID)
	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(instanceID, volumeID)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

// WaitDiskDetached provides a mock function with given fields: instanceID, volumeID
func (_m *OpenStackMock) WaitDiskDetached(instanceID string, volumeID string) error {
	ret := _m.Called(instanceID, volumeID)
	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(instanceID, volumeID)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}

View File

@ -1,119 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"os"
"testing"
"github.com/gophercloud/gophercloud"
"github.com/stretchr/testify/assert"
)
// Fixtures shared by the configuration tests in this file.
var fakeFileName = "cloud.conf"
var fakeUserName = "user"
var fakePassword = "pass"
var fakeAuthUrl = "https://169.254.169.254/identity/v3"
var fakeTenantID = "c869168a828847f39f7f06edd7305637"
var fakeDomainID = "2a73b8f597c04551a0fdc8e95544be8a"
var fakeRegion = "RegionOne"
// Test GetConfigFromFile
// TestGetConfigFromFile writes a temporary cloud.conf and checks that
// GetConfigFromFile parses it into the expected auth/endpoint options.
// Fixed: setup failures now use t.Fatalf — continuing after a failed
// os.Create would dereference a nil *os.File — and the write error is
// checked before the file is closed.
func TestGetConfigFromFile(t *testing.T) {
	// init file
	var fakeFileContent = `
[Global]
username=` + fakeUserName + `
password=` + fakePassword + `
auth-url=` + fakeAuthUrl + `
tenant-id=` + fakeTenantID + `
domain-id=` + fakeDomainID + `
region=` + fakeRegion + `
`
	f, err := os.Create(fakeFileName)
	if err != nil {
		// Fatal: with a nil file handle the write below would panic.
		t.Fatalf("failed to create file: %v", err)
	}
	defer os.Remove(fakeFileName)
	_, err = f.WriteString(fakeFileContent)
	if err != nil {
		f.Close()
		t.Fatalf("failed to write file: %v", err)
	}
	f.Close()
	// Init assert
	assert := assert.New(t)
	expectedAuthOpts := gophercloud.AuthOptions{
		IdentityEndpoint: fakeAuthUrl,
		Username:         fakeUserName,
		Password:         fakePassword,
		TenantID:         fakeTenantID,
		DomainID:         fakeDomainID,
		AllowReauth:      true,
	}
	expectedEpOpts := gophercloud.EndpointOpts{
		Region: fakeRegion,
	}
	// Invoke GetConfigFromFile
	actualAuthOpts, actualEpOpts, err := GetConfigFromFile(fakeFileName)
	if err != nil {
		t.Errorf("failed to GetConfigFromFile: %v", err)
	}
	// Assert
	assert.Equal(expectedAuthOpts, actualAuthOpts)
	assert.Equal(expectedEpOpts, actualEpOpts)
}
// Test GetConfigFromEnv
// TestGetConfigFromEnv seeds the standard OS_* variables and checks that
// GetConfigFromEnv reflects them. Fixed: the variables are now unset on
// exit so they no longer leak into every later test in the binary (which
// could mask the file-vs-env fallback behavior under test elsewhere).
func TestGetConfigFromEnv(t *testing.T) {
	// init env
	envVars := map[string]string{
		"OS_AUTH_URL":    fakeAuthUrl,
		"OS_USERNAME":    fakeUserName,
		"OS_PASSWORD":    fakePassword,
		"OS_TENANT_ID":   fakeTenantID,
		"OS_DOMAIN_ID":   fakeDomainID,
		"OS_REGION_NAME": fakeRegion,
	}
	for k, v := range envVars {
		os.Setenv(k, v)
	}
	// Restore a clean environment for subsequent tests.
	defer func() {
		for k := range envVars {
			os.Unsetenv(k)
		}
	}()
	// Init assert
	assert := assert.New(t)
	expectedAuthOpts := gophercloud.AuthOptions{
		IdentityEndpoint: fakeAuthUrl,
		Username:         fakeUserName,
		Password:         fakePassword,
		TenantID:         fakeTenantID,
		DomainID:         fakeDomainID,
	}
	expectedEpOpts := gophercloud.EndpointOpts{
		Region: fakeRegion,
	}
	// Invoke GetConfigFromEnv
	actualAuthOpts, actualEpOpts, err := GetConfigFromEnv()
	if err != nil {
		t.Errorf("failed to GetConfigFromEnv: %v", err)
	}
	// Assert
	assert.Equal(expectedAuthOpts, actualAuthOpts)
	assert.Equal(expectedEpOpts, actualEpOpts)
}

View File

@ -1,253 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"fmt"
"time"
"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
)
const (
	// Cinder volume status strings as reported by the block-storage API.
	VolumeAvailableStatus = "available"
	VolumeInUseStatus     = "in-use"
	VolumeDeletedStatus   = "deleted"
	VolumeErrorStatus     = "error"

	// Exponential backoff parameters for generic operation completion.
	operationFinishInitDelay = 1 * time.Second
	operationFinishFactor    = 1.1
	operationFinishSteps     = 10

	// Exponential backoff parameters used by WaitDiskAttached.
	diskAttachInitDelay = 1 * time.Second
	diskAttachFactor    = 1.2
	diskAttachSteps     = 15

	// Exponential backoff parameters used by WaitDiskDetached.
	diskDetachInitDelay = 1 * time.Second
	diskDetachFactor    = 1.2
	diskDetachSteps     = 13
)
// Volume is the driver's trimmed-down view of a Cinder volume, carrying at
// most one attachment (see GetVolume, which surfaces only the first).
type Volume struct {
	// ID of the instance, to which this volume is attached. "" if not attached
	AttachedServerId string
	// Device file path
	AttachedDevice string
	// Unique identifier for the volume.
	ID string
	// Human-readable display name for the volume.
	Name string
	// Current status of the volume.
	Status string
	// Volume size in GB
	Size int
}
// CreateVolume creates a volume of given size
// and returns the new volume's ID and availability zone. Optional tags
// become volume metadata.
func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) {
	createOpts := volumes.CreateOpts{
		Name:             name,
		Size:             size,
		VolumeType:       vtype,
		AvailabilityZone: availability,
	}
	if tags != nil {
		createOpts.Metadata = *tags
	}

	vol, err := volumes.Create(os.blockstorage, &createOpts).Extract()
	if err != nil {
		return "", "", err
	}
	return vol.ID, vol.AvailabilityZone, nil
}
// DeleteVolume delete a volume
// It refuses to delete a volume that is still attached to an instance.
func (os *OpenStack) DeleteVolume(volumeID string) error {
	used, err := os.diskIsUsed(volumeID)
	switch {
	case err != nil:
		return err
	case used:
		return fmt.Errorf("Cannot delete the volume %q, it's still attached to a node", volumeID)
	}
	return volumes.Delete(os.blockstorage, volumeID).ExtractErr()
}
// GetVolume retrieves Volume by its ID.
// Only the first attachment (if any) is copied into the result.
func (os *OpenStack) GetVolume(volumeID string) (Volume, error) {
	v, err := volumes.Get(os.blockstorage, volumeID).Extract()
	if err != nil {
		return Volume{}, err
	}

	out := Volume{
		ID:     v.ID,
		Name:   v.Name,
		Status: v.Status,
	}
	if len(v.Attachments) > 0 {
		out.AttachedServerId = v.Attachments[0].ServerID
		out.AttachedDevice = v.Attachments[0].Device
	}
	return out, nil
}
// AttachVolume attaches given cinder volume to the compute
// instance. Attaching a volume already attached to the same instance is a
// no-op; attachment to a different instance is an error.
func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) {
	volume, err := os.GetVolume(volumeID)
	if err != nil {
		return "", err
	}

	switch volume.AttachedServerId {
	case "":
		// Not attached anywhere yet; proceed with the attach below.
	case instanceID:
		glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
		return volume.ID, nil
	default:
		return "", fmt.Errorf("disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId)
	}

	if _, err = volumeattach.Create(os.compute, instanceID, &volumeattach.CreateOpts{
		VolumeID: volume.ID,
	}).Extract(); err != nil {
		return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err)
	}
	glog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID)
	return volume.ID, nil
}
// WaitDiskAttached waits, with exponential backoff, until the volume is
// reported attached to the given instance, translating a backoff timeout
// into a descriptive error. Fixed: the timeout message misspelled
// "allotted" as "alloted".
func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskAttachInitDelay,
		Factor:   diskAttachFactor,
		Steps:    diskAttachSteps,
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := os.diskIsAttached(instanceID, volumeID)
		if err != nil {
			// Propagate the error; ExponentialBackoff stops on it.
			return false, err
		}
		return attached, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q failed to be attached within the allotted time", volumeID)
	}
	return err
}
// DetachVolume detaches given cinder volume from the compute
// instance. A volume already in the "available" state is treated as
// already detached; any other non-"in-use" state is an error.
func (os *OpenStack) DetachVolume(instanceID, volumeID string) error {
	volume, err := os.GetVolume(volumeID)
	if err != nil {
		return err
	}

	// Guard clauses replace the original nested if/else.
	if volume.Status == VolumeAvailableStatus {
		glog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
		return nil
	}
	if volume.Status != VolumeInUseStatus {
		return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status)
	}
	if volume.AttachedServerId != instanceID {
		return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
	}

	if err = volumeattach.Delete(os.compute, instanceID, volume.ID).ExtractErr(); err != nil {
		return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
	}
	glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
	return nil
}
// WaitDiskDetached waits, with exponential backoff, until the volume is no
// longer reported attached to the given instance, translating a backoff
// timeout into a descriptive error. Fixed: the timeout message misspelled
// "allotted" as "alloted".
func (os *OpenStack) WaitDiskDetached(instanceID string, volumeID string) error {
	backoff := wait.Backoff{
		Duration: diskDetachInitDelay,
		Factor:   diskDetachFactor,
		Steps:    diskDetachSteps,
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attached, err := os.diskIsAttached(instanceID, volumeID)
		if err != nil {
			// Propagate the error; ExponentialBackoff stops on it.
			return false, err
		}
		return !attached, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("Volume %q failed to detach within the allotted time", volumeID)
	}
	return err
}
// GetAttachmentDiskPath gets device path of attached volume to the compute
// instance. The volume must be "in-use" and attached to the given instance.
func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
	volume, err := os.GetVolume(volumeID)
	if err != nil {
		return "", err
	}
	if volume.Status != VolumeInUseStatus {
		return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status)
	}

	switch volume.AttachedServerId {
	case "":
		return "", fmt.Errorf("volume %s has no ServerId", volumeID)
	case instanceID:
		return volume.AttachedDevice, nil
	default:
		return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerId)
	}
}
// diskIsAttached queries if a volume is attached to a compute instance
// (specifically, whether its recorded attachment server matches instanceID).
func (os *OpenStack) diskIsAttached(instanceID, volumeID string) (bool, error) {
	vol, err := os.GetVolume(volumeID)
	if err != nil {
		return false, err
	}
	return vol.AttachedServerId == instanceID, nil
}
// diskIsUsed returns true a disk is attached to any node.
func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
	vol, err := os.GetVolume(volumeID)
	if err != nil {
		return false, err
	}
	return len(vol.AttachedServerId) > 0, nil
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package csicommon
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
@ -45,27 +45,7 @@ func (cs *DefaultControllerServer) ControllerUnpublishVolume(ctx context.Context
}
func (cs *DefaultControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
glog.V(5).Infof("Using default ValidateVolumeCapabilities")
for _, c := range req.GetVolumeCapabilities() {
found := false
for _, c1 := range cs.Driver.vc {
if c1.GetMode() == c.GetAccessMode().GetMode() {
found = true
}
}
if !found {
return &csi.ValidateVolumeCapabilitiesResponse{
Supported: false,
Message: "Driver doesnot support mode:" + c.GetAccessMode().GetMode().String(),
}, status.Error(codes.InvalidArgument, "Driver doesnot support mode:"+c.GetAccessMode().GetMode().String())
}
// TODO: Ignoring mount & block tyeps for now.
}
return &csi.ValidateVolumeCapabilitiesResponse{
Supported: true,
}, nil
return nil, status.Error(codes.Unimplemented, "")
}
func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {

View File

@ -18,11 +18,12 @@ package csicommon
import (
"fmt"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
)
type CSIDriver struct {

View File

@ -19,7 +19,7 @@ package csicommon
import (
"testing"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -31,7 +31,7 @@ const (
)
var (
vendorVersion = "0.3.0"
vendorVersion = "1.0.0-rc2"
)
func NewFakeDriver() *CSIDriver {

View File

@ -17,7 +17,7 @@ limitations under the License.
package csicommon
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"

View File

@ -20,7 +20,7 @@ import (
"context"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/stretchr/testify/assert"
)

View File

@ -17,7 +17,7 @@ limitations under the License.
package csicommon
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
@ -36,14 +36,6 @@ func (ns *DefaultNodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.N
return nil, status.Error(codes.Unimplemented, "")
}
func (ns *DefaultNodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest) (*csi.NodeGetIdResponse, error) {
glog.V(5).Infof("Using default NodeGetId")
return &csi.NodeGetIdResponse{
NodeId: ns.Driver.nodeID,
}, nil
}
func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
glog.V(5).Infof("Using default NodeGetInfo")
@ -67,3 +59,7 @@ func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.N
},
}, nil
}
func (ns *DefaultNodeServer) NodeGetVolumeStats(ctx context.Context, in *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

View File

@ -20,24 +20,12 @@ import (
"context"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestNodeGetId(t *testing.T) {
d := NewFakeDriver()
ns := NewDefaultNodeServer(d)
// Test valid request
req := csi.NodeGetIdRequest{}
resp, err := ns.NodeGetId(context.Background(), &req)
assert.NoError(t, err)
assert.Equal(t, resp.GetNodeId(), fakeNodeID)
}
func TestNodeGetInfo(t *testing.T) {
d := NewFakeDriver()

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/glog"
"google.golang.org/grpc"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
)
// Defines Non blocking GRPC server interfaces

View File

@ -20,7 +20,7 @@ import (
"fmt"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc"

View File

@ -17,7 +17,7 @@ limitations under the License.
package flexadapter
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -47,7 +47,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs
}
call := cs.flexDriver.NewDriverCall(attachCmd)
call.AppendSpec(req.GetVolumeId(), fsType, req.GetReadonly(), req.GetVolumeAttributes())
call.AppendSpec(req.GetVolumeId(), fsType, req.GetReadonly(), req.GetVolumeContext())
call.Append(req.GetNodeId())
callStatus, err := call.Run()
@ -57,12 +57,12 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs
return nil, status.Error(codes.Internal, err.Error())
}
pvInfo := map[string]string{}
publishContext := map[string]string{}
pvInfo[deviceID] = callStatus.DevicePath
publishContext[deviceID] = callStatus.DevicePath
return &csi.ControllerPublishVolumeResponse{
PublishInfo: pvInfo,
PublishContext: publishContext,
}, nil
}
@ -86,10 +86,5 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *
}
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
for _, cap := range req.VolumeCapabilities {
if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
return &csi.ValidateVolumeCapabilitiesResponse{Supported: false, Message: ""}, nil
}
}
return &csi.ValidateVolumeCapabilitiesResponse{Supported: true, Message: ""}, nil
return cs.DefaultControllerServer.ValidateVolumeCapabilities(ctx, req)
}

View File

@ -19,7 +19,7 @@ package flexadapter
import (
"os"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -38,7 +38,7 @@ type flexAdapter struct {
}
var (
version = "0.3.0"
version = "1.0.0-rc2"
)
func New() *flexAdapter {

View File

@ -19,7 +19,7 @@ package flexadapter
import (
"os"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -53,9 +53,9 @@ func (ns *nodeServer) waitForAttach(req *csi.NodePublishVolumeRequest, fsType st
var dID string
if req.GetPublishInfo() != nil {
if req.GetPublishContext() != nil {
var ok bool
dID, ok = req.GetPublishInfo()[deviceID]
dID, ok = req.GetPublishContext()[deviceID]
if !ok {
return status.Error(codes.InvalidArgument, "Missing device ID")
}
@ -65,7 +65,7 @@ func (ns *nodeServer) waitForAttach(req *csi.NodePublishVolumeRequest, fsType st
call := ns.flexDriver.NewDriverCall(waitForAttachCmd)
call.Append(dID)
call.AppendSpec(req.GetVolumeId(), fsType, req.GetReadonly(), req.GetVolumeAttributes())
call.AppendSpec(req.GetVolumeId(), fsType, req.GetReadonly(), req.GetVolumeContext())
_, err := call.Run()
if isCmdNotSupportedErr(err) {
@ -116,15 +116,15 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
call.Append(req.GetTargetPath())
if req.GetPublishInfo() != nil {
call.Append(req.GetPublishInfo()[deviceID])
if req.GetPublishContext() != nil {
call.Append(req.GetPublishContext()[deviceID])
}
call.AppendSpec(req.GetVolumeId(), fsType, req.GetReadonly(), req.GetVolumeAttributes())
call.AppendSpec(req.GetVolumeId(), fsType, req.GetReadonly(), req.GetVolumeContext())
_, err = call.Run()
if isCmdNotSupportedErr(err) {
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
err := mountDevice(req.VolumeAttributes[deviceID], targetPath, fsType, req.GetReadonly(), mountFlags)
err := mountDevice(req.VolumeContext[deviceID], targetPath, fsType, req.GetReadonly(), mountFlags)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

View File

@ -18,7 +18,12 @@ package hostpath
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"github.com/golang/protobuf/ptypes"
"github.com/golang/glog"
"github.com/pborman/uuid"
@ -26,13 +31,15 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
utilexec "k8s.io/utils/exec"
)
const (
deviceID = "deviceID"
provisionRoot = "/tmp/"
snapshotRoot = "/tmp/"
maxStorageCapacity = tib
)
@ -64,9 +71,9 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// TODO (sbezverk) Do I need to make sure that RBD volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
Id: exVol.VolID,
VolumeId: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
Attributes: req.GetParameters(),
VolumeContext: req.GetParameters(),
},
}, nil
}
@ -84,6 +91,26 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
glog.V(3).Infof("failed to create volume: %v", err)
return nil, err
}
if req.GetVolumeContentSource() != nil {
contentSource := req.GetVolumeContentSource()
if contentSource.GetSnapshot() != nil {
snapshotId := contentSource.GetSnapshot().GetSnapshotId()
snapshot, ok := hostPathVolumeSnapshots[snapshotId]
if !ok {
return nil, status.Errorf(codes.NotFound, "cannot find snapshot %v", snapshotId)
}
if snapshot.ReadyToUse != true {
return nil, status.Errorf(codes.Internal, "Snapshot %v is not yet ready to use.", snapshotId)
}
snapshotPath := snapshot.Path
args := []string{"zxvf", snapshotPath, "-C", path}
executor := utilexec.New()
out, err := executor.Command("tar", args...).CombinedOutput()
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed pre-populate data for volume: %v: %s", err, out))
}
}
}
glog.V(4).Infof("create volume %s", path)
hostPathVol := hostPathVolume{}
hostPathVol.VolName = req.GetName()
@ -93,9 +120,9 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
hostPathVolumes[volumeID] = hostPathVol
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
Id: volumeID,
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
Attributes: req.GetParameters(),
VolumeContext: req.GetParameters(),
},
}, nil
}
@ -120,22 +147,221 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
return cs.DefaultControllerServer.ValidateVolumeCapabilities(ctx, req)
}
// CreateSnapshot uses tar command to create snapshot for hostpath volume. The tar command can quickly create
// archives of entire directories. The host image must have "tar" binaries in /bin, /usr/sbin, or /usr/bin.
func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
glog.V(3).Infof("invalid create snapshot req: %v", req)
return nil, err
}
if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
if _, ok := hostPathVolumes[req.GetVolumeId()]; !ok {
return nil, status.Error(codes.NotFound, "Volume does not exist")
if len(req.GetSourceVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "SourceVolumeId missing in request")
}
for _, cap := range req.VolumeCapabilities {
if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
return &csi.ValidateVolumeCapabilitiesResponse{Supported: false, Message: ""}, nil
// Need to check for already existing snapshot name, and if found check for the
// requested sourceVolumeId and sourceVolumeId of snapshot that has been created.
if exSnap, err := getSnapshotByName(req.GetName()); err == nil {
// Since err is nil, it means the snapshot with the same name already exists need
// to check if the sourceVolumeId of existing snapshot is the same as in new request.
if exSnap.VolID == req.GetSourceVolumeId() {
// same snapshot has been created.
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SnapshotId: exSnap.Id,
SourceVolumeId: exSnap.VolID,
CreationTime: &exSnap.CreationTime,
SizeBytes: exSnap.SizeBytes,
ReadyToUse: exSnap.ReadyToUse,
},
}, nil
}
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("snapshot with the same name: %s but with different SourceVolumeId already exist", req.GetName()))
}
volumeID := req.GetSourceVolumeId()
hostPathVolume, ok := hostPathVolumes[volumeID]
if !ok {
return nil, status.Error(codes.Internal, "volumeID is not exist")
}
snapshotID := uuid.NewUUID().String()
creationTime := ptypes.TimestampNow()
volPath := hostPathVolume.VolPath
file := snapshotRoot + snapshotID + ".tgz"
args := []string{"czf", file, "-C", volPath, "."}
executor := utilexec.New()
out, err := executor.Command("tar", args...).CombinedOutput()
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("failed create snapshot: %v: %s", err, out))
}
glog.V(4).Infof("create volume snapshot %s", file)
snapshot := hostPathSnapshot{}
snapshot.Name = req.GetName()
snapshot.Id = snapshotID
snapshot.VolID = volumeID
snapshot.Path = file
snapshot.CreationTime = *creationTime
snapshot.SizeBytes = hostPathVolume.VolSize
snapshot.ReadyToUse = true
hostPathVolumeSnapshots[snapshotID] = snapshot
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SnapshotId: snapshot.Id,
SourceVolumeId: snapshot.VolID,
CreationTime: &snapshot.CreationTime,
SizeBytes: snapshot.SizeBytes,
ReadyToUse: snapshot.ReadyToUse,
},
}, nil
}
// DeleteSnapshot removes the on-disk snapshot archive and drops the snapshot
// from the in-memory registry. Deleting a snapshot that does not exist is
// treated as success: os.RemoveAll returns nil for a missing path and
// delete() on a map is a no-op for a missing key, so the call is idempotent.
func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
	// Check arguments
	if len(req.GetSnapshotId()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Snapshot ID missing in request")
	}
	// Reject the call unless this driver advertised CREATE_DELETE_SNAPSHOT.
	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
		glog.V(3).Infof("invalid delete snapshot req: %v", req)
		return nil, err
	}
	snapshotID := req.GetSnapshotId()
	glog.V(4).Infof("deleting volume %s", snapshotID)
	// The archive path mirrors the naming used at creation time:
	// snapshotRoot + <snapshot UUID> + ".tgz".
	path := snapshotRoot + snapshotID + ".tgz"
	// NOTE(review): the RemoveAll error is intentionally ignored (best-effort
	// cleanup); the map entry is removed regardless so the snapshot is gone
	// from the driver's point of view.
	os.RemoveAll(path)
	delete(hostPathVolumeSnapshots, snapshotID)
	return &csi.DeleteSnapshotResponse{}, nil
}
func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS); err != nil {
glog.V(3).Infof("invalid list snapshot req: %v", req)
return nil, err
}
// case 1: SnapshotId is not empty, return snapshots that match the snapshot id.
if len(req.GetSnapshotId()) != 0 {
snapshotID := req.SnapshotId
if snapshot, ok := hostPathVolumeSnapshots[snapshotID]; ok {
return convertSnapshot(snapshot), nil
}
}
return &csi.ValidateVolumeCapabilitiesResponse{Supported: true, Message: ""}, nil
// case 2: SourceVolumeId is not empty, return snapshots that match the source volume id.
if len(req.GetSourceVolumeId()) != 0 {
for _, snapshot := range hostPathVolumeSnapshots {
if snapshot.VolID == req.SourceVolumeId {
return convertSnapshot(snapshot), nil
}
}
}
var snapshots []csi.Snapshot
// case 3: no parameter is set, so we return all the snapshots.
sortedKeys := make([]string, 0)
for k := range hostPathVolumeSnapshots {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
for _, key := range sortedKeys {
snap := hostPathVolumeSnapshots[key]
snapshot := csi.Snapshot{
SnapshotId: snap.Id,
SourceVolumeId: snap.VolID,
CreationTime: &snap.CreationTime,
SizeBytes: snap.SizeBytes,
ReadyToUse: snap.ReadyToUse,
}
snapshots = append(snapshots, snapshot)
}
var (
ulenSnapshots = int32(len(snapshots))
maxEntries = req.MaxEntries
startingToken int32
)
if v := req.StartingToken; v != "" {
i, err := strconv.ParseUint(v, 10, 32)
if err != nil {
return nil, status.Errorf(
codes.Aborted,
"startingToken=%d !< int32=%d",
startingToken, math.MaxUint32)
}
startingToken = int32(i)
}
if startingToken > ulenSnapshots {
return nil, status.Errorf(
codes.Aborted,
"startingToken=%d > len(snapshots)=%d",
startingToken, ulenSnapshots)
}
// Discern the number of remaining entries.
rem := ulenSnapshots - startingToken
// If maxEntries is 0 or greater than the number of remaining entries then
// set maxEntries to the number of remaining entries.
if maxEntries == 0 || maxEntries > rem {
maxEntries = rem
}
var (
i int
j = startingToken
entries = make(
[]*csi.ListSnapshotsResponse_Entry,
maxEntries)
)
for i = 0; i < len(entries); i++ {
entries[i] = &csi.ListSnapshotsResponse_Entry{
Snapshot: &snapshots[j],
}
j++
}
var nextToken string
if j < ulenSnapshots {
nextToken = fmt.Sprintf("%d", j)
}
return &csi.ListSnapshotsResponse{
Entries: entries,
NextToken: nextToken,
}, nil
}
// convertSnapshot wraps a single in-memory snapshot record into a
// ListSnapshotsResponse containing exactly one entry. It is used for the
// filtered ListSnapshots cases (lookup by snapshot ID or by source volume
// ID), where at most one match is returned.
func convertSnapshot(snap hostPathSnapshot) *csi.ListSnapshotsResponse {
	entries := []*csi.ListSnapshotsResponse_Entry{
		{
			Snapshot: &csi.Snapshot{
				SnapshotId:     snap.Id,
				SourceVolumeId: snap.VolID,
				CreationTime:   &snap.CreationTime,
				SizeBytes:      snap.SizeBytes,
				ReadyToUse:     snap.ReadyToUse,
			},
		},
	}

	rsp := &csi.ListSnapshotsResponse{
		Entries: entries,
	}

	return rsp
}

View File

@ -19,9 +19,10 @@ package hostpath
import (
"fmt"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
@ -52,15 +53,27 @@ type hostPathVolume struct {
VolPath string `json:"volPath"`
}
// hostPathSnapshot is the driver's in-memory record of a snapshot taken from
// a hostpath volume. The snapshot payload itself is a tar archive on disk at
// Path; this struct only tracks its metadata (see CreateSnapshot /
// DeleteSnapshot in the controller server).
type hostPathSnapshot struct {
	Name         string              `json:"name"`         // snapshot name supplied by the caller in CreateSnapshotRequest
	Id           string              `json:"id"`           // generated UUID, used as the CSI snapshot ID
	VolID        string              `json:"volID"`        // ID of the source hostpath volume
	Path         string              `json:"path"`         // filesystem path of the .tgz archive holding the data
	CreationTime timestamp.Timestamp `json:"creationTime"` // protobuf timestamp recorded at creation
	SizeBytes    int64               `json:"sizeBytes"`    // size copied from the source volume's VolSize
	ReadyToUse   bool                `json:"readyToUse"`   // true once the archive is written and usable as a volume source
}
var hostPathVolumes map[string]hostPathVolume
var hostPathVolumeSnapshots map[string]hostPathSnapshot
var (
hostPathDriver *hostPath
vendorVersion = "0.3.0"
vendorVersion = "dev"
)
func init() {
hostPathVolumes = map[string]hostPathVolume{}
hostPathVolumeSnapshots = map[string]hostPathSnapshot{}
}
func GetHostPathDriver() *hostPath {
@ -87,13 +100,19 @@ func NewNodeServer(d *csicommon.CSIDriver) *nodeServer {
func (hp *hostPath) Run(driverName, nodeID, endpoint string) {
glog.Infof("Driver: %v ", driverName)
glog.Infof("Version: %s", vendorVersion)
// Initialize default library driver
hp.driver = csicommon.NewCSIDriver(driverName, vendorVersion, nodeID)
if hp.driver == nil {
glog.Fatalln("Failed to initialize CSI Driver.")
}
hp.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME})
hp.driver.AddControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
})
hp.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})
// Create GRPC servers
@ -121,3 +140,12 @@ func getVolumeByName(volName string) (hostPathVolume, error) {
}
return hostPathVolume{}, fmt.Errorf("volume name %s does not exit in the volumes list", volName)
}
// getSnapshotByName linearly scans the package-level snapshot registry for a
// snapshot with the given name (names are distinct from the UUID snapshot
// IDs that key the map). Returns the matching record, or a zero-value
// hostPathSnapshot plus an error when no snapshot has that name.
func getSnapshotByName(name string) (hostPathSnapshot, error) {
	for _, snapshot := range hostPathVolumeSnapshots {
		if snapshot.Name == name {
			return snapshot, nil
		}
	}
	return hostPathSnapshot{}, fmt.Errorf("snapshot name %s does not exit in the snapshots list", name)
}

View File

@ -22,7 +22,7 @@ import (
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/util/mount"
@ -67,13 +67,13 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
fsType := req.GetVolumeCapability().GetMount().GetFsType()
deviceId := ""
if req.GetPublishInfo() != nil {
deviceId = req.GetPublishInfo()[deviceID]
if req.GetPublishContext() != nil {
deviceId = req.GetPublishContext()[deviceID]
}
readOnly := req.GetReadonly()
volumeId := req.GetVolumeId()
attrib := req.GetVolumeAttributes()
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
glog.V(4).Infof("target %v\nfstype %v\ndevice %v\nreadonly %v\nvolumeId %v\nattributes %v\nmountflags %v\n",

View File

@ -17,7 +17,7 @@ limitations under the License.
package iscsi
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -39,7 +39,7 @@ const (
)
var (
version = "0.3.0"
version = "1.0.0-rc2"
)
func NewDriver(nodeID, endpoint string) *driver {

View File

@ -21,22 +21,22 @@ import (
"fmt"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util"
)
func getISCSIInfo(req *csi.NodePublishVolumeRequest) (*iscsiDisk, error) {
volName := req.GetVolumeId()
tp := req.GetVolumeAttributes()["targetPortal"]
iqn := req.GetVolumeAttributes()["iqn"]
lun := req.GetVolumeAttributes()["lun"]
tp := req.GetVolumeContext()["targetPortal"]
iqn := req.GetVolumeContext()["iqn"]
lun := req.GetVolumeContext()["lun"]
if tp == "" || iqn == "" || lun == "" {
return nil, fmt.Errorf("iSCSI target information is missing")
}
portalList := req.GetVolumeAttributes()["portals"]
secretParams := req.GetVolumeAttributes()["secret"]
portalList := req.GetVolumeContext()["portals"]
secretParams := req.GetVolumeContext()["secret"]
secret := parseSecret(secretParams)
portal := portalMounter(tp)
@ -52,15 +52,15 @@ func getISCSIInfo(req *csi.NodePublishVolumeRequest) (*iscsiDisk, error) {
bkportal = append(bkportal, portalMounter(string(portal)))
}
iface := req.GetVolumeAttributes()["iscsiInterface"]
initiatorName := req.GetVolumeAttributes()["initiatorName"]
iface := req.GetVolumeContext()["iscsiInterface"]
initiatorName := req.GetVolumeContext()["initiatorName"]
chapDiscovery := false
if req.GetVolumeAttributes()["discoveryCHAPAuth"] == "true" {
if req.GetVolumeContext()["discoveryCHAPAuth"] == "true" {
chapDiscovery = true
}
chapSession := false
if req.GetVolumeAttributes()["sessionCHAPAuth"] == "true" {
if req.GetVolumeContext()["sessionCHAPAuth"] == "true" {
chapSession = true
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package iscsi
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

View File

@ -17,7 +17,7 @@ limitations under the License.
package nfs
import (
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -39,7 +39,7 @@ const (
)
var (
version = "0.3.0"
version = "1.0.0-rc2"
)
func NewDriver(nodeID, endpoint string) *driver {

View File

@ -21,7 +21,7 @@ import (
"os"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -58,8 +58,8 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
mo = append(mo, "ro")
}
s := req.GetVolumeAttributes()["server"]
ep := req.GetVolumeAttributes()["share"]
s := req.GetVolumeContext()["server"]
ep := req.GetVolumeContext()["share"]
source := fmt.Sprintf("%s:%s", s, ep)
mounter := mount.New("")