diff --git a/docs/deploy-cephfs.md b/docs/deploy-cephfs.md index 27dfd18b1..4e5b68c77 100644 --- a/docs/deploy-cephfs.md +++ b/docs/deploy-cephfs.md @@ -33,7 +33,7 @@ Parameter | Required | Description --------- | -------- | ----------- `monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`) `mounter` | no | Mount method to be used for this volume. Available options are `kernel` for Ceph kernel client and `fuse` for Ceph FUSE driver. Defaults to "default mounter", see command line arguments. -`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing CephFS will be used. +`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used. `pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created `rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume `csiProvisionerSecretName`, `csiNodeStageSecretName` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value @@ -49,6 +49,8 @@ User credentials with access to an existing volume * `userID`: ID of a user client * `userKey`: key of a user client +Notes on volume size: when provisioning a new volume, the `max_bytes` quota attribute for this volume will be set to the requested volume size (see [Ceph quota documentation](http://docs.ceph.com/docs/mimic/cephfs/quota/)). A request for a zero-sized volume means no quota attribute will be set. 
+ ## Deployment with Kubernetes Requires Kubernetes 1.11 diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go index 1afd761f5..10ee1084a 100644 --- a/pkg/cephfs/controllerserver.go +++ b/pkg/cephfs/controllerserver.go @@ -30,10 +30,6 @@ type controllerServer struct { *csicommon.DefaultControllerServer } -const ( - oneGB = 1073741824 -) - func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { if err := cs.validateCreateVolumeRequest(req); err != nil { glog.Errorf("CreateVolumeRequest validation failed: %v", err) @@ -86,11 +82,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol glog.Infof("cephfs: volume %s is provisioned statically", volId) } - sz := req.GetCapacityRange().GetRequiredBytes() - if sz == 0 { - sz = oneGB - } - if err = ctrCache.insert(&controllerCacheEntry{VolOptions: *volOptions, VolumeID: volId}); err != nil { glog.Errorf("failed to store a cache entry for volume %s: %v", volId, err) return nil, status.Error(codes.Internal, err.Error()) @@ -99,7 +90,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ Id: string(volId), - CapacityBytes: sz, + CapacityBytes: req.GetCapacityRange().GetRequiredBytes(), Attributes: req.GetParameters(), }, }, nil diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index f28d14197..27cb63228 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -81,8 +81,10 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI return err } - if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil { - return err + if bytesQuota > 0 { + if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil { + return err + } } if err := setVolumeAttribute(localVolRoot, 
"ceph.dir.layout.pool", volOptions.Pool); err != nil {