Mirror of https://github.com/ceph/ceph-csi.git

Merge pull request #103 from gman0/cephfs-zeroquota

cephfs: don't set quotas for zero-sized volumes

commit 9a7ef258fc
```diff
@@ -33,7 +33,7 @@ Parameter | Required | Description
 --------- | -------- | -----------
 `monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
 `mounter` | no | Mount method to be used for this volume. Available options are `kernel` for Ceph kernel client and `fuse` for Ceph FUSE driver. Defaults to "default mounter", see command line arguments.
-`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing CephFS will be used.
+`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used.
 `pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created
 `rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume
 `csiProvisionerSecretName`, `csiNodeStageSecretName` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
```
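In the CSI spec these StorageClass parameters arrive as a plain `map[string]string`, so a BOOL parameter like `provisionVolume` has to be parsed explicitly by the driver. A minimal sketch of that parsing; the helper name and error messages are illustrative, not the driver's actual code:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseProvisionVolume is a hypothetical helper: CSI delivers all
// StorageClass parameters as strings, so the BOOL-valued
// provisionVolume parameter must be parsed and validated by hand.
func parseProvisionVolume(params map[string]string) (bool, error) {
	raw, ok := params["provisionVolume"]
	if !ok {
		return false, fmt.Errorf("missing required parameter provisionVolume")
	}
	b, err := strconv.ParseBool(raw)
	if err != nil {
		return false, fmt.Errorf("invalid provisionVolume value %q: %v", raw, err)
	}
	return b, nil
}

func main() {
	params := map[string]string{"provisionVolume": "true"}
	provision, err := parseProvisionVolume(params)
	fmt.Println(provision, err) // true <nil>
}
```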
```diff
@@ -49,6 +49,8 @@ User credentials with access to an existing volume
 * `userID`: ID of a user client
 * `userKey`: key of a user client
 
+Notes on volume size: when provisioning a new volume, `max_bytes` quota attribute for this volume will be set to the requested volume size (see [Ceph quota documentation](http://docs.ceph.com/docs/mimic/cephfs/quota/)). A request for a zero-sized volume means no quota attribute will be set.
+
 ## Deployment with Kubernetes
 
 Requires Kubernetes 1.11
```
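The new docs note corresponds one-to-one with the `createVolume` change at the bottom of this diff: `ceph.quota.max_bytes` is only written for a positive quota. A self-contained sketch of that rule, with a stubbed-out `setVolumeAttribute` (the real helper is not shown in this commit) and made-up paths:

```go
package main

import "fmt"

// setVolumeAttribute stands in for the driver's real helper (not shown
// in this commit); here it only records what would be written.
func setVolumeAttribute(root, attr, value string) error {
	fmt.Printf("set %s=%s on %s\n", attr, value, root)
	return nil
}

// applyQuota encodes the docs note: the quota attribute is written only
// for a positive size; a zero-sized request leaves the volume unlimited.
func applyQuota(volRoot string, bytesQuota int64) error {
	if bytesQuota > 0 {
		return setVolumeAttribute(volRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota))
	}
	return nil
}

func main() {
	applyQuota("/mnt/cephfs/csi-vol-a", 1073741824) // 1 GiB quota is set
	applyQuota("/mnt/cephfs/csi-vol-b", 0)          // no quota attribute at all
}
```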
```diff
@@ -30,10 +30,6 @@ type controllerServer struct {
 	*csicommon.DefaultControllerServer
 }
 
-const (
-	oneGB = 1073741824
-)
-
 func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
 	if err := cs.validateCreateVolumeRequest(req); err != nil {
 		glog.Errorf("CreateVolumeRequest validation failed: %v", err)
```
```diff
@@ -86,11 +82,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		glog.Infof("cephfs: volume %s is provisioned statically", volId)
 	}
 
-	sz := req.GetCapacityRange().GetRequiredBytes()
-	if sz == 0 {
-		sz = oneGB
-	}
-
 	if err = ctrCache.insert(&controllerCacheEntry{VolOptions: *volOptions, VolumeID: volId}); err != nil {
 		glog.Errorf("failed to store a cache entry for volume %s: %v", volId, err)
 		return nil, status.Error(codes.Internal, err.Error())
```
```diff
@@ -99,7 +90,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 	return &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
 			Id:            string(volId),
-			CapacityBytes: sz,
+			CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
 			Attributes:    req.GetParameters(),
 		},
 	}, nil
```
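With the `oneGB` fallback gone, `CreateVolume` now echoes back exactly the requested size, which may be zero. This is safe even when the request carries no `CapacityRange` at all, because generated protobuf getters in Go are nil-safe. A sketch using simplified stand-ins for the generated CSI types (only the pieces needed to show the getter behavior):

```go
package main

import "fmt"

// CapacityRange is a simplified stand-in for the generated CSI type.
type CapacityRange struct {
	RequiredBytes int64
}

// GetRequiredBytes mimics a generated protobuf getter: it is nil-safe,
// so a request without a CapacityRange reads as a zero-sized request.
func (c *CapacityRange) GetRequiredBytes() int64 {
	if c == nil {
		return 0
	}
	return c.RequiredBytes
}

// CreateVolumeRequest is likewise a minimal stand-in.
type CreateVolumeRequest struct {
	CapacityRange *CapacityRange
}

func (r *CreateVolumeRequest) GetCapacityRange() *CapacityRange {
	if r == nil {
		return nil
	}
	return r.CapacityRange
}

func main() {
	sized := &CreateVolumeRequest{CapacityRange: &CapacityRange{RequiredBytes: 1 << 30}}
	unsized := &CreateVolumeRequest{}

	fmt.Println(sized.GetCapacityRange().GetRequiredBytes())   // 1073741824
	fmt.Println(unsized.GetCapacityRange().GetRequiredBytes()) // 0 -> no quota
}
```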
```diff
@@ -81,9 +81,11 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI
 		return err
 	}
 
-	if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
-		return err
+	if bytesQuota > 0 {
+		if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
+			return err
+		}
 	}
 
 	if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool", volOptions.Pool); err != nil {
 		return fmt.Errorf("%v\ncephfs: Does pool '%s' exist?", err, volOptions.Pool)
```
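`setVolumeAttribute` itself is untouched by, and absent from, this diff. CephFS exposes quotas and directory layouts as extended attributes on the volume's root directory, so a plausible minimal implementation, offered here only as an assumption about the helper, shells out to `setfattr(1)`:

```go
package main

import (
	"fmt"
	"os/exec"
)

// setVolumeAttribute is a guess at the helper used in createVolume:
// CephFS quotas and layouts are configured through extended attributes,
// and setfattr(1) is the usual tool. The driver's real implementation
// may differ.
func setVolumeAttribute(root, attrName, attrValue string) error {
	out, err := exec.Command("setfattr", "-n", attrName, "-v", attrValue, root).CombinedOutput()
	if err != nil {
		return fmt.Errorf("setfattr %s=%s on %s failed: %v (output: %s)", attrName, attrValue, root, err, out)
	}
	return nil
}

func main() {
	// Example (path is made up): cap a freshly provisioned volume at 1 GiB.
	if err := setVolumeAttribute("/mnt/cephfs/csi-vol-example", "ceph.quota.max_bytes", "1073741824"); err != nil {
		fmt.Println(err)
	}
}
```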
|
Loading…
Reference in New Issue
Block a user