Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-11-26 08:10:20 +00:00

Merge branch 'master' of github.com:ceph/ceph-csi into csi-v1.0.0

Commit 04872e5ebf
@@ -33,7 +33,7 @@ Parameter | Required | Description
 --------- | -------- | -----------
 `monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
 `mounter` | no | Mount method to be used for this volume. Available options are `kernel` for Ceph kernel client and `fuse` for Ceph FUSE driver. Defaults to "default mounter", see command line arguments.
-`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing CephFS will be used.
+`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used.
 `pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created
 `rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume
 `csiProvisionerSecretName`, `csiNodeStageSecretName` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
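These StorageClass parameters reach the driver as the plain string map returned by `req.GetParameters()` in `CreateVolume` (see the Go hunks below). As a rough illustration of the validation rules the table implies, here is a minimal sketch; the type and function names are hypothetical, not the driver's actual helpers:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// volumeParams is a hypothetical holder for the parameters in the table above.
type volumeParams struct {
	Monitors        []string
	Mounter         string
	ProvisionVolume bool
	Pool            string
	RootPath        string
}

// parseVolumeParams validates the map the way the table describes:
// `monitors` and `provisionVolume` are always required, `pool` only when a
// new volume is provisioned, `rootPath` only when an existing volume is used.
func parseVolumeParams(params map[string]string) (*volumeParams, error) {
	p := &volumeParams{Mounter: params["mounter"]} // optional; empty selects the default mounter

	if params["monitors"] == "" {
		return nil, fmt.Errorf("missing required parameter: monitors")
	}
	p.Monitors = strings.Split(params["monitors"], ",")

	prov, err := strconv.ParseBool(params["provisionVolume"])
	if err != nil {
		return nil, fmt.Errorf("provisionVolume must be a BOOL value: %v", err)
	}
	p.ProvisionVolume = prov

	if prov {
		if p.Pool = params["pool"]; p.Pool == "" {
			return nil, fmt.Errorf("pool is required when provisionVolume=true")
		}
	} else {
		if p.RootPath = params["rootPath"]; p.RootPath == "" {
			return nil, fmt.Errorf("rootPath is required when provisionVolume=false")
		}
	}
	return p, nil
}

func main() {
	p, err := parseVolumeParams(map[string]string{
		"monitors":        "192.168.100.1:6789,192.168.100.2:6789",
		"provisionVolume": "true",
		"pool":            "cephfs_data", // illustrative pool name
	})
	fmt.Println(p, err)
}
```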
@@ -49,6 +49,8 @@ User credentials with access to an existing volume
 * `userID`: ID of a user client
 * `userKey`: key of a user client
 
+Notes on volume size: when provisioning a new volume, `max_bytes` quota attribute for this volume will be set to the requested volume size (see [Ceph quota documentation](http://docs.ceph.com/docs/mimic/cephfs/quota/)). A request for a zero-sized volume means no quota attribute will be set.
+
 ## Deployment with Kubernetes
 
 Requires Kubernetes 1.11
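The added note maps directly onto the CSI `CapacityRange`: whatever `GetRequiredBytes()` returns becomes the `max_bytes` quota, and zero (or a missing capacity range) means no quota at all. A minimal sketch of just that rule, assuming the standard CSI Go bindings; `quotaFromRange` is an illustrative name, not a function in this driver:

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// quotaFromRange returns the ceph.quota.max_bytes value implied by a request:
// the requested size unchanged, where 0 means "do not set a quota".
func quotaFromRange(capRange *csi.CapacityRange) int64 {
	// Generated protobuf getters are nil-safe, so a request without a
	// capacity range simply yields 0 here.
	return capRange.GetRequiredBytes()
}

func main() {
	fmt.Println(quotaFromRange(&csi.CapacityRange{RequiredBytes: 5 << 30})) // 5368709120: quota of 5 GiB
	fmt.Println(quotaFromRange(nil))                                        // 0: no quota will be set
}
```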
@@ -30,10 +30,6 @@ type controllerServer struct {
 	*csicommon.DefaultControllerServer
 }
 
-const (
-	oneGB = 1073741824
-)
-
 func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
 	if err := cs.validateCreateVolumeRequest(req); err != nil {
 		glog.Errorf("CreateVolumeRequest validation failed: %v", err)
@@ -85,11 +81,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		glog.Infof("cephfs: volume %s is provisioned statically", volId)
 	}
 
-	sz := req.GetCapacityRange().GetRequiredBytes()
-	if sz == 0 {
-		sz = oneGB
-	}
-
 	if err = ctrCache.insert(&controllerCacheEntry{VolOptions: *volOptions, VolumeID: volId}); err != nil {
 		glog.Errorf("failed to store a cache entry for volume %s: %v", volId, err)
 		return nil, status.Error(codes.Internal, err.Error())
@@ -98,7 +89,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 	return &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
 			VolumeId:      string(volId),
-			CapacityBytes: sz,
+			CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
 			VolumeContext: req.GetParameters(),
 		},
 	}, nil
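Taken together with the removal of the `oneGB` constant above, this changes the size semantics: `CapacityBytes` in the response now echoes exactly what was requested instead of silently defaulting to 1 GiB, so a zero-sized request stays zero and, per the documentation change, gets no quota. A standalone illustration (not driver code; the volume name and ID are made up):

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// A request that carries no capacity range, i.e. a zero-sized volume.
	req := &csi.CreateVolumeRequest{Name: "example-volume"}

	resp := &csi.CreateVolumeResponse{
		Volume: &csi.Volume{
			VolumeId:      "example-volume-id",
			CapacityBytes: req.GetCapacityRange().GetRequiredBytes(), // 0: no 1 GiB fallback anymore
			VolumeContext: req.GetParameters(),
		},
	}

	fmt.Println(resp.GetVolume().GetCapacityBytes()) // prints 0
}
```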
@@ -81,8 +81,10 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI
 		return err
 	}
 
-	if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
-		return err
+	if bytesQuota > 0 {
+		if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
+			return err
+		}
 	}
 
 	if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool", volOptions.Pool); err != nil {
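For context on what the new `bytesQuota > 0` guard skips: per the Ceph quota documentation linked earlier, CephFS quotas live in extended attributes on the volume's root directory, so `setVolumeAttribute` presumably amounts to something like the sketch below. This is an assumption about the mechanism for illustration only, not the driver's actual helper:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// setCephQuota is a hypothetical stand-in for the guarded setVolumeAttribute
// call: it writes the ceph.quota.max_bytes extended attribute on the mounted
// volume root, which is how CephFS directory quotas are expressed.
func setCephQuota(volRoot string, bytesQuota int64) error {
	if bytesQuota <= 0 {
		// Mirrors the guard above: a zero-sized request leaves the
		// directory without any quota attribute.
		return nil
	}
	value := []byte(fmt.Sprintf("%d", bytesQuota))
	return unix.Setxattr(volRoot, "ceph.quota.max_bytes", value, 0)
}

func main() {
	// Example: cap a (hypothetical) mounted volume root at 5 GiB.
	if err := setCephQuota("/mnt/cephfs/csi-volumes/example", 5<<30); err != nil {
		fmt.Println("failed to set quota:", err)
	}
}
```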