Provide options to pass in Ceph cluster-id
This commit provides the option to pass in a Ceph cluster-id instead of a MON list in the storage class. This helps in moving towards a stateless CSI implementation.

Tested the following:

- PV provisioning and staging using cluster-id in the storage class
- PV provisioning and staging using a MON list in the storage class

Did not test:

- snapshot operations with either form of the storage class

Signed-off-by: ShyamsundarR <srangana@redhat.com>
parent ff7d649c9d
commit 97f8c4b677
@@ -31,6 +31,7 @@ var (
 	nodeID          = flag.String("nodeid", "", "node id")
 	containerized   = flag.Bool("containerized", true, "whether run as containerized")
 	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
+	configRoot      = flag.String("configroot", "/etc", "Directory under which Ceph CSI configuration files will be present")
 )
 
 func init() {
@@ -56,7 +57,7 @@ func main() {
 	}
 
 	driver := rbd.NewDriver()
-	driver.Run(*driverName, *nodeID, *endpoint, *containerized, cp)
+	driver.Run(*driverName, *nodeID, *endpoint, *containerized, *configRoot, cp)
 
 	os.Exit(0)
 }
@@ -50,8 +50,9 @@ the configmaps to be stored
 
 Parameter | Required | Description
 --------- | -------- | -----------
-`monitors` | one of `monitors` and `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
-`monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | a string pointing the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed, the secret can be updated to pick up the new monitors.
+`monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
+`monValueFromSecret` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | a string pointing to the key in the credential secret whose value is the MON list. This is used when the monitors' IPs or hostnames change; the secret can be updated to pick up the new monitors.
+`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Value of the Ceph cluster fsid in which the RBD images shall be created (e.g. `4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9`)
 `pool` | yes | Ceph pool into which the RBD image shall be created
 `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
 `imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
@@ -65,6 +66,11 @@ Admin credentials are required for provisioning new RBD images `ADMIN_NAME`:
 `ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the
 client with admin privileges, and the value is its password
 
+If `clusterID` is specified, a pair of secrets is required, with keys named
+`subjectid` and `credentials`, where `subjectid` is the name of the client
+with admin privileges and `credentials` contains its password. The required
+pair are the provisioner and publish secrets, and both should contain the same values.
+
 ## Deployment with Kubernetes
 
 Requires Kubernetes 1.11
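The secret pair referenced above is added by this commit as templates under examples/rbd (see the new template files later in this diff). As a minimal sketch, assuming a client named `admin` and the `default` namespace, each of the two secrets would look roughly like the following, with the provisioner and publish variants differing only in their `name`:

```yaml
apiVersion: v1
kind: Secret
metadata:
  # one secret per role: ...-provisioner-secret and ...-publish-secret
  name: ceph-cluster-<cluster-fsid>-provisioner-secret
  namespace: default
data:
  subjectid: YWRtaW4=        # base64 of the client name, e.g. `echo -n admin | base64`
  credentials: <BASE64-KEY>  # base64 output of `ceph auth get-key client.admin`
```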
@@ -12,6 +12,11 @@ Once the plugin is successfully deployed, you'll need to customize
 setup.
 Please consult the documentation for info about available parameters.
 
+**NOTE:** If using the `clusterID` option instead of the `monitors` or
+`monValueFromSecret` options in the storage class for RBD based provisioning,
+see the section
+[Cluster ID based configuration](#cluster-id-based-configuration)
+before proceeding.
 
 After configuring the secrets, monitors, etc. you can deploy a
 testing Pod mounting a RBD image / CephFS volume:
@@ -213,3 +218,34 @@ Units: sectors of 1 * 512 = 512 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
 ```
+
+## Cluster ID based configuration
+
+Before creating a storage class that uses the option `clusterID` to refer to a
+Ceph cluster, complete the steps below.
+
+**NOTE**: Substitute the output of `ceph fsid` for `<cluster-fsid>` in the
+mentioned template YAML files, and also fill in the Ceph admin ID and
+credentials in their respective options. Further, update options like
+`monitors` and `pools` in the respective YAML files to contain the
+appropriate information.
+
+Create the following config maps and secrets:
+
+* `kubectl create -f ./rbd/template-ceph-cluster-ID-provisioner-secret.yaml`
+* `kubectl create -f ./rbd/template-ceph-cluster-ID-publish-secret.yaml`
+* `kubectl create -f ./rbd/template-ceph-cluster-ID-config.yaml`
+
+Modify the deployed CSI pods to additionally pass in the config maps and
+secrets as volumes:
+
+* `kubectl patch daemonset csi-rbdplugin --patch "$(cat ./rbd/template-csi-rbdplugin-patch.yaml)"`
+* `kubectl patch statefulset csi-rbdplugin-provisioner --patch "$(cat ./rbd/template-csi-rbdplugin-provisioner-patch.yaml)"`
+
+Restart the provisioner StatefulSet and the node plugin DaemonSet.
+
+The storage class and snapshot class, using `<cluster-fsid>` as the value for
+the option `clusterID`, can now be created on the cluster, as sketched below.
+
+The remaining steps to test functionality are the same as in the sections
+above.
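As an illustration of the storage class mentioned above, a minimal sketch of an RBD storage class that uses `clusterID` instead of `monitors` might look as follows. The class name and secret parameter names are assumptions for illustration; adapt them to your deployment and compare with examples/rbd/storageclass.yaml in this repository:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd
provisioner: rbd.csi.ceph.com
parameters:
  # Ceph cluster fsid, output of `ceph fsid`, matching the deployed
  # ceph-cluster-<cluster-fsid> config map and secrets
  clusterID: <cluster-fsid>
  pool: rbd
  imageFormat: "2"
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: ceph-cluster-<cluster-fsid>-provisioner-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-publish-secret-name: ceph-cluster-<cluster-fsid>-publish-secret
  csi.storage.k8s.io/node-publish-secret-namespace: default
reclaimPolicy: Delete
```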
@@ -6,6 +6,12 @@ metadata:
 snapshotter: rbd.csi.ceph.com
 parameters:
   pool: rbd
+  # Comma separated list of Ceph monitors
+  # if using FQDN, make sure csi plugin's dns policy is appropriate.
   monitors: mon1:port,mon2:port,...
+  # OR,
+  # Ceph cluster fsid, of the cluster to provision storage from
+  # clusterID: <ceph-fsid>
+
   csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
   csi.storage.k8s.io/snapshotter-secret-namespace: default
@@ -8,7 +8,10 @@ parameters:
   # Comma separated list of Ceph monitors
   # if using FQDN, make sure csi plugin's dns policy is appropriate.
   monitors: mon1:port,mon2:port,...
 
+  # OR,
+  # Ceph cluster fsid, of the cluster to provision storage from
+  # clusterID: <ceph-fsid>
   # OR,
   # if "monitors" parameter is not set, driver to get monitors from same
   # secret as admin/user credentials. "monValueFromSecret" provides the
   # key in the secret whose value is the mons
examples/rbd/template-ceph-cluster-ID-config.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-cluster-<cluster-fsid>
  namespace: default
data:
  cluster-config: |
    {
      "version": 1,
      "cluster-config": {
        "cluster-fsid": "<ceph-fsid>",
        "monitors": [
          "<IP/DNS:port>",
          "<IP/DNS:port>"
        ],
        "pools": [
          "<pool-name>",
          "<pool-name>"
        ]
      }
    }
examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: Secret
metadata:
  # The <cluster-fsid> is used by the CSI plugin to uniquely identify and use a
  # Ceph cluster, hence the value MUST match the output of the following
  # command.
  # - Output of: `ceph fsid`
  name: ceph-cluster-<cluster-fsid>-provisioner-secret
  namespace: default
data:
  # Base64 encoded ID of the admin name
  # - Typically output of: `echo -n "<admin-id>" | base64`
  # Substitute the entire string including angle braces, with the base64 value
  subjectid: <BASE64-ENCODED-ID>
  # Credentials of the above admin/user
  # - Output of: `ceph auth get-key client.admin | base64`
  # Substitute the entire string including angle braces, with the base64 value
  credentials: <BASE64-ENCODED-PASSWORD>
examples/rbd/template-ceph-cluster-ID-publish-secret.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: Secret
metadata:
  # The <cluster-fsid> is used by the CSI plugin to uniquely identify and use a
  # Ceph cluster, hence the value MUST match the output of the following
  # command.
  # - Output of: `ceph fsid`
  name: ceph-cluster-<cluster-fsid>-publish-secret
  namespace: default
data:
  # Base64 encoded ID of the admin name
  # - Typically output of: `echo -n "<admin-id>" | base64`
  # Substitute the entire string including angle braces, with the base64 value
  subjectid: <BASE64-ENCODED-ID>
  # Credentials of the above admin/user
  # - Output of: `ceph auth get-key client.admin | base64`
  # Substitute the entire string including angle braces, with the base64 value
  credentials: <BASE64-ENCODED-PASSWORD>
examples/rbd/template-csi-rbdplugin-patch.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
# This is a patch to the existing daemonset deployment of CSI rbdplugin.
# This is to be used when adding a new Ceph cluster to the CSI plugin.
# NOTE: Update the csi-rbdplugin-provisioner StatefulSet as well with a similar patch
# After substituting the <cluster-fsid> in all places, execute
# `kubectl patch daemonset csi-rbdplugin --patch\
# "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the daemonset deployment.
spec:
  template:
    spec:
      containers:
        - name: csi-rbdplugin
          volumeMounts:
            - name: provisioner-secret-<cluster-fsid>
              mountPath: "/etc/ceph-cluster-<cluster-fsid>-provisioner-secret"
              readOnly: true
            - name: publish-secret-<cluster-fsid>
              mountPath: "/etc/ceph-cluster-<cluster-fsid>-publish-secret"
              readOnly: true
            - name: ceph-cluster-<cluster-fsid>
              mountPath: "/etc/ceph-cluster-<cluster-fsid>/"
              readOnly: true
      volumes:
        - name: provisioner-secret-<cluster-fsid>
          secret:
            secretName: ceph-cluster-<cluster-fsid>-provisioner-secret
        - name: publish-secret-<cluster-fsid>
          secret:
            secretName: ceph-cluster-<cluster-fsid>-publish-secret
        - name: ceph-cluster-<cluster-fsid>
          configMap:
            name: ceph-cluster-<cluster-fsid>
examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
# This is a patch to the existing statefulset deployment of CSI rbdplugin.
# This is to be used when adding a new Ceph cluster to the CSI plugin.
# NOTE: Update the csi-rbdplugin DaemonSet as well with a similar patch
# After substituting the <cluster-fsid> in all places, execute
# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\
# "$(cat template-csi-rbdplugin-provisioner-patch.yaml)"`
# to patch the statefulset deployment.
spec:
  template:
    spec:
      containers:
        - name: csi-rbdplugin
          volumeMounts:
            - name: provisioner-secret-<cluster-fsid>
              mountPath: "/etc/ceph-cluster-<cluster-fsid>-provisioner-secret"
              readOnly: true
            - name: publish-secret-<cluster-fsid>
              mountPath: "/etc/ceph-cluster-<cluster-fsid>-publish-secret"
              readOnly: true
            - name: ceph-cluster-<cluster-fsid>
              mountPath: "/etc/ceph-cluster-<cluster-fsid>/"
              readOnly: true
      volumes:
        - name: provisioner-secret-<cluster-fsid>
          secret:
            secretName: ceph-cluster-<cluster-fsid>-provisioner-secret
        - name: publish-secret-<cluster-fsid>
          secret:
            secretName: ceph-cluster-<cluster-fsid>-publish-secret
        - name: ceph-cluster-<cluster-fsid>
          configMap:
            name: ceph-cluster-<cluster-fsid>
@@ -47,6 +47,9 @@ type Driver struct {
 
 var (
 	version = "1.0.0"
+	// Fc is the global file config type, and stores the top level directory
+	// under which rest of the Ceph config files can be found
+	Fc util.FileConfig
 )
 
 // NewDriver returns new rbd driver
@@ -87,10 +90,13 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err
 
 // Run start a non-blocking grpc controller,node and identityserver for
 // rbd CSI driver which can serve multiple parallel requests
-func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, cachePersister util.CachePersister) {
+func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, configroot string, cachePersister util.CachePersister) {
 	var err error
 	klog.Infof("Driver: %v version: %v", driverName, version)
 
+	// Initialize fileconfig base path
+	Fc.BasePath = configroot
+
 	// Initialize default library driver
 	r.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
 	if r.cd == nil {
@@ -280,7 +280,7 @@ func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (stri
 	}
 
 	klog.V(5).Infof("rbd: map mon %s", mon)
-	key, err := getRBDKey(userID, creds)
+	key, err := getRBDKey(volOpt.FsID, userID, creds)
 	if err != nil {
 		return "", err
 	}
@@ -52,6 +52,7 @@ type rbdVolume struct {
 	UserID             string `json:"userId"`
 	Mounter            string `json:"mounter"`
 	DisableInUseChecks bool   `json:"disableInUseChecks"`
+	FsID               string `json:"fsid"`
 }
 
 type rbdSnapshot struct {
@@ -66,6 +67,7 @@ type rbdSnapshot struct {
 	SizeBytes int64  `json:"sizeBytes"`
 	AdminID   string `json:"adminId"`
 	UserID    string `json:"userId"`
+	FsID      string `json:"fsid"`
 }
 
 var (
@@ -85,12 +87,23 @@ var (
 	supportedFeatures = sets.NewString("layering")
 )
 
-func getRBDKey(id string, credentials map[string]string) (string, error) {
-
-	if key, ok := credentials[id]; ok {
-		return key, nil
+func getRBDKey(fsid string, id string, credentials map[string]string) (string, error) {
+	var ok bool
+	var err error
+	var key string
+	if key, ok = credentials[id]; !ok {
+		if fsid != "" {
+			key, err = Fc.GetCredentialForSubject(fsid, id)
+			if err != nil {
+				klog.Errorf("failed getting credentials (%s)", err)
+				return "", fmt.Errorf("RBD key for ID: %s not found in config store", id)
+			}
+		} else {
+			return "", fmt.Errorf("RBD key for ID: %s not found", id)
+		}
 	}
-	return "", fmt.Errorf("RBD key for ID: %s not found", id)
+
+	return key, nil
 }
 
 func getMon(pOpts *rbdVolume, credentials map[string]string) (string, error) {
@@ -123,7 +136,7 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map
 	image := pOpts.VolName
 	volSzMiB := fmt.Sprintf("%dM", volSz)
 
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
@@ -154,7 +167,7 @@ func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) (
 	image := pOpts.VolName
 	// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
 
-	key, err := getRBDKey(userID, credentials)
+	key, err := getRBDKey(pOpts.FsID, userID, credentials)
 	if err != nil {
 		return false, "", err
 	}
@@ -202,7 +215,7 @@ func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]str
 		klog.Info("rbd is still being used ", image)
 		return fmt.Errorf("rbd %s is still being used", image)
 	}
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
@@ -227,24 +240,79 @@ func execCommand(command string, args []string) ([]byte, error) {
 	return cmd.CombinedOutput()
 }
 
+func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, noerr error) {
+	var err error
+	var ok bool
+
+	monitors, ok = options["monitors"]
+	if !ok {
+		// if mons are not set in options, check if they are set in secret
+		if monInSecret, ok = options["monValueFromSecret"]; !ok {
+			// if mons are not in secret, check if we have a cluster-fsid
+			if fsID, ok = options["clusterID"]; !ok {
+				return "", "", "", fmt.Errorf("either monitors or monValueFromSecret or clusterID must be set")
+			}
+			if monitors, err = Fc.GetMons(fsID); err != nil {
+				klog.Errorf("failed getting mons (%s)", err)
+				return "", "", "", fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID)
+			}
+		}
+	}
+
+	return
+}
+
+func getIDs(options map[string]string, fsID string) (adminID, userID string, noerr error) {
+	var err error
+	var ok bool
+
+	adminID, ok = options["adminid"]
+	if !ok {
+		if fsID != "" {
+			if adminID, err = Fc.GetProvisionerSubjectID(fsID); err != nil {
+				klog.Errorf("failed getting subject (%s)", err)
+				return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID)
+			}
+		} else {
+			adminID = rbdDefaultAdminID
+		}
+	}
+
+	userID, ok = options["userid"]
+	if !ok {
+		if fsID != "" {
+			if userID, err = Fc.GetPublishSubjectID(fsID); err != nil {
+				klog.Errorf("failed getting subject (%s)", err)
+				return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID)
+			}
+		} else {
+			userID = rbdDefaultUserID
+		}
+	}
+
+	return
+}
+
 func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) {
 	var ok bool
+	var err error
 
 	rbdVol := &rbdVolume{}
 	rbdVol.Pool, ok = volOptions["pool"]
 	if !ok {
 		return nil, errors.New("missing required parameter pool")
 	}
-	rbdVol.Monitors, ok = volOptions["monitors"]
-	if !ok {
-		// if mons are not set in options, check if they are set in secret
-		if rbdVol.MonValueFromSecret, ok = volOptions["monValueFromSecret"]; !ok {
-			return nil, errors.New("either monitors or monValueFromSecret must be set")
-		}
+
+	rbdVol.Monitors, rbdVol.FsID, rbdVol.MonValueFromSecret, err = getMonsAndFsID(volOptions)
+	if err != nil {
+		return nil, err
 	}
+
 	rbdVol.ImageFormat, ok = volOptions["imageFormat"]
 	if !ok {
 		rbdVol.ImageFormat = rbdImageFormat2
 	}
 
 	if rbdVol.ImageFormat == rbdImageFormat2 {
 		// if no image features is provided, it results in empty string
 		// which disable all RBD image format 2 features as we expected
@@ -264,48 +332,50 @@ func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool)
 	klog.V(3).Infof("setting disableInUseChecks on rbd volume to: %v", disableInUseChecks)
 	rbdVol.DisableInUseChecks = disableInUseChecks
 
-	getCredsFromVol(rbdVol, volOptions)
+	err = getCredsFromVol(rbdVol, volOptions)
+	if err != nil {
+		return nil, err
+	}
+
 	return rbdVol, nil
 }
 
-func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) {
+func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error {
 	var ok bool
-	rbdVol.AdminID, ok = volOptions["adminid"]
-	if !ok {
-		rbdVol.AdminID = rbdDefaultAdminID
-	}
-	rbdVol.UserID, ok = volOptions["userid"]
-	if !ok {
-		rbdVol.UserID = rbdDefaultUserID
+	var err error
+
+	rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.FsID)
+	if err != nil {
+		return err
 	}
 
 	rbdVol.Mounter, ok = volOptions["mounter"]
 	if !ok {
 		rbdVol.Mounter = rbdDefaultMounter
 	}
+
+	return nil
 }
 
 func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) {
 	var ok bool
+	var err error
 
 	rbdSnap := &rbdSnapshot{}
 	rbdSnap.Pool, ok = snapOptions["pool"]
 	if !ok {
 		return nil, errors.New("missing required parameter pool")
 	}
-	rbdSnap.Monitors, ok = snapOptions["monitors"]
-	if !ok {
-		// if mons are not set in options, check if they are set in secret
-		if rbdSnap.MonValueFromSecret, ok = snapOptions["monValueFromSecret"]; !ok {
-			return nil, errors.New("either monitors or monValueFromSecret must be set")
-		}
-	}
-	rbdSnap.AdminID, ok = snapOptions["adminid"]
-	if !ok {
-		rbdSnap.AdminID = rbdDefaultAdminID
-	}
-	rbdSnap.UserID, ok = snapOptions["userid"]
-	if !ok {
-		rbdSnap.UserID = rbdDefaultUserID
+
+	rbdSnap.Monitors, rbdSnap.FsID, rbdSnap.MonValueFromSecret, err = getMonsAndFsID(snapOptions)
+	if err != nil {
+		return nil, err
 	}
+
+	rbdSnap.AdminID, rbdSnap.UserID, err = getIDs(snapOptions, rbdSnap.FsID)
+	if err != nil {
+		return nil, err
+	}
 	return rbdSnap, nil
 }
@@ -367,7 +437,7 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]
 	image := pOpts.VolName
 	snapID := pOpts.SnapID
 
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
@@ -430,7 +500,7 @@ func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s
 	image := pOpts.VolName
 	snapID := pOpts.SnapID
 
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
@@ -457,7 +527,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[strin
 	image := pOpts.VolName
 	snapID := pOpts.SnapID
 
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
@@ -484,7 +554,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s
 	image := pOpts.VolName
 	snapID := pOpts.SnapID
 
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
@@ -511,7 +581,7 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string
 	image := pVolOpts.VolName
 	snapID := pSnapOpts.SnapID
 
-	key, err := getRBDKey(adminID, credentials)
+	key, err := getRBDKey(pVolOpts.FsID, adminID, credentials)
 	if err != nil {
 		return err
 	}
pkg/util/fileconfig.go (new file, 257 lines)
@@ -0,0 +1,257 @@
/*
Copyright 2019 ceph-csi authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "strings"
)

/* FileConfig processes config information stored in files, mostly mapped into
the runtime container.

The calls explicitly do not cache any information, to ensure that updated
configuration is always read from the files (for example when these are
mapped in as k8s config maps or secrets).

The BasePath is the path where config files are found, and config files are
expected to be named in the following manner,
    - BasePath/ceph-cluster-<cluster-fsid>/cluster-config
    - BasePath/ceph-cluster-<cluster-fsid>-provisioner-secret/credentials
    - BasePath/ceph-cluster-<cluster-fsid>-provisioner-secret/subjectid
    - BasePath/ceph-cluster-<cluster-fsid>-publish-secret/credentials
    - BasePath/ceph-cluster-<cluster-fsid>-publish-secret/subjectid
Where,
    - cluster-fsid is the Ceph cluster fsid in UUID ascii notation
      - The cluster-fsid corresponds to the cluster for which the
        configuration information is present in the mentioned files
    - cluster-config is expected to be a JSON blob with the following
      structure,
        {
            "version": 1,
            "cluster-config": {
                "cluster-fsid": "<ceph-fsid>",
                "monitors": [
                    "IP/DNS:port",
                    "IP/DNS:port"
                ],
                "pools": [
                    "<pool-name>",
                    "<pool-name>"
                ]
            }
        }
    - credentials is expected to contain Base64 encoded credentials for the
      user encoded in subjectid
    - subjectid is the username/subject to use with calls to Ceph, and is
      also Base64 encoded
    - Provisioner secret contains secrets to use by the provisioning system
    - Publish secret contains secrets to use by the publishing/staging
      system
*/

// FileConfig type with basepath that points to source of all config files
type FileConfig struct {
    BasePath string
}

// ClusterConfigv1 strongly typed JSON spec for cluster-config above
type ClusterConfigv1 struct {
    ClusterFsID string   `json:"cluster-fsid"`
    Monitors    []string `json:"monitors"`
    Pools       []string `json:"pools"`
}

// ClusterConfigJSONv1 strongly typed JSON spec for cluster-config above
type ClusterConfigJSONv1 struct {
    Version     int              `json:"version"`
    ClusterConf *ClusterConfigv1 `json:"cluster-config"`
}

// Constants and enum for constructPath operation
type pathType int

const (
    clusterConfig pathType = 0
    pubSubject    pathType = 1
    pubCreds      pathType = 2
    provSubject   pathType = 3
    provCreds     pathType = 4
)

const (
    fNamePrefix      = "ceph-cluster"
    fNameSep         = "-"
    fNamePubPrefix   = "publish-secret"
    fNameProvPrefix  = "provisioner-secret"
    fNameCephConfig  = "cluster-config"
    fNamePubSubject  = "subjectid"
    fNameProvSubject = "subjectid"
    fNamePubCred     = "credentials"
    fNameProvCred    = "credentials"
)

// constructPath constructs well defined paths based on the type of config
// file that needs to be accessed.
func (pType pathType) constructPath(basepath string, fsid string) (filePath string, noerr error) {
    if fsid == "" || basepath == "" {
        return "", fmt.Errorf("missing/empty fsid (%s) or basepath (%s) for config files", fsid, basepath)
    }

    switch pType {
    case clusterConfig:
        filePath = basepath + "/" + fNamePrefix + fNameSep + fsid +
            "/" + fNameCephConfig
    case pubSubject:
        filePath = basepath + "/" + fNamePrefix + fNameSep + fsid +
            fNameSep + fNamePubPrefix + "/" + fNamePubSubject
    case pubCreds:
        filePath = basepath + "/" + fNamePrefix + fNameSep + fsid +
            fNameSep + fNamePubPrefix + "/" + fNamePubCred
    case provSubject:
        filePath = basepath + "/" + fNamePrefix + fNameSep + fsid +
            fNameSep + fNameProvPrefix + "/" + fNameProvSubject
    case provCreds:
        filePath = basepath + "/" + fNamePrefix + fNameSep + fsid +
            fNameSep + fNameProvPrefix + "/" + fNameProvCred
    default:
        return "", fmt.Errorf("invalid path type (%d) specified", pType)
    }

    return
}

// GetMons returns a comma separated MON list, that is read in from the config
// files, based on the passed in fsid
func (fc *FileConfig) GetMons(fsid string) (string, error) {
    fPath, err := clusterConfig.constructPath(fc.BasePath, fsid)
    if err != nil {
        return "", err
    }

    // #nosec
    contentRaw, err := ioutil.ReadFile(fPath)
    if err != nil {
        return "", err
    }

    var cephConfig ClusterConfigJSONv1

    err = json.Unmarshal(contentRaw, &cephConfig)
    if err != nil {
        return "", err
    }

    if cephConfig.ClusterConf.ClusterFsID != fsid {
        return "", fmt.Errorf("mismatching Ceph cluster fsid (%s) in file, passed in (%s)", cephConfig.ClusterConf.ClusterFsID, fsid)
    }

    if len(cephConfig.ClusterConf.Monitors) == 0 {
        return "", fmt.Errorf("monitor list empty in configuration file")
    }

    return strings.Join(cephConfig.ClusterConf.Monitors, ","), nil
}

// GetProvisionerSubjectID returns the provisioner subject ID from the on-disk
// configuration file, based on the passed in fsid
func (fc *FileConfig) GetProvisionerSubjectID(fsid string) (string, error) {
    fPath, err := provSubject.constructPath(fc.BasePath, fsid)
    if err != nil {
        return "", err
    }

    // #nosec
    contentRaw, err := ioutil.ReadFile(fPath)
    if err != nil {
        return "", err
    }

    if string(contentRaw) == "" {
        return "", fmt.Errorf("missing/empty provisioner subject ID from file (%s)", fPath)
    }

    return string(contentRaw), nil
}

// GetPublishSubjectID returns the publish subject ID from the on-disk
// configuration file, based on the passed in fsid
func (fc *FileConfig) GetPublishSubjectID(fsid string) (string, error) {
    fPath, err := pubSubject.constructPath(fc.BasePath, fsid)
    if err != nil {
        return "", err
    }

    // #nosec
    contentRaw, err := ioutil.ReadFile(fPath)
    if err != nil {
        return "", err
    }

    if string(contentRaw) == "" {
        return "", fmt.Errorf("missing/empty publish subject ID from file (%s)", fPath)
    }

    return string(contentRaw), nil
}

// GetCredentialForSubject returns the credentials for the requested subject
// from the cluster config for the passed in fsid
func (fc *FileConfig) GetCredentialForSubject(fsid, subject string) (string, error) {
    var fPath string
    var err error

    tmpSubject, err := fc.GetPublishSubjectID(fsid)
    if err != nil {
        return "", err
    }

    if tmpSubject != subject {
        tmpSubject, err = fc.GetProvisionerSubjectID(fsid)
        if err != nil {
            return "", err
        }

        if tmpSubject != subject {
            return "", fmt.Errorf("requested subject did not match stored publish/provisioner subjectID")
        }

        fPath, err = provCreds.constructPath(fc.BasePath, fsid)
        if err != nil {
            return "", err
        }
    } else {
        fPath, err = pubCreds.constructPath(fc.BasePath, fsid)
        if err != nil {
            return "", err
        }
    }

    // #nosec
    contentRaw, err := ioutil.ReadFile(fPath)
    if err != nil {
        return "", err
    }

    if string(contentRaw) == "" {
        return "", fmt.Errorf("missing/empty credentials in file (%s)", fPath)
    }

    return string(contentRaw), nil
}
pkg/util/fileconfig_test.go (new file, 338 lines)
@@ -0,0 +1,338 @@
/*
Copyright 2019 ceph-csi authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// nolint: gocyclo

package util

import (
    "fmt"
    "io/ioutil"
    "os"
    "testing"
)

var testFsid = "dummy-fs-id"
var basePath = "./test_artifacts"

// nolint: gocyclo
func TestGetMons(t *testing.T) {
    var fc FileConfig
    var err error

    configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid
    defer os.RemoveAll(basePath)

    fc.BasePath = basePath

    // TEST: Empty fsid should error out
    _, err = fc.GetMons("")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to fsid missing!")
    }

    // TEST: Missing file should error out
    _, err = fc.GetMons(testFsid)
    if err == nil {
        t.Errorf("Call passed, expected to fail due to missing config file!")
    }

    // TEST: Empty file should error out
    err = os.MkdirAll(configFileDir, 0700)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    data := []byte{}
    err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetMons(testFsid)
    if err == nil {
        t.Errorf("Call passed, expected to fail due to empty config file!")
    }

    /* Tests with bad JSON content should get caught due to strongly typed JSON
    struct in implementation and are not tested here */

    // TEST: Send JSON with incorrect fsid
    data = []byte(`
    {
        "version": 1,
        "cluster-config": {
            "cluster-fsid": "bad_fsid",
            "monitors": ["IP1:port1","IP2:port2"],
            "pools": ["pool1","pool2"]
        }
    }`)
    err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetMons(testFsid)
    if err == nil {
        t.Errorf("Expected to fail on bad fsid in JSON")
    }

    // TEST: Send JSON with empty mon list
    data = []byte(`
    {
        "version": 1,
        "cluster-config": {
            "cluster-fsid": "` + testFsid + `",
            "monitors": [],
            "pools": ["pool1","pool2"]
        }
    }`)
    err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetMons(testFsid)
    if err == nil {
        t.Errorf("Expected to fail on empty MON list in JSON")
    }

    // TEST: Check valid return from successful call
    data = []byte(`
    {
        "version": 1,
        "cluster-config": {
            "cluster-fsid": "` + testFsid + `",
            "monitors": ["IP1:port1","IP2:port2"],
            "pools": ["pool1","pool2"]
        }
    }`)
    err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    output, err := fc.GetMons(testFsid)
    if err != nil {
        t.Errorf("Call failed %s", err)
    }
    if output != "IP1:port1,IP2:port2" {
        t.Errorf("Failed to generate correct output: expected %s, got %s",
            "IP1:port1,IP2:port2", output)
    }
}

func TestGetProvisionerSubjectID(t *testing.T) {
    var fc FileConfig
    var err error

    configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNameProvPrefix
    defer os.RemoveAll(basePath)

    fc.BasePath = basePath

    // TEST: Empty fsid should error out
    _, err = fc.GetProvisionerSubjectID("")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to fsid missing!")
    }

    // TEST: Missing file should error out
    _, err = fc.GetProvisionerSubjectID(testFsid)
    if err == nil {
        t.Errorf("Call passed, expected to fail due to missing config file!")
    }

    // TEST: Empty file should error out
    err = os.MkdirAll(configFileDir, 0700)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    data := []byte{}
    err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetProvisionerSubjectID(testFsid)
    if err == nil {
        t.Errorf("Call passed, expected to fail due to empty config file!")
    }

    // TEST: Check valid return from successful call
    data = []byte("admin")
    err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    output, err := fc.GetProvisionerSubjectID(testFsid)
    if err != nil || output != "admin" {
        t.Errorf("Failed to get valid subject ID: expected %s, got %s, err %s", "admin", output, err)
    }
}

func TestGetPublishSubjectID(t *testing.T) {
    var fc FileConfig
    var err error

    configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNamePubPrefix
    defer os.RemoveAll(basePath)

    fc.BasePath = basePath

    // TEST: Empty fsid should error out
    _, err = fc.GetPublishSubjectID("")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to fsid missing!")
    }

    // TEST: Missing file should error out
    _, err = fc.GetPublishSubjectID(testFsid)
    if err == nil {
        t.Errorf("Call passed, expected to fail due to missing config file!")
    }

    // TEST: Empty file should error out
    err = os.MkdirAll(configFileDir, 0700)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    data := []byte{}
    err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetPublishSubjectID(testFsid)
    if err == nil {
        t.Errorf("Call passed, expected to fail due to empty config file!")
    }

    // TEST: Check valid return from successful call
    data = []byte("admin")
    err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    output, err := fc.GetPublishSubjectID(testFsid)
    if err != nil || output != "admin" {
        t.Errorf("Failed to get valid subject ID: expected %s, got %s, err %s", "admin", output, err)
    }
}

// nolint: gocyclo
func TestGetCredentialForSubject(t *testing.T) {
    var fc FileConfig
    var err error

    configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNamePubPrefix
    defer os.RemoveAll(basePath)

    fc.BasePath = basePath

    // TEST: Empty fsid should error out
    _, err = fc.GetCredentialForSubject("", "subject")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to fsid missing!")
    }

    // TEST: Missing file should error out
    _, err = fc.GetCredentialForSubject(testFsid, "")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to missing config file!")
    }

    // TEST: Empty subject file should error out
    err = os.MkdirAll(configFileDir, 0700)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    data := []byte{}
    err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetCredentialForSubject(testFsid, "adminpub")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to empty subject file!")
    }

    // TEST: Empty subject cred file should error out
    data = []byte("adminpub")
    err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }
    data = []byte{}
    err = ioutil.WriteFile(configFileDir+"/"+fNamePubCred, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    _, err = fc.GetCredentialForSubject(testFsid, "adminpub")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to missing cred content!")
    }

    // TEST: Success fetching pub creds
    data = []byte("testpwd")
    err = ioutil.WriteFile(configFileDir+"/"+fNamePubCred, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    output, err := fc.GetCredentialForSubject(testFsid, "adminpub")
    if err != nil || output != "testpwd" {
        t.Errorf("Failed to get valid Publish credentials: expected %s, got %s, err %s", "testpwd", output, err)
    }

    // TEST: Fetch missing prov creds
    configFileDir = basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNameProvPrefix
    err = os.MkdirAll(configFileDir, 0700)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    data = []byte("adminprov")
    err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    fmt.Printf("Starting test")
    _, err = fc.GetCredentialForSubject(testFsid, "adminprov")
    if err == nil {
        t.Errorf("Call passed, expected to fail due to missing cred content!")
    }

    // TEST: Fetch prov creds successfully
    data = []byte("testpwd")
    err = ioutil.WriteFile(configFileDir+"/"+fNameProvCred, data, 0644)
    if err != nil {
        t.Errorf("Test utility error %s", err)
    }

    output, err = fc.GetCredentialForSubject(testFsid, "adminprov")
    if err != nil || output != "testpwd" {
        t.Errorf("Failed to get valid Provisioner credentials: expected %s, got %s, err %s", "testpwd", output, err)
    }
}