From 892d65d387420095e072158a05ee902b91de768f Mon Sep 17 00:00:00 2001
From: gman
Date: Thu, 14 Feb 2019 11:36:34 +0100
Subject: [PATCH 01/89] added StripSecretInArgs in pkg/util

---
 pkg/util/stripsecrets.go | 76 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)
 create mode 100644 pkg/util/stripsecrets.go

diff --git a/pkg/util/stripsecrets.go b/pkg/util/stripsecrets.go
new file mode 100644
index 000000000..7a69c56a2
--- /dev/null
+++ b/pkg/util/stripsecrets.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"strings"
+)
+
+const (
+	keyArg              = "--key="
+	secretArg           = "secret="
+	optionsArgSeparator = ','
+	strippedKey         = "--key=***stripped***"
+	strippedSecret      = "secret=***stripped***"
+)
+
+// StripSecretInArgs strips values of either "--key" or "secret=".
+// `args` is left unchanged.
+// Expects only one occurrence of either "--key" or "secret=".
+func StripSecretInArgs(args []string) []string {
+	out := make([]string, len(args))
+	copy(out, args)
+
+	if !stripKey(out) {
+		stripSecret(out)
+	}
+
+	return out
+}
+
+func stripKey(out []string) bool {
+	for i := range out {
+		if strings.HasPrefix(out[i], keyArg) {
+			out[i] = strippedKey
+			return true
+		}
+	}
+
+	return false
+}
+
+func stripSecret(out []string) bool {
+	for i := range out {
+		arg := out[i]
+		begin := strings.Index(arg, secretArg)
+
+		if begin == -1 {
+			continue
+		}
+
+		end := strings.IndexByte(arg[begin+len(secretArg):], optionsArgSeparator)
+
+		out[i] = arg[:begin] + strippedSecret
+		if end != -1 {
+			out[i] += arg[begin+len(secretArg)+end:] // end is relative to the text after "secret="
+		}
+
+		return true
+	}
+
+	return false
+}

From b3944f31216f3dfdf164313fb26fc91ff6c48a95 Mon Sep 17 00:00:00 2001
From: gman
Date: Thu, 14 Feb 2019 11:39:07 +0100
Subject: [PATCH 02/89] cephfs exec: read stdout and stderr separately

---
 pkg/cephfs/util.go | 94 ++++++++++++++++++++++++++++------------------
 1 file changed, 58 insertions(+), 36 deletions(-)

diff --git a/pkg/cephfs/util.go b/pkg/cephfs/util.go
index e9314f532..75569a2cc 100644
--- a/pkg/cephfs/util.go
+++ b/pkg/cephfs/util.go
@@ -17,16 +17,18 @@ limitations under the License.
 package cephfs
 
 import (
-	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"os/exec"
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"k8s.io/klog"
 
+	"github.com/ceph/ceph-csi/pkg/util"
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"k8s.io/kubernetes/pkg/util/mount"
 )
@@ -37,31 +39,72 @@ func makeVolumeID(volName string) volumeID {
 	return volumeID("csi-cephfs-" + volName)
 }
 
-func execCommand(command string, args ...string) ([]byte, error) {
-	klog.V(4).Infof("cephfs: EXEC %s %s", command, args)
-
-	cmd := exec.Command(command, args...) // #nosec
-	return cmd.CombinedOutput()
+func closePipeOnError(pipe io.Closer, err error) {
+	if err != nil {
+		if err = pipe.Close(); err != nil {
+			klog.Warningf("failed to close pipe: %v", err)
+		}
+	}
 }
 
-func execCommandAndValidate(program string, args ...string) error {
-	out, err := execCommand(program, args...)
+func execCommand(program string, args ...string) (stdout, stderr []byte, err error) {
+	cmd := exec.Command(program, args...) // nolint: gosec
+	klog.V(4).Infof("cephfs: EXEC %s %s", program, util.StripSecretInArgs(args))
+
+	stdoutPipe, err := cmd.StdoutPipe()
 	if err != nil {
-		return fmt.Errorf("cephfs: %s failed with following error: %s\ncephfs: %s output: %s", program, err, program, out)
+		return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, args, err)
+	}
+
+	defer closePipeOnError(stdoutPipe, err)
+
+	stderrPipe, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, nil, fmt.Errorf("cannot open stderr pipe for %s %v: %v", program, args, err)
+	}
+
+	defer closePipeOnError(stderrPipe, err)
+
+	if err = cmd.Start(); err != nil {
+		return nil, nil, fmt.Errorf("failed to run %s %v: %v", program, args, err)
+	}
+
+	stdout, err = ioutil.ReadAll(stdoutPipe)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to read from stdout for %s %v: %v", program, args, err)
+	}
+
+	stderr, err = ioutil.ReadAll(stderrPipe)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to read from stderr for %s %v: %v", program, args, err)
+	}
+
+	if waitErr := cmd.Wait(); waitErr != nil {
+		return nil, nil, fmt.Errorf("an error occurred while running %s %v: %v: %s", program, args, waitErr, stderr)
+	}
+
+	return
+}
+
+func execCommandErr(program string, args ...string) error {
+	if _, _, err := execCommand(program, args...); err != nil {
+		return err
 	}
 
 	return nil
 }
 
-func execCommandJSON(v interface{}, args ...string) error {
-	program := "ceph"
-	out, err := execCommand(program, args...)
-
+func execCommandJSON(v interface{}, program string, args ...string) error {
+	stdout, _, err := execCommand(program, args...)
if err != nil { - return fmt.Errorf("cephfs: %s failed with following error: %s\ncephfs: %s output: %s", program, err, program, out) + return err } - return json.NewDecoder(bytes.NewReader(out)).Decode(v) + if err = json.Unmarshal(stdout, v); err != nil { + return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, args, stdout, err) + } + + return nil } // Used in isMountPoint() @@ -76,27 +119,6 @@ func isMountPoint(p string) (bool, error) { return !notMnt, nil } -func storeCephCredentials(volID volumeID, cr *credentials) error { - keyringData := cephKeyringData{ - UserID: cr.id, - Key: cr.key, - VolumeID: volID, - } - - if err := keyringData.writeToFile(); err != nil { - return err - } - - secret := cephSecretData{ - UserID: cr.id, - Key: cr.key, - VolumeID: volID, - } - - err := secret.writeToFile() - return err -} - // // Controller service request validation // From 8e371f62fa7aa521388e2d74cd68b3429a94f086 Mon Sep 17 00:00:00 2001 From: gman Date: Thu, 14 Feb 2019 11:47:16 +0100 Subject: [PATCH 03/89] cephfs: ceph-fuse mimic supports passing keys via args, let's use it --- pkg/cephfs/cephuser.go | 97 +++++++++++-------------------------- pkg/cephfs/volume.go | 4 +- pkg/cephfs/volumemounter.go | 24 +++++---- 3 files changed, 41 insertions(+), 84 deletions(-) diff --git a/pkg/cephfs/cephuser.go b/pkg/cephfs/cephuser.go index f642fd654..fb06d5bf3 100644 --- a/pkg/cephfs/cephuser.go +++ b/pkg/cephfs/cephuser.go @@ -17,12 +17,7 @@ limitations under the License. package cephfs import ( - "bytes" - "encoding/json" "fmt" - "os" - - "k8s.io/klog" ) const ( @@ -53,83 +48,47 @@ func getCephUserName(volID volumeID) string { return cephUserPrefix + string(volID) } -func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) { - entityName := cephEntityClientPrefix + getCephUserName(volID) - +func getSingleCephEntity(args ...string) (*cephEntity, error) { var ents []cephEntity - args := [...]string{ - "-m", volOptions.Monitors, - "auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id), - "get", entityName, - } - - out, err := execCommand("ceph", args[:]...) 
- if err != nil { - return nil, fmt.Errorf("cephfs: ceph failed with following error: %s\ncephfs: ceph output: %s", err, out) - } - - // Workaround for output from `ceph auth get` - // Contains non-json data: "exported keyring for ENTITY\n\n" - offset := bytes.Index(out, []byte("[{")) - - if err = json.NewDecoder(bytes.NewReader(out[offset:])).Decode(&ents); err != nil { - return nil, fmt.Errorf("failed to decode json: %v", err) + if err := execCommandJSON(&ents, "ceph", args...); err != nil { + return nil, err } if len(ents) != 1 { - return nil, fmt.Errorf("got unexpected number of entities for %s: expected 1, got %d", entityName, len(ents)) + return nil, fmt.Errorf("got unexpected number of entities: expected 1, got %d", len(ents)) } return &ents[0], nil } +func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) { + return getSingleCephEntity( + "-m", volOptions.Monitors, + "-n", cephEntityClientPrefix+adminCr.id, "--key="+adminCr.key, + "-c", cephConfigPath, + "-f", "json", + "auth", "get", cephEntityClientPrefix+getCephUserName(volID), + ) +} + func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) { - caps := cephEntityCaps{ - Mds: fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)), - Mon: "allow r", - Osd: fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)), - } - - var ents []cephEntity - args := [...]string{ + return getSingleCephEntity( "-m", volOptions.Monitors, - "auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id), - "get-or-create", cephEntityClientPrefix + getCephUserName(volID), - "mds", caps.Mds, - "mon", caps.Mon, - "osd", caps.Osd, - } - - if err := execCommandJSON(&ents, args[:]...); err != nil { - return nil, fmt.Errorf("error creating ceph user: %v", err) - } - - return &ents[0], nil + "-n", cephEntityClientPrefix+adminCr.id, "--key="+adminCr.key, + "-c", cephConfigPath, + "-f", "json", + "auth", "get-or-create", cephEntityClientPrefix+getCephUserName(volID), + "mds", fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)), + "mon", "allow r", + "osd", fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)), + ) } func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error { - userID := getCephUserName(volID) - - args := [...]string{ + return execCommandErr("ceph", "-m", volOptions.Monitors, - "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id), - "auth", "rm", cephEntityClientPrefix + userID, - } - - var err error - if err = execCommandAndValidate("ceph", args[:]...); err != nil { - return err - } - - keyringPath := getCephKeyringPath(volID, adminCr.id) - if err = os.Remove(keyringPath); err != nil { - klog.Errorf("failed to remove keyring file %s with error %s", keyringPath, err) - } - - secretPath := getCephSecretPath(volID, adminCr.id) - if err = os.Remove(secretPath); err != nil { - klog.Errorf("failed to remove secret file %s with error %s", secretPath, err) - } - - return nil + "-n", cephEntityClientPrefix+adminCr.id, "--key="+adminCr.key, + "-c", cephConfigPath, + "auth", "rm", cephEntityClientPrefix+getCephUserName(volID), + ) } diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index e228892c8..683677b53 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -48,7 +48,7 @@ func 
getVolumeNamespace(volID volumeID) string { } func setVolumeAttribute(root, attrName, attrValue string) error { - return execCommandAndValidate("setfattr", "-n", attrName, "-v", attrValue, root) + return execCommandErr("setfattr", "-n", attrName, "-v", attrValue, root) } func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error { @@ -124,7 +124,7 @@ func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions defer unmountAndRemove(cephRoot) if err := os.Rename(volRoot, volRootDeleting); err != nil { - return fmt.Errorf("coudln't mark volume %s for deletion: %v", volID, err) + return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err) } if err := os.RemoveAll(volRootDeleting); err != nil { diff --git a/pkg/cephfs/volumemounter.go b/pkg/cephfs/volumemounter.go index 6a78f8266..3119a2474 100644 --- a/pkg/cephfs/volumemounter.go +++ b/pkg/cephfs/volumemounter.go @@ -106,19 +106,18 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, vo mountPoint, "-m", volOptions.Monitors, "-c", cephConfigPath, - "-n", cephEntityClientPrefix + cr.id, - "--keyring", getCephKeyringPath(volID, cr.id), + "-n", cephEntityClientPrefix + cr.id, "--key=" + cr.key, "-r", volOptions.RootPath, "-o", "nonempty", } - out, err := execCommand("ceph-fuse", args[:]...) + _, stderr, err := execCommand("ceph-fuse", args[:]...) if err != nil { - return fmt.Errorf("cephfs: ceph-fuse failed with following error: %s\ncephfs: ceph-fuse output: %s", err, out) + return err } - if !bytes.Contains(out, []byte("starting fuse")) { - return fmt.Errorf("cephfs: ceph-fuse failed:\ncephfs: ceph-fuse output: %s", out) + if !bytes.Contains(stderr, []byte("starting fuse")) { + return fmt.Errorf("ceph-fuse failed: %s", stderr) } return nil @@ -137,16 +136,15 @@ func (m *fuseMounter) name() string { return "Ceph FUSE driver" } type kernelMounter struct{} func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error { - if err := execCommandAndValidate("modprobe", "ceph"); err != nil { + if err := execCommandErr("modprobe", "ceph"); err != nil { return err } - return execCommandAndValidate("mount", + return execCommandErr("mount", "-t", "ceph", fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath), mountPoint, - "-o", - fmt.Sprintf("name=%s,secretfile=%s", cr.id, getCephSecretPath(volID, cr.id)), + "-o", fmt.Sprintf("name=%s,secret=%s", cr.id, cr.key), ) } @@ -161,12 +159,12 @@ func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *vo func (m *kernelMounter) name() string { return "Ceph kernel client" } func bindMount(from, to string, readOnly bool) error { - if err := execCommandAndValidate("mount", "--bind", from, to); err != nil { + if err := execCommandErr("mount", "--bind", from, to); err != nil { return fmt.Errorf("failed to bind-mount %s to %s: %v", from, to, err) } if readOnly { - if err := execCommandAndValidate("mount", "-o", "remount,ro,bind", to); err != nil { + if err := execCommandErr("mount", "-o", "remount,ro,bind", to); err != nil { return fmt.Errorf("failed read-only remount of %s: %v", to, err) } } @@ -175,7 +173,7 @@ func bindMount(from, to string, readOnly bool) error { } func unmountVolume(mountPoint string) error { - return execCommandAndValidate("umount", mountPoint) + return execCommandErr("umount", mountPoint) } func createMountPoint(root string) error { From 96bf4a98bd11fa592fe747dc8150f0156b4d7524 Mon Sep 17 00:00:00 2001 From: gman Date: 
Thu, 14 Feb 2019 11:48:52 +0100 Subject: [PATCH 04/89] cephfs: don't need to store keyrings anymore --- pkg/cephfs/cephconf.go | 85 +--------------------------------- pkg/cephfs/controllerserver.go | 5 -- pkg/cephfs/nodeserver.go | 10 +--- 3 files changed, 3 insertions(+), 97 deletions(-) diff --git a/pkg/cephfs/cephconf.go b/pkg/cephfs/cephconf.go index 65e79a1d1..dac9dc901 100644 --- a/pkg/cephfs/cephconf.go +++ b/pkg/cephfs/cephconf.go @@ -17,13 +17,8 @@ limitations under the License. package cephfs import ( - "fmt" "io/ioutil" "os" - "path" - "text/template" - - "k8s.io/klog" ) var cephConfig = []byte(`[global] @@ -35,39 +30,11 @@ auth_client_required = cephx fuse_set_user_groups = false `) -const cephKeyring = `[client.{{.UserID}}] -key = {{.Key}} -` - -const cephSecret = `{{.Key}}` // #nosec - const ( - cephConfigRoot = "/etc/ceph" - cephConfigPath = "/etc/ceph/ceph.conf" - cephKeyringFileNameFmt = "ceph.share.%s.client.%s.keyring" - cephSecretFileNameFmt = "ceph.share.%s.client.%s.secret" // #nosec + cephConfigRoot = "/etc/ceph" + cephConfigPath = "/etc/ceph/ceph.conf" ) -var ( - cephKeyringTempl *template.Template - cephSecretTempl *template.Template -) - -func init() { - fm := map[string]interface{}{ - "perms": func(readOnly bool) string { - if readOnly { - return "r" - } - - return "rw" - }, - } - - cephKeyringTempl = template.Must(template.New("keyring").Funcs(fm).Parse(cephKeyring)) - cephSecretTempl = template.Must(template.New("secret").Parse(cephSecret)) -} - func createCephConfigRoot() error { return os.MkdirAll(cephConfigRoot, 0755) // #nosec } @@ -79,51 +46,3 @@ func writeCephConfig() error { return ioutil.WriteFile(cephConfigPath, cephConfig, 0640) } - -func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, data interface{}) error { - if err := createCephConfigRoot(); err != nil { - return err - } - - f, err := os.OpenFile(path.Join(cephConfigRoot, fileName), os.O_CREATE|os.O_EXCL|os.O_WRONLY, m) - if err != nil { - if os.IsExist(err) { - return nil - } - return err - } - - defer func() { - if err := f.Close(); err != nil { - klog.Errorf("failed to close file %s with error %s", f.Name(), err) - } - }() - - return t.Execute(f, data) -} - -type cephKeyringData struct { - UserID, Key string - VolumeID volumeID -} - -func (d *cephKeyringData) writeToFile() error { - return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserID), 0600, cephKeyringTempl, d) -} - -type cephSecretData struct { - UserID, Key string - VolumeID volumeID -} - -func (d *cephSecretData) writeToFile() error { - return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserID), 0600, cephSecretTempl, d) -} - -func getCephSecretPath(volID volumeID, userID string) string { - return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volID, userID)) -} - -func getCephKeyringPath(volID volumeID, userID string) string { - return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volID, userID)) -} diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go index 76bb99059..235a99f3a 100644 --- a/pkg/cephfs/controllerserver.go +++ b/pkg/cephfs/controllerserver.go @@ -67,11 +67,6 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, status.Error(codes.InvalidArgument, err.Error()) } - if err = storeCephCredentials(volID, cr); err != nil { - klog.Errorf("failed to store admin credentials for '%s': %v", cr.id, err) - return nil, status.Error(codes.Internal, err.Error()) - } - 
if err = createVolume(volOptions, cr, volID, req.GetCapacityRange().GetRequiredBytes()); err != nil { klog.Errorf("failed to create volume %s: %v", req.GetName(), err) return nil, status.Error(codes.Internal, err.Error()) diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index 4fff317b4..b9ec7284c 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -51,10 +51,6 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err) } - if err = storeCephCredentials(volID, adminCr); err != nil { - return nil, fmt.Errorf("failed to store ceph admin credentials: %v", err) - } - // Then get the ceph user entity, err := getCephUser(volOptions, adminCr, volID) @@ -74,10 +70,6 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi cr = userCr } - if err := storeCephCredentials(volID, cr); err != nil { - return nil, fmt.Errorf("failed to store ceph user credentials: %v", err) - } - return cr, nil } @@ -241,7 +233,7 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag return nil, status.Error(codes.Internal, err.Error()) } - klog.Infof("cephfs: successfully umounted volume %s from %s", req.GetVolumeId(), stagingTargetPath) + klog.Infof("cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath) return &csi.NodeUnstageVolumeResponse{}, nil } From 8223ae325b0bd9ab232a3f405737bd8146ffac06 Mon Sep 17 00:00:00 2001 From: gman Date: Thu, 14 Feb 2019 14:38:53 +0100 Subject: [PATCH 05/89] addressed review comments --- pkg/cephfs/cephuser.go | 26 ++++++++++++++++++++------ pkg/util/stripsecrets.go | 2 +- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/pkg/cephfs/cephuser.go b/pkg/cephfs/cephuser.go index fb06d5bf3..113933019 100644 --- a/pkg/cephfs/cephuser.go +++ b/pkg/cephfs/cephuser.go @@ -61,23 +61,34 @@ func getSingleCephEntity(args ...string) (*cephEntity, error) { return &ents[0], nil } +func genUserIDs(adminCr *credentials, volID volumeID) (adminID, userID string) { + return cephEntityClientPrefix + adminCr.id, cephEntityClientPrefix + getCephUserName(volID) +} + func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) { + adminID, userID := genUserIDs(adminCr, volID) + return getSingleCephEntity( "-m", volOptions.Monitors, - "-n", cephEntityClientPrefix+adminCr.id, "--key="+adminCr.key, + "-n", adminID, + "--key="+adminCr.key, "-c", cephConfigPath, "-f", "json", - "auth", "get", cephEntityClientPrefix+getCephUserName(volID), + "auth", "get", userID, ) } func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) { + adminID, userID := genUserIDs(adminCr, volID) + return getSingleCephEntity( "-m", volOptions.Monitors, - "-n", cephEntityClientPrefix+adminCr.id, "--key="+adminCr.key, + "-n", adminID, + "--key="+adminCr.key, "-c", cephConfigPath, "-f", "json", - "auth", "get-or-create", cephEntityClientPrefix+getCephUserName(volID), + "auth", "get-or-create", userID, + // User capabilities "mds", fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)), "mon", "allow r", "osd", fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)), @@ -85,10 +96,13 @@ func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volum } func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error { + adminID, 
userID := genUserIDs(adminCr, volID) + return execCommandErr("ceph", "-m", volOptions.Monitors, - "-n", cephEntityClientPrefix+adminCr.id, "--key="+adminCr.key, + "-n", adminID, + "--key="+adminCr.key, "-c", cephConfigPath, - "auth", "rm", cephEntityClientPrefix+getCephUserName(volID), + "auth", "rm", userID, ) } diff --git a/pkg/util/stripsecrets.go b/pkg/util/stripsecrets.go index 7a69c56a2..8b818de9d 100644 --- a/pkg/util/stripsecrets.go +++ b/pkg/util/stripsecrets.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 1803a1be97cf4368145eb95a12fa04cc9f09a0ce Mon Sep 17 00:00:00 2001 From: gman Date: Thu, 14 Feb 2019 20:31:26 +0100 Subject: [PATCH 06/89] rbd: don't delete volume/snapshot if metadata creation fails --- pkg/rbd/controllerserver.go | 59 +++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 616012095..5950572a7 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -115,6 +115,15 @@ func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { return rbdVol, nil } +func storeVolumeMetadata(vol *rbdVolume, cp util.CachePersister) error { + if err := cp.Create(vol.VolID, vol); err != nil { + klog.Errorf("failed to store metadata for volume %s: %v", vol.VolID, err) + return err + } + + return nil +} + // CreateVolume creates the volume in backend and store the volume metadata func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { @@ -136,6 +145,11 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol // request if exVol.VolSize >= req.GetCapacityRange().GetRequiredBytes() { // existing volume is compatible with new request and should be reused. + + if err = storeVolumeMetadata(exVol, cs.MetadataStore); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // TODO (sbezverk) Do I need to make sure that RBD volume still exists? 
return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ @@ -160,16 +174,13 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol if err != nil { return nil, err } - if createErr := cs.MetadataStore.Create(rbdVol.VolID, rbdVol); createErr != nil { - klog.Warningf("failed to store volume metadata with error: %v", err) - if err = deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil { - klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err) - return nil, err - } - return nil, createErr - } rbdVolumes[rbdVol.VolID] = rbdVol + + if err = storeVolumeMetadata(rbdVol, cs.MetadataStore); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ VolumeId: rbdVol.VolID, @@ -311,6 +322,10 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS // check for the requested source volume id and already allocated source volume id if exSnap, err := getRBDSnapshotByName(req.GetName()); err == nil { if req.SourceVolumeId == exSnap.SourceVolumeID { + if err = storeSnapshotMetadata(exSnap, cs.MetadataStore); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &csi.CreateSnapshotResponse{ Snapshot: &csi.Snapshot{ SizeBytes: exSnap.SizeBytes, @@ -357,11 +372,12 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS rbdSnap.CreatedAt = ptypes.TimestampNow().GetSeconds() - if err = cs.storeSnapMetadata(rbdSnap, req.GetSecrets()); err != nil { - return nil, err + rbdSnapshots[snapshotID] = rbdSnap + + if err = storeSnapshotMetadata(rbdSnap, cs.MetadataStore); err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - rbdSnapshots[snapshotID] = rbdSnap return &csi.CreateSnapshotResponse{ Snapshot: &csi.Snapshot{ SizeBytes: rbdSnap.SizeBytes, @@ -375,22 +391,13 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS }, nil } -func (cs *ControllerServer) storeSnapMetadata(rbdSnap *rbdSnapshot, secret map[string]string) error { - errCreate := cs.MetadataStore.Create(rbdSnap.SnapID, rbdSnap) - if errCreate != nil { - klog.Warningf("rbd: failed to store snapInfo with error: %v", errCreate) - // Unprotect snapshot - err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, secret) - if err != nil { - return status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) - } - // Deleting snapshot - klog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName) - if err = deleteSnapshot(rbdSnap, rbdSnap.AdminID, secret); err != nil { - return status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err) - } +func storeSnapshotMetadata(rbdSnap *rbdSnapshot, cp util.CachePersister) error { + if err := cp.Create(rbdSnap.SnapID, rbdSnap); err != nil { + klog.Errorf("failed to store metadata for snapshot %s: %v", rbdSnap.SnapID, err) + return err } - return errCreate + + return nil } func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest) error { From 49f5d4a54ef8de68cf7597d698718d38b2409e33 Mon Sep 17 00:00:00 2001 From: gman Date: Thu, 14 Feb 2019 22:25:32 +0100 Subject: [PATCH 07/89] fix lint error --- pkg/rbd/controllerserver.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 
5950572a7..6b4e2a630 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -305,6 +305,7 @@ func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *cs // CreateSnapshot creates the snapshot in backend and stores metadata // in store +// nolint: gocyclo func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { if err := cs.validateSnapshotReq(req); err != nil { From 13a025680fbd3d3400f47ebe7fbe111334be9efe Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 18 Feb 2019 10:17:31 +0530 Subject: [PATCH 08/89] Fix missspelled words Signed-off-by: Madhu Rajanna --- docs/deploy-cephfs.md | 15 ++++++++------- docs/deploy-rbd.md | 3 ++- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/deploy-cephfs.md b/docs/deploy-cephfs.md index 7db96cb3c..659407d31 100644 --- a/docs/deploy-cephfs.md +++ b/docs/deploy-cephfs.md @@ -5,8 +5,8 @@ and attach and mount existing ones to workloads. ## Building -CSI CephFS plugin can be compiled in a form of a binary file or in a form of a -Docker image. +CSI CephFS plugin can be compiled in the form of a binary file or in the form +of a Docker image. When compiled as a binary file, the result is stored in `_output/` directory with the name `cephfsplugin`. When compiled as an image, it's stored in the local Docker image store. @@ -33,12 +33,13 @@ Option | Default value | Description `--drivername` | `csi-cephfsplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) `--nodeid` | _empty_ | This node's ID `--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in system's path and will choose Ceph kernel client if probing failed. -`--metadatastorage` | _empty_ | Whether should metadata be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) +`--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) -**Available environmental variables:** `KUBERNETES_CONFIG_PATH`: if you use -`k8s_configmap` as metadata store, specify the path of your k8s config file (if -not specified, the plugin will assume you're running it inside a k8s cluster and -find the config itself). +**Available environmental variables:** + +`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify +the path of your k8s config file (if not specified, the plugin will assume +you're running it inside a k8s cluster and find the config itself). `POD_NAMESPACE`: if you use `k8s_configmap` as metadata store, `POD_NAMESPACE` is used to define in which namespace you want the configmaps to be stored diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index 000228044..9d25d600b 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -1,7 +1,7 @@ # CSI RBD Plugin The RBD CSI plugin is able to provision new RBD images and -attach and mount those to worlkoads. +attach and mount those to workloads. ## Building @@ -35,6 +35,7 @@ Option | Default value | Description `--metadatastorage` | _empty_ | Whether should metadata be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) **Available environmental variables:** + `HOST_ROOTFS`: rbdplugin searches `/proc` directory under the directory set by `HOST_ROOTFS`. 
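For example, a minimal, illustrative invocation with this variable set — only
flags documented in the table above are used, and the binary name, paths, and
node name are placeholders, not defaults:

```bash
# Run the RBD plugin against a host rootfs bind-mounted at /rootfs
# (illustrative sketch; adjust the values for your deployment).
HOST_ROOTFS=/rootfs ./rbdplugin \
    --drivername=csi-rbdplugin \
    --nodeid=worker-1 \
    --metadatastorage=node
```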
`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify From 6fbde110cded520023cca8afbb78069a100d05e0 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 18 Feb 2019 17:16:59 +0530 Subject: [PATCH 09/89] skip redundant error check correct misspelled word Signed-off-by: Madhu Rajanna --- pkg/cephfs/util.go | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/pkg/cephfs/util.go b/pkg/cephfs/util.go index 75569a2cc..daab8019f 100644 --- a/pkg/cephfs/util.go +++ b/pkg/cephfs/util.go @@ -87,11 +87,8 @@ func execCommand(program string, args ...string) (stdout, stderr []byte, err err } func execCommandErr(program string, args ...string) error { - if _, _, err := execCommand(program, args...); err != nil { - return err - } - - return nil + _, _, err := execCommand(program, args...) + return err } func execCommandJSON(v interface{}, program string, args ...string) error { @@ -119,10 +116,7 @@ func isMountPoint(p string) (bool, error) { return !notMnt, nil } -// // Controller service request validation -// - func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { return fmt.Errorf("invalid CreateVolumeRequest: %v", err) @@ -154,10 +148,7 @@ func (cs *ControllerServer) validateDeleteVolumeRequest() error { return nil } -// // Node service request validation -// - func validateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error { if req.GetVolumeCapability() == nil { return errors.New("volume capability missing in request") @@ -200,7 +191,7 @@ func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error { } if req.GetTargetPath() == "" { - return errors.New("varget path missing in request") + return errors.New("target path missing in request") } return nil From 2881a8eddeb353ce184f084b599e1dc3923d5015 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 18 Feb 2019 17:51:59 +0530 Subject: [PATCH 10/89] update k8s to latest 1.13.3 Signed-off-by: Madhu Rajanna --- Gopkg.lock | 19 ++++++++++--------- Gopkg.toml | 8 ++++---- .../runtime/serializer/streaming/streaming.go | 2 +- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index f6322c559..0a46d8f9b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -376,11 +376,11 @@ "storage/v1beta1", ] pruneopts = "NUT" - revision = "67edc246be36579e46a89e29a2f165d47e012109" - version = "kubernetes-1.13.2" + revision = "74b699b93c15473932b89e3d1818ba8282f3b5ab" + version = "kubernetes-1.13.3" [[projects]] - digest = "1:a2da0cbc8dfda27eeffa54b53195e607497c6cac737d17f45a667963aeae5f02" + digest = "1:09dee8b7c6cb2fc9c6bee525de3b95199a82a8647a189e153d072a1dfce17de7" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -421,8 +421,8 @@ "third_party/forked/golang/reflect", ] pruneopts = "NUT" - revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" - version = "kubernetes-1.13.2" + revision = "572dfc7bdfcb4531361a17d27b92851f59acf0dc" + version = "kubernetes-1.13.3" [[projects]] digest = "1:638623327cb201b425a328d0bddb3379b05eb05ef4cab589380f0be07ac1dc17" @@ -485,8 +485,8 @@ "util/integer", ] pruneopts = "NUT" - revision = "6bf63545bd0257ed9e701ad95307ffa51b4407c0" - version = "kubernetes-1.13.2" + revision = "6e4752048fde21176ab35eb54ec1117359830d8a" + version = "kubernetes-1.13.3" [[projects]] digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b" @@ -507,8 
+507,8 @@ "pkg/util/nsenter", ] pruneopts = "NUT" - revision = "cff46ab41ff0bb44d8584413b598ad8360ec1def" - version = "v1.13.2" + revision = "721bfa751924da8d1680787490c54b9179b1fed0" + version = "v1.13.3" [[projects]] branch = "master" @@ -533,6 +533,7 @@ "github.com/container-storage-interface/spec/lib/go/csi", "github.com/golang/protobuf/ptypes", "github.com/golang/protobuf/ptypes/timestamp", + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer", "github.com/kubernetes-csi/drivers/pkg/csi-common", "github.com/pborman/uuid", "github.com/pkg/errors", diff --git a/Gopkg.toml b/Gopkg.toml index 6a0a2d8c5..fe76bd62f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -15,15 +15,15 @@ version = "1.10.0" [[constraint]] - version = "kubernetes-1.13.2" + version = "kubernetes-1.13.3" name = "k8s.io/apimachinery" [[constraint]] name = "k8s.io/kubernetes" - version = "v1.13.2" + version = "v1.13.3" [[override]] - version = "kubernetes-1.13.2" + version = "kubernetes-1.13.3" name = "k8s.io/api" [[override]] @@ -32,7 +32,7 @@ [[constraint]] name = "k8s.io/client-go" - version = "kubernetes-1.13.2" + version = "kubernetes-1.13.3" [prune] go-tests = true diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 91fd4ed4f..a60a7c041 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { reader: r, decoder: d, buf: make([]byte, 1024), - maxBytes: 1024 * 1024, + maxBytes: 16 * 1024 * 1024, } } From e91a59d2f948a1c81320d61f2e1a2b613a7f8183 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 19 Feb 2019 12:17:40 +0530 Subject: [PATCH 11/89] RBD: Remove dead code during volume creation we are validating that volume name cannot be empty,removing this check as we are not going to hit this case Fixes: #204 Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 6b4e2a630..b1eddf29e 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -99,9 +99,6 @@ func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { // Generating Volume Name and Volume ID, as according to CSI spec they MUST be different volName := req.GetName() uniqueID := uuid.NewUUID().String() - if len(volName) == 0 { - volName = rbdVol.Pool + "-dynamic-pvc-" + uniqueID - } rbdVol.VolName = volName volumeID := "csi-rbd-vol-" + uniqueID rbdVol.VolID = volumeID From fd4c019aba846f16f744ef50735b1ba7f26253da Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 19 Feb 2019 13:44:10 +0530 Subject: [PATCH 12/89] cleanup: remove duplicate code Signed-off-by: Madhu Rajanna --- cmd/cephfs/main.go | 19 +----------------- cmd/rbd/main.go | 25 +---------------------- pkg/util/util.go | 49 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 42 deletions(-) create mode 100644 pkg/util/util.go diff --git a/cmd/cephfs/main.go b/cmd/cephfs/main.go index 907246f8e..719887763 100644 --- a/cmd/cephfs/main.go +++ b/cmd/cephfs/main.go @@ -19,11 +19,9 @@ package main import ( "flag" "os" - "path" "github.com/ceph/ceph-csi/pkg/cephfs" "github.com/ceph/ceph-csi/pkg/util" - "k8s.io/klog" ) var ( @@ -37,19 +35,8 @@ var ( func main() { util.InitLogging() - if err := 
createPersistentStorage(path.Join(cephfs.PluginFolder, "controller")); err != nil { - klog.Errorf("failed to create persistent storage for controller: %v", err) - os.Exit(1) - } - - if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "node")); err != nil { - klog.Errorf("failed to create persistent storage for node: %v", err) - os.Exit(1) - } - - cp, err := util.NewCachePersister(*metadataStorage, *driverName) + cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName) if err != nil { - klog.Errorf("failed to define cache persistence method: %v", err) os.Exit(1) } @@ -58,7 +45,3 @@ func main() { os.Exit(0) } - -func createPersistentStorage(persistentStoragePath string) error { - return os.MkdirAll(persistentStoragePath, os.FileMode(0755)) -} diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go index cd30bef68..6aec53ea7 100644 --- a/cmd/rbd/main.go +++ b/cmd/rbd/main.go @@ -19,11 +19,9 @@ package main import ( "flag" "os" - "path" "github.com/ceph/ceph-csi/pkg/rbd" "github.com/ceph/ceph-csi/pkg/util" - "k8s.io/klog" ) var ( @@ -37,18 +35,8 @@ var ( func main() { util.InitLogging() - if err := createPersistentStorage(path.Join(rbd.PluginFolder, "controller")); err != nil { - klog.Errorf("failed to create persistent storage for controller %v", err) - os.Exit(1) - } - if err := createPersistentStorage(path.Join(rbd.PluginFolder, "node")); err != nil { - klog.Errorf("failed to create persistent storage for node %v", err) - os.Exit(1) - } - - cp, err := util.NewCachePersister(*metadataStorage, *driverName) + cp, err := util.CreatePersistanceStorage(rbd.PluginFolder, *metadataStorage, *driverName) if err != nil { - klog.Errorf("failed to define cache persistence method: %v", err) os.Exit(1) } @@ -57,14 +45,3 @@ func main() { os.Exit(0) } - -func createPersistentStorage(persistentStoragePath string) error { - if _, err := os.Stat(persistentStoragePath); os.IsNotExist(err) { - if err = os.MkdirAll(persistentStoragePath, os.FileMode(0755)); err != nil { - return err - } - } else { - return err - } - return nil -} diff --git a/pkg/util/util.go b/pkg/util/util.go new file mode 100644 index 000000000..2382f87d7 --- /dev/null +++ b/pkg/util/util.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "os" + "path" + + "k8s.io/klog" +) + +// CreatePersistanceStorage creates storage path and initializes new cache +func CreatePersistanceStorage(sPath, metaDataStore, driverName string) (CachePersister, error) { + var err error + if err = createPersistentStorage(path.Join(sPath, "controller")); err != nil { + klog.Errorf("failed to create persistent storage for controller: %v", err) + return nil, err + } + + if err = createPersistentStorage(path.Join(sPath, "node")); err != nil { + klog.Errorf("failed to create persistent storage for node: %v", err) + return nil, err + } + + cp, err := NewCachePersister(metaDataStore, driverName) + if err != nil { + klog.Errorf("failed to define cache persistence method: %v", err) + return nil, err + } + return cp, err +} + +func createPersistentStorage(persistentStoragePath string) error { + return os.MkdirAll(persistentStoragePath, os.FileMode(0755)) +} From a04bef4430caf91a44ff5c4e02c37e115b9c3087 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 20 Feb 2019 15:30:18 +0530 Subject: [PATCH 13/89] fix misspelled words Signed-off-by: Madhu Rajanna --- docs/deploy-rbd.md | 2 +- examples/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index 9d25d600b..2f09e4435 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -111,7 +111,7 @@ Deploys a daemon set with two containers: CSI driver-registrar and the CSI RBD d ## Verifying the deployment in Kubernetes -After successfuly completing the steps above, you should see output similar to this: +After successfully completing the steps above, you should see output similar to this: ```bash $ kubectl get all diff --git a/examples/README.md b/examples/README.md index beada6280..d309cdcaf 100644 --- a/examples/README.md +++ b/examples/README.md @@ -7,7 +7,7 @@ By default, they look for the YAML manifests in `../../deploy/{rbd,cephfs}/kubernetes`. You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`. -Once the plugin is successfuly deployed, you'll need to customize +Once the plugin is successfully deployed, you'll need to customize `storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster setup. Please consult the documentation for info about available parameters. 
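Patch 12 above collapses both drivers' startup boilerplate into
`util.CreatePersistanceStorage`. As a reference for how a caller is expected
to use it, here is a minimal sketch of a driver `main` under those
assumptions — flag parsing and the driver's run loop are elided, and the
literal arguments are placeholders:

```go
package main

import (
	"os"

	"github.com/ceph/ceph-csi/pkg/cephfs"
	"github.com/ceph/ceph-csi/pkg/util"
)

func main() {
	util.InitLogging()

	// Ensures <PluginFolder>/controller and <PluginFolder>/node exist, then
	// builds the metadata cache persister ("node" or "k8s_configmap").
	cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, "node", "csi-cephfsplugin")
	if err != nil {
		// The helper already logged the cause via klog.
		os.Exit(1)
	}

	_ = cp // hand cp to the driver's run loop, as cmd/cephfs/main.go does above

	os.Exit(0)
}
```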
From 0235b9c24945ac66c9243bdcf2665df1dccc2c39 Mon Sep 17 00:00:00 2001 From: gman Date: Wed, 20 Feb 2019 15:04:30 +0100 Subject: [PATCH 14/89] k8s metadata cache: delete shouldn't fail on NotFound errors --- pkg/util/k8scmcache.go | 17 +++++++++++------ pkg/util/nodecache.go | 10 +++++++--- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/pkg/util/k8scmcache.go b/pkg/util/k8scmcache.go index 570857f89..9ba1f9de5 100644 --- a/pkg/util/k8scmcache.go +++ b/pkg/util/k8scmcache.go @@ -110,7 +110,7 @@ func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFun continue } if err = json.Unmarshal([]byte(data), destObj); err != nil { - return errors.Wrap(err, "k8s-cm-cache: unmarshal error") + return errors.Wrapf(err, "k8s-cm-cache: JSON unmarshaling failed for configmap %s", cm.ObjectMeta.Name) } if err = f(cm.ObjectMeta.Name); err != nil { return err @@ -123,12 +123,12 @@ func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFun func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error { cm, err := k8scm.getMetadataCM(identifier) if cm != nil && err == nil { - klog.V(4).Infof("k8s-cm-cache: configmap already exists, skipping configmap creation") + klog.V(4).Infof("k8s-cm-cache: configmap %s already exists, skipping configmap creation", identifier) return nil } dataJSON, err := json.Marshal(data) if err != nil { - return errors.Wrap(err, "k8s-cm-cache: marshal error") + return errors.Wrapf(err, "k8s-cm-cache: JSON marshaling failed for configmap %s", identifier) } cm = &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -145,13 +145,13 @@ func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error { _, err = k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Create(cm) if err != nil { if apierrs.IsAlreadyExists(err) { - klog.V(4).Infof("k8s-cm-cache: configmap already exists") + klog.V(4).Infof("k8s-cm-cache: configmap %s already exists", identifier) return nil } return errors.Wrapf(err, "k8s-cm-cache: couldn't persist %s metadata as configmap", identifier) } - klog.V(4).Infof("k8s-cm-cache: configmap %s successfully created\n", identifier) + klog.V(4).Infof("k8s-cm-cache: configmap %s successfully created", identifier) return nil } @@ -163,7 +163,7 @@ func (k8scm *K8sCMCache) Get(identifier string, data interface{}) error { } err = json.Unmarshal([]byte(cm.Data[cmDataKey]), data) if err != nil { - return errors.Wrap(err, "k8s-cm-cache: unmarshal error") + return errors.Wrapf(err, "k8s-cm-cache: JSON unmarshaling failed for configmap %s", identifier) } return nil } @@ -172,6 +172,11 @@ func (k8scm *K8sCMCache) Get(identifier string, data interface{}) error { func (k8scm *K8sCMCache) Delete(identifier string) error { err := k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Delete(identifier, nil) if err != nil { + if apierrs.IsNotFound(err) { + klog.V(4).Infof("k8s-cm-cache: cannot delete missing metadata configmap %s, assuming it's already deleted", identifier) + return nil + } + return errors.Wrapf(err, "k8s-cm-cache: couldn't delete metadata configmap %s", identifier) } klog.V(4).Infof("k8s-cm-cache: successfully deleted metadata configmap %s", identifier) diff --git a/pkg/util/nodecache.go b/pkg/util/nodecache.go index 947375b00..510ffa246 100644 --- a/pkg/util/nodecache.go +++ b/pkg/util/nodecache.go @@ -44,7 +44,7 @@ func (nc *NodeCache) EnsureCacheDirectory(cacheDir string) error { if _, err := os.Stat(fullPath); os.IsNotExist(err) { // #nosec if err := os.Mkdir(fullPath, 0755); err != nil { - return 
errors.Wrapf(err, "node-cache: failed to create %s folder with error: %v", fullPath, err) + return errors.Wrapf(err, "node-cache: failed to create %s folder", fullPath) } } return nil @@ -152,9 +152,13 @@ func (nc *NodeCache) Delete(identifier string) error { file := path.Join(nc.BasePath, cacheDir, identifier+".json") err := os.Remove(file) if err != nil { - if err != os.ErrNotExist { - return errors.Wrapf(err, "node-cache: error removing file %s", file) + if err == os.ErrNotExist { + klog.V(4).Infof("node-cache: cannot delete missing metadata storage file %s, assuming it's already deleted", file) + return nil } + + return errors.Wrapf(err, "node-cache: error removing file %s", file) + } klog.V(4).Infof("node-cache: successfully deleted metadata storage file at: %+v\n", file) return nil From 27b46aba0895d2f3b5aca94c01d81d4ce518c24a Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 21 Feb 2019 10:00:21 +0530 Subject: [PATCH 15/89] Add helm chat for cephfs Signed-off-by: Madhu Rajanna --- deploy/cephfs/helm/.helmignore | 21 +++ deploy/cephfs/helm/Chart.yaml | 14 ++ deploy/cephfs/helm/README.md | 23 +++ deploy/cephfs/helm/templates/NOTES.txt | 2 + deploy/cephfs/helm/templates/_helpers.tpl | 119 +++++++++++++++ .../helm/templates/attacher-clusterrole.yaml | 25 ++++ .../attacher-clusterrolebinding.yaml | 20 +++ .../helm/templates/attacher-service.yaml | 18 +++ .../templates/attacher-serviceaccount.yaml | 12 ++ .../helm/templates/attacher-statefulset.yaml | 60 ++++++++ .../templates/nodeplugin-clusterrole.yaml | 28 ++++ .../nodeplugin-clusterrolebinding.yaml | 20 +++ .../helm/templates/nodeplugin-daemonset.yaml | 139 ++++++++++++++++++ .../templates/nodeplugin-serviceaccount.yaml | 12 ++ .../templates/provisioner-clusterrole.yaml | 31 ++++ .../provisioner-clusterrolebinding.yaml | 20 +++ .../helm/templates/provisioner-service.yaml | 18 +++ .../templates/provisioner-serviceaccount.yaml | 12 ++ .../templates/provisioner-statefulset.yaml | 92 ++++++++++++ deploy/cephfs/helm/values.yaml | 80 ++++++++++ 20 files changed, 766 insertions(+) create mode 100644 deploy/cephfs/helm/.helmignore create mode 100644 deploy/cephfs/helm/Chart.yaml create mode 100644 deploy/cephfs/helm/README.md create mode 100644 deploy/cephfs/helm/templates/NOTES.txt create mode 100644 deploy/cephfs/helm/templates/_helpers.tpl create mode 100644 deploy/cephfs/helm/templates/attacher-clusterrole.yaml create mode 100644 deploy/cephfs/helm/templates/attacher-clusterrolebinding.yaml create mode 100644 deploy/cephfs/helm/templates/attacher-service.yaml create mode 100644 deploy/cephfs/helm/templates/attacher-serviceaccount.yaml create mode 100644 deploy/cephfs/helm/templates/attacher-statefulset.yaml create mode 100644 deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml create mode 100644 deploy/cephfs/helm/templates/nodeplugin-clusterrolebinding.yaml create mode 100644 deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml create mode 100644 deploy/cephfs/helm/templates/nodeplugin-serviceaccount.yaml create mode 100644 deploy/cephfs/helm/templates/provisioner-clusterrole.yaml create mode 100644 deploy/cephfs/helm/templates/provisioner-clusterrolebinding.yaml create mode 100644 deploy/cephfs/helm/templates/provisioner-service.yaml create mode 100644 deploy/cephfs/helm/templates/provisioner-serviceaccount.yaml create mode 100644 deploy/cephfs/helm/templates/provisioner-statefulset.yaml create mode 100644 deploy/cephfs/helm/values.yaml diff --git a/deploy/cephfs/helm/.helmignore b/deploy/cephfs/helm/.helmignore new file 
mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/deploy/cephfs/helm/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deploy/cephfs/helm/Chart.yaml b/deploy/cephfs/helm/Chart.yaml new file mode 100644 index 000000000..d3e94d26d --- /dev/null +++ b/deploy/cephfs/helm/Chart.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +appVersion: "1.0.0" +description: "Container Storage Interface (CSI) driver, +provisioner, and attacher for Ceph cephfs" +name: ceph-csi-cephfs +version: 0.4.0 +keywords: + - ceph + - cephfs + - ceph-csi +home: https://github.com/ceph/ceph-csi +sources: + - https://github.com/ceph/ceph-csi/tree/csi-v1.0/deploy/cephfs/helm diff --git a/deploy/cephfs/helm/README.md b/deploy/cephfs/helm/README.md new file mode 100644 index 000000000..3fa72d336 --- /dev/null +++ b/deploy/cephfs/helm/README.md @@ -0,0 +1,23 @@ +# ceph-csi-cephfs + +The ceph-csi-cephfs chart adds cephfs volume support to your cluster. + +## Install Chart + +To install the Chart into your Kubernetes cluster + +```bash +helm install --name "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs +``` + +After installation succeeds, you can get a status of Chart + +```bash +helm status "ceph-csi-cephfs" +``` + +If you want to delete your Chart, use this command + +```bash +helm delete --purge "ceph-csi-cephfs" +``` diff --git a/deploy/cephfs/helm/templates/NOTES.txt b/deploy/cephfs/helm/templates/NOTES.txt new file mode 100644 index 000000000..3af9f2b57 --- /dev/null +++ b/deploy/cephfs/helm/templates/NOTES.txt @@ -0,0 +1,2 @@ +Examples on how to configure a storage class and start using the driver are here: +https://github.com/ceph/ceph-csi/tree/csi-v1.0/examples/cephfs diff --git a/deploy/cephfs/helm/templates/_helpers.tpl b/deploy/cephfs/helm/templates/_helpers.tpl new file mode 100644 index 000000000..e604150ae --- /dev/null +++ b/deploy/cephfs/helm/templates/_helpers.tpl @@ -0,0 +1,119 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ceph-csi-cephfs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ceph-csi-cephfs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "ceph-csi-cephfs.attacher.fullname" -}} +{{- if .Values.attacher.fullnameOverride -}} +{{- .Values.attacher.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.attacher.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.attacher.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ceph-csi-cephfs.nodeplugin.fullname" -}} +{{- if .Values.nodeplugin.fullnameOverride -}} +{{- .Values.nodeplugin.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ceph-csi-cephfs.provisioner.fullname" -}} +{{- if .Values.provisioner.fullnameOverride -}} +{{- .Values.provisioner.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ceph-csi-cephfs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ceph-csi-cephfs.serviceAccountName.attacher" -}} +{{- if .Values.serviceAccounts.attacher.create -}} + {{ default (include "ceph-csi-cephfs.attacher.fullname" .) .Values.serviceAccounts.attacher.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.attacher.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ceph-csi-cephfs.serviceAccountName.nodeplugin" -}} +{{- if .Values.serviceAccounts.nodeplugin.create -}} + {{ default (include "ceph-csi-cephfs.nodeplugin.fullname" .) .Values.serviceAccounts.nodeplugin.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeplugin.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ceph-csi-cephfs.serviceAccountName.provisioner" -}} +{{- if .Values.serviceAccounts.provisioner.create -}} + {{ default (include "ceph-csi-cephfs.provisioner.fullname" .) 
.Values.serviceAccounts.provisioner.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.provisioner.name }} +{{- end -}} +{{- end -}} diff --git a/deploy/cephfs/helm/templates/attacher-clusterrole.yaml b/deploy/cephfs/helm/templates/attacher-clusterrole.yaml new file mode 100644 index 000000000..2f70448e2 --- /dev/null +++ b/deploy/cephfs/helm/templates/attacher-clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.attacher.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] +{{- end -}} diff --git a/deploy/cephfs/helm/templates/attacher-clusterrolebinding.yaml b/deploy/cephfs/helm/templates/attacher-clusterrolebinding.yaml new file mode 100644 index 000000000..832e23dec --- /dev/null +++ b/deploy/cephfs/helm/templates/attacher-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.attacher.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "ceph-csi-cephfs.attacher.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/deploy/cephfs/helm/templates/attacher-service.yaml b/deploy/cephfs/helm/templates/attacher-service.yaml new file mode 100644 index 000000000..379830d53 --- /dev/null +++ b/deploy/cephfs/helm/templates/attacher-service.yaml @@ -0,0 +1,18 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ include "ceph-csi-cephfs.attacher.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + app: {{ include "ceph-csi-cephfs.name" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + ports: + - name: dummy + port: 12345 diff --git a/deploy/cephfs/helm/templates/attacher-serviceaccount.yaml b/deploy/cephfs/helm/templates/attacher-serviceaccount.yaml new file mode 100644 index 000000000..dbb70ccc2 --- /dev/null +++ b/deploy/cephfs/helm/templates/attacher-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccounts.attacher.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . 
}} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/deploy/cephfs/helm/templates/attacher-statefulset.yaml b/deploy/cephfs/helm/templates/attacher-statefulset.yaml new file mode 100644 index 000000000..88514d062 --- /dev/null +++ b/deploy/cephfs/helm/templates/attacher-statefulset.yaml @@ -0,0 +1,60 @@ +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: {{ include "ceph-csi-cephfs.attacher.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + serviceName: {{ include "ceph-csi-cephfs.attacher.fullname" . }} + replicas: {{ .Values.attacher.replicas }} + selector: + matchLabels: + app: {{ include "ceph-csi-cephfs.name" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.attacher.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }} + containers: + - name: csi-cephfsplugin-attacher + image: "{{ .Values.attacher.image.repository }}:{{ .Values.attacher.image.tag }}" + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}" + imagePullPolicy: {{ .Values.attacher.image.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: {{ .Values.socketDir }} + resources: +{{ toYaml .Values.attacher.resources | indent 12 }} + volumes: + - name: socket-dir + hostPath: + path: {{ .Values.socketDir }} + type: DirectoryOrCreate + {{- if .Values.attacher.affinity -}} + affinity: +{{ toYaml .Values.attacher.affinity . | indent 8 }} + {{- end -}} + {{- if .Values.attacher.nodeSelector -}} + nodeSelector: +{{ toYaml .Values.attacher.nodeSelector | indent 8 }} + {{- end -}} + {{- if .Values.attacher.tolerations -}} + tolerations: +{{ toYaml .Values.attacher.tolerations | indent 8 }} + {{- end -}} diff --git a/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml b/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml new file mode 100644 index 000000000..290dd3f33 --- /dev/null +++ b/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . 
}} + component: {{ .Values.nodeplugin.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list"] +{{- end -}} diff --git a/deploy/cephfs/helm/templates/nodeplugin-clusterrolebinding.yaml b/deploy/cephfs/helm/templates/nodeplugin-clusterrolebinding.yaml new file mode 100644 index 000000000..24e21351c --- /dev/null +++ b/deploy/cephfs/helm/templates/nodeplugin-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.nodeplugin.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml new file mode 100644 index 000000000..20ac11933 --- /dev/null +++ b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml @@ -0,0 +1,139 @@ +kind: DaemonSet +apiVersion: apps/v1beta2 +metadata: + name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.nodeplugin.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ include "ceph-csi-cephfs.name" . }} + component: {{ .Values.nodeplugin.name }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.nodeplugin.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }} + hostNetwork: true + hostPID: true + # to use e.g. 
Rook orchestrated cluster, and mons' FQDN is + # resolved through k8s service, set dns policy to cluster first + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: driver-registrar + image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}" + args: + - "--v=5" + - "--csi-address=/csi/{{ .Values.socketFile }}" + - "--kubelet-registration-path={{ .Values.socketDir }}/{{ .Values.socketFile }}" + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi-cephfsplugin /registration/csi-cephfsplugin-reg.sock"] + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + imagePullPolicy: {{ .Values.nodeplugin.registrar.image.imagePullPolicy }} + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: +{{ toYaml .Values.nodeplugin.registrar.resources | indent 12 }} + - name: csi-cephfsplugin + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}" + args : + - "--nodeid=$(NODE_ID)" + - "--endpoint=$(CSI_ENDPOINT)" + - "--v=5" + - "--drivername=csi-cephfsplugin" + - "--metadatastorage=k8s_configmap" + env: + - name: HOST_ROOTFS + value: "/rootfs" + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}" + imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }} + volumeMounts: + - name: plugin-dir + mountPath: {{ .Values.socketDir }} + - name: pods-mount-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: "Bidirectional" + - name: plugin-mount-dir + mountPath: {{ .Values.volumeDevicesDir }} + mountPropagation: "Bidirectional" + - mountPath: /dev + name: host-dev + - mountPath: /rootfs + name: host-rootfs + - mountPath: /sys + name: host-sys + - mountPath: /lib/modules + name: lib-modules + readOnly: true + resources: +{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} + volumes: + - name: plugin-dir + hostPath: + path: {{ .Values.socketDir }} + type: DirectoryOrCreate + - name: plugin-mount-dir + hostPath: + path: {{ .Values.volumeDevicesDir }} + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: {{ .Values.registrationDir }} + type: Directory + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + - name: host-dev + hostPath: + path: /dev + - name: host-rootfs + hostPath: + path: / + - name: host-sys + hostPath: + path: /sys + - name: lib-modules + hostPath: + path: /lib/modules + {{- if .Values.nodeplugin.affinity -}} + affinity: +{{ toYaml .Values.nodeplugin.affinity . | indent 8 }} + {{- end -}} + {{- if .Values.nodeplugin.nodeSelector -}} + nodeSelector: +{{ toYaml .Values.nodeplugin.nodeSelector | indent 8 }} + {{- end -}} + {{- if .Values.nodeplugin.tolerations -}} + tolerations: +{{ toYaml .Values.nodeplugin.tolerations | indent 8 }} + {{- end -}} diff --git a/deploy/cephfs/helm/templates/nodeplugin-serviceaccount.yaml b/deploy/cephfs/helm/templates/nodeplugin-serviceaccount.yaml new file mode 100644 index 000000000..88bd8f1bc --- /dev/null +++ b/deploy/cephfs/helm/templates/nodeplugin-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccounts.nodeplugin.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . 
}} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.nodeplugin.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml b/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml new file mode 100644 index 000000000..590521ab2 --- /dev/null +++ b/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml @@ -0,0 +1,31 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "create", "delete"] +{{- end -}} diff --git a/deploy/cephfs/helm/templates/provisioner-clusterrolebinding.yaml b/deploy/cephfs/helm/templates/provisioner-clusterrolebinding.yaml new file mode 100644 index 000000000..82d5d1316 --- /dev/null +++ b/deploy/cephfs/helm/templates/provisioner-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/deploy/cephfs/helm/templates/provisioner-service.yaml b/deploy/cephfs/helm/templates/provisioner-service.yaml new file mode 100644 index 000000000..93d62ffb7 --- /dev/null +++ b/deploy/cephfs/helm/templates/provisioner-service.yaml @@ -0,0 +1,18 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + app: {{ include "ceph-csi-cephfs.name" . 
}} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + ports: + - name: dummy + port: 12345 diff --git a/deploy/cephfs/helm/templates/provisioner-serviceaccount.yaml b/deploy/cephfs/helm/templates/provisioner-serviceaccount.yaml new file mode 100644 index 000000000..2c1d9f74f --- /dev/null +++ b/deploy/cephfs/helm/templates/provisioner-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccounts.provisioner.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/deploy/cephfs/helm/templates/provisioner-statefulset.yaml b/deploy/cephfs/helm/templates/provisioner-statefulset.yaml new file mode 100644 index 000000000..2f5c48c8a --- /dev/null +++ b/deploy/cephfs/helm/templates/provisioner-statefulset.yaml @@ -0,0 +1,92 @@ +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + serviceName: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + replicas: {{ .Values.provisioner.replicas }} + selector: + matchLabels: + app: {{ include "ceph-csi-cephfs.name" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }} + containers: + - name: csi-provisioner + image: "{{ .Values.provisioner.image.repository }}:{{ .Values.provisioner.image.tag }}" + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}" + imagePullPolicy: {{ .Values.provisioner.image.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: {{ .Values.socketDir }} + resources: +{{ toYaml .Values.provisioner.resources | indent 12 }} + - name: csi-cephfsplugin + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}" + args : + - "--nodeid=$(NODE_ID)" + - "--endpoint=$(CSI_ENDPOINT)" + - "--v=5" + - "--drivername=csi-cephfsplugin" + - "--metadatastorage=k8s_configmap" + env: + - name: HOST_ROOTFS + value: "/rootfs" + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}" + imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: {{ .Values.socketDir }} + - name: host-rootfs + mountPath: "/rootfs" + resources: +{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} + volumes: + - name: socket-dir + emptyDir: {} +#FIXME this seems way too much. Why is it needed at all for this? 
+ - name: host-rootfs + hostPath: + path: / + {{- if .Values.provisioner.affinity -}} + affinity: +{{ toYaml .Values.provisioner.affinity . | indent 8 }} + {{- end -}} + {{- if .Values.provisioner.nodeSelector -}} + nodeSelector: +{{ toYaml .Values.provisioner.nodeSelector | indent 8 }} + {{- end -}} + {{- if .Values.provisioner.tolerations -}} + tolerations: +{{ toYaml .Values.provisioner.tolerations | indent 8 }} + {{- end -}} diff --git a/deploy/cephfs/helm/values.yaml b/deploy/cephfs/helm/values.yaml new file mode 100644 index 000000000..f662c8849 --- /dev/null +++ b/deploy/cephfs/helm/values.yaml @@ -0,0 +1,80 @@ +--- +rbac: + create: true + +serviceAccounts: + attacher: + create: true + name: + nodeplugin: + create: true + name: + provisioner: + create: true + name: + +socketDir: /var/lib/kubelet/plugins/csi-cephfsplugin +socketFile: csi.sock +registrationDir: /var/lib/kubelet/plugins_registry +volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices + +attacher: + name: attacher + + replicaCount: 1 + + image: + repository: quay.io/k8scsi/csi-attacher + tag: v1.0.1 + pullPolicy: IfNotPresent + + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +nodeplugin: + name: nodeplugin + + registrar: + image: + repository: quay.io/k8scsi/csi-node-driver-registrar + tag: v1.0.2 + pullPolicy: IfNotPresent + + resources: {} + + plugin: + image: + repository: quay.io/cephcsi/cephfsplugin + tag: v1.0.0 + pullPolicy: IfNotPresent + + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +provisioner: + name: provisioner + + replicaCount: 1 + + image: + repository: quay.io/k8scsi/csi-provisioner + tag: v1.0.1 + pullPolicy: IfNotPresent + + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} From 50091acd0ca49dcba4b4d2f03237f3174944a695 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 21 Feb 2019 10:00:57 +0530 Subject: [PATCH 16/89] update deploy.sh to push cephfs chat Signed-off-by: Madhu Rajanna --- deploy.sh | 47 ++++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/deploy.sh b/deploy.sh index 9ce90e442..dff25413b 100755 --- a/deploy.sh +++ b/deploy.sh @@ -1,5 +1,29 @@ #!/bin/bash +push_helm_chats() { + PACKAGE=$1 + CHANGED=0 + VERSION=$(grep 'version:' deploy/"$PACKAGE"/helm/Chart.yaml | awk '{print $2}') + + if [ ! -f "tmp/csi-charts/docs/$PACKAGE/ceph-csi-$PACKAGE-$VERSION.tgz" ]; then + CHANGED=1 + ln -s helm deploy/"$PACKAGE"/ceph-csi-"$PACKAGE" + mkdir -p tmp/csi-charts/docs/"$PACKAGE" + pushd tmp/csi-charts/docs/"$PACKAGE" >/dev/null + helm init --client-only + helm package ../../../../deploy/"$PACKAGE"/ceph-csi-"$PACKAGE" + popd >/dev/null + fi + + if [ $CHANGED -eq 1 ]; then + pushd tmp/csi-charts/docs >/dev/null + helm repo index . + git add --all :/ && git commit -m "Update repo" + git push https://"$GITHUB_TOKEN"@github.com/ceph/csi-charts + popd >/dev/null + fi +} + if [ "${TRAVIS_BRANCH}" == 'master' ]; then export RBD_IMAGE_VERSION='v0.3.0' export CEPHFS_IMAGE_VERSION='v0.3.0' @@ -29,25 +53,6 @@ if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then mkdir -p csi-charts/docs popd >/dev/null - CHANGED=0 - VERSION=$(grep 'version:' deploy/rbd/helm/Chart.yaml | awk '{print $2}') - - if [ ! 
-f "tmp/csi-charts/docs/rbd/ceph-csi-rbd-$VERSION.tgz" ]; then - CHANGED=1 - ln -s helm deploy/rbd/ceph-csi-rbd - mkdir -p tmp/csi-charts/docs/rbd - pushd tmp/csi-charts/docs/rbd >/dev/null - helm init --client-only - helm package ../../../../deploy/rbd/ceph-csi-rbd - popd >/dev/null - fi - - if [ $CHANGED -eq 1 ]; then - pushd tmp/csi-charts/docs >/dev/null - helm repo index . - git add --all :/ && git commit -m "Update repo" - git push https://"$GITHUB_TOKEN"@github.com/ceph/csi-charts - popd >/dev/null - fi - + push_helm_chats rbd + push_helm_chats cephfs fi From 3ac5af6ce6efd9900027a25172615a84db597941 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 21 Feb 2019 10:02:39 +0530 Subject: [PATCH 17/89] Fix formatting in helm rbd readme Signed-off-by: Madhu Rajanna --- deploy/rbd/helm/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deploy/rbd/helm/README.md b/deploy/rbd/helm/README.md index 42a55e55b..35ce742a0 100644 --- a/deploy/rbd/helm/README.md +++ b/deploy/rbd/helm/README.md @@ -4,7 +4,7 @@ The ceph-csi-rbd chart adds rbd volume support to your cluster. ## Install Chart -To install the Chart into your Kubernetes cluster : +To install the Chart into your Kubernetes cluster ```bash helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" ceph-csi/ceph-csi-rbd @@ -16,9 +16,8 @@ After installation succeeds, you can get a status of Chart helm status "ceph-csi-rbd" ``` -If you want to delete your Chart, use this command: +If you want to delete your Chart, use this command ```bash helm delete --purge "ceph-csi-rbd" ``` - From 68e69502e81ccf36b1a4ca1a1f1eaa95cf05e27d Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 21 Feb 2019 17:05:24 +0530 Subject: [PATCH 18/89] remove secret and key from logging Signed-off-by: Madhu Rajanna --- pkg/cephfs/util.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/pkg/cephfs/util.go b/pkg/cephfs/util.go index daab8019f..323b965f5 100644 --- a/pkg/cephfs/util.go +++ b/pkg/cephfs/util.go @@ -49,38 +49,39 @@ func closePipeOnError(pipe io.Closer, err error) { func execCommand(program string, args ...string) (stdout, stderr []byte, err error) { cmd := exec.Command(program, args...) 
// nolint: gosec - klog.V(4).Infof("cephfs: EXEC %s %s", program, util.StripSecretInArgs(args)) + stripArgs := util.StripSecretInArgs(args) + klog.V(4).Infof("cephfs: EXEC %s %s", program, stripArgs) stdoutPipe, err := cmd.StdoutPipe() if err != nil { - return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, args, err) + return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, stripArgs, err) } defer closePipeOnError(stdoutPipe, err) stderrPipe, err := cmd.StderrPipe() if err != nil { - return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, args, err) + return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, stripArgs, err) } defer closePipeOnError(stderrPipe, err) if err = cmd.Start(); err != nil { - return nil, nil, fmt.Errorf("failed to run %s %v: %v", program, args, err) + return nil, nil, fmt.Errorf("failed to run %s %v: %v", program, stripArgs, err) } stdout, err = ioutil.ReadAll(stdoutPipe) if err != nil { - return nil, nil, fmt.Errorf("failed to read from stdout for %s %v: %v", program, args, err) + return nil, nil, fmt.Errorf("failed to read from stdout for %s %v: %v", program, stripArgs, err) } stderr, err = ioutil.ReadAll(stderrPipe) if err != nil { - return nil, nil, fmt.Errorf("failed to read from stderr for %s %v: %v", program, args, err) + return nil, nil, fmt.Errorf("failed to read from stderr for %s %v: %v", program, stripArgs, err) } if waitErr := cmd.Wait(); waitErr != nil { - return nil, nil, fmt.Errorf("an error occurred while running %s %v: %v: %s", program, args, waitErr, stderr) + return nil, nil, fmt.Errorf("an error occurred while running %s %v: %v: %s", program, stripArgs, waitErr, stderr) } return @@ -98,7 +99,7 @@ func execCommandJSON(v interface{}, program string, args ...string) error { } if err = json.Unmarshal(stdout, v); err != nil { - return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, args, stdout, err) + return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, util.StripSecretInArgs(args), stdout, err) } return nil From 85b5e7ed5103f9959f9123c2dce8f64becd2fee9 Mon Sep 17 00:00:00 2001 From: Fred Rolland Date: Mon, 25 Feb 2019 15:09:59 +0200 Subject: [PATCH 19/89] Remove requirement for ceph config and keyring files --- docs/deploy-rbd.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index 2f09e4435..bfaa46bb9 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -65,8 +65,6 @@ Admin credentials are required for provisioning new RBD images `ADMIN_NAME`: `ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the client with admin privileges, and the value is its password -Also note that CSI RBD expects admin keyring and Ceph config file in `/etc/ceph`. 
- ## Deployment with Kubernetes Requires Kubernetes 1.11 From b0c86df9785beb2f29cfc294333ccdb246f844f7 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 21 Feb 2019 10:33:46 +0530 Subject: [PATCH 20/89] update yamllint to ignore cephfs helm Signed-off-by: Madhu Rajanna --- scripts/lint-text.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint-text.sh b/scripts/lint-text.sh index 066c8d382..c7bcf2f9f 100755 --- a/scripts/lint-text.sh +++ b/scripts/lint-text.sh @@ -44,6 +44,6 @@ run_check '.*\.(ba)?sh' bash -n # Install via: pip install yamllint # disable yamlint chekck for helm chats -run_check '.*\.ya?ml' yamllint -s -d "{extends: default, rules: {line-length: {allow-non-breakable-inline-mappings: true}},ignore: deploy/rbd/helm/templates/*.yaml}" +run_check '.*\.ya?ml' yamllint -s -d "{extends: default, rules: {line-length: {allow-non-breakable-inline-mappings: true}},ignore: deploy/*/helm/templates/*.yaml}" echo "ALL OK." From 55ad4924b33038e2fb05c88e47082879d93a6bd3 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Fri, 22 Feb 2019 10:20:17 +0530 Subject: [PATCH 21/89] update readme to deploy cephfs in namespace Signed-off-by: Madhu Rajanna --- deploy/cephfs/helm/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/cephfs/helm/README.md b/deploy/cephfs/helm/README.md index 3fa72d336..0e4a1f223 100644 --- a/deploy/cephfs/helm/README.md +++ b/deploy/cephfs/helm/README.md @@ -7,7 +7,7 @@ The ceph-csi-cephfs chart adds cephfs volume support to your cluster. To install the Chart into your Kubernetes cluster ```bash -helm install --name "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs +helm install --namespace "ceph-csi-cephfs" --name "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs ``` After installation succeeds, you can get a status of Chart @@ -19,5 +19,5 @@ helm status "ceph-csi-cephfs" If you want to delete your Chart, use this command ```bash -helm delete --purge "ceph-csi-cephfs" +helm delete --purge "ceph-csi-cephfs" ``` From e5dbea15d34afc00b771b56750aeacfe515f4923 Mon Sep 17 00:00:00 2001 From: gman Date: Mon, 25 Feb 2019 18:05:20 +0100 Subject: [PATCH 22/89] util/cachepersister: check and return CacheEntryNotFound error in Get() --- pkg/util/cachepersister.go | 10 ++++++++-- pkg/util/k8scmcache.go | 4 ++++ pkg/util/nodecache.go | 4 ++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/pkg/util/cachepersister.go b/pkg/util/cachepersister.go index e72c28f16..ba8918587 100644 --- a/pkg/util/cachepersister.go +++ b/pkg/util/cachepersister.go @@ -23,13 +23,19 @@ import ( ) const ( - //PluginFolder defines location of plugins + // PluginFolder defines location of plugins PluginFolder = "/var/lib/kubelet/plugins" ) -// ForAllFunc stores metadata with identifier +// ForAllFunc is a unary predicate for visiting all cache entries +// matching the `pattern' in CachePersister's ForAll function. 
type ForAllFunc func(identifier string) error +// CacheEntryNotFound is an error type for "Not Found" cache errors +type CacheEntryNotFound struct { + error +} + // CachePersister interface implemented for store type CachePersister interface { Create(identifier string, data interface{}) error diff --git a/pkg/util/k8scmcache.go b/pkg/util/k8scmcache.go index 9ba1f9de5..10a6d7ef5 100644 --- a/pkg/util/k8scmcache.go +++ b/pkg/util/k8scmcache.go @@ -159,6 +159,10 @@ func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error { func (k8scm *K8sCMCache) Get(identifier string, data interface{}) error { cm, err := k8scm.getMetadataCM(identifier) if err != nil { + if apierrs.IsNotFound(err) { + return &CacheEntryNotFound{err} + } + return err } err = json.Unmarshal([]byte(cm.Data[cmDataKey]), data) diff --git a/pkg/util/nodecache.go b/pkg/util/nodecache.go index 510ffa246..5659d4eaa 100644 --- a/pkg/util/nodecache.go +++ b/pkg/util/nodecache.go @@ -130,6 +130,10 @@ func (nc *NodeCache) Get(identifier string, data interface{}) error { // #nosec fp, err := os.Open(file) if err != nil { + if os.IsNotExist(errors.Cause(err)) { + return &CacheEntryNotFound{err} + } + return errors.Wrapf(err, "node-cache: open error for %s", file) } From ce3affcc6a16ef8e3a5aaebc689c035f28c9a6e8 Mon Sep 17 00:00:00 2001 From: gman Date: Mon, 25 Feb 2019 18:07:28 +0100 Subject: [PATCH 23/89] cephfs: DeleteVolume should assume the volume to be already deleted if metadata doesn't exist --- pkg/cephfs/controllerserver.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go index 235a99f3a..67a342040 100644 --- a/pkg/cephfs/controllerserver.go +++ b/pkg/cephfs/controllerserver.go @@ -97,8 +97,9 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol }, nil } -// DeleteVolume deletes the volume in backend and removes the volume metadata -// from store +// DeleteVolume deletes the volume in backend +// and removes the volume metadata from store +// nolint: gocyclo func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { if err := cs.validateDeleteVolumeRequest(); err != nil { klog.Errorf("DeleteVolumeRequest validation failed: %v", err) @@ -108,11 +109,15 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol var ( volID = volumeID(req.GetVolumeId()) secrets = req.GetSecrets() - err error ) ce := &controllerCacheEntry{} - if err = cs.MetadataStore.Get(string(volID), ce); err != nil { + if err := cs.MetadataStore.Get(string(volID), ce); err != nil { + if err, ok := err.(*util.CacheEntryNotFound); ok { + klog.Infof("cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err) + return &csi.DeleteVolumeResponse{}, nil + } + return nil, status.Error(codes.Internal, err.Error()) } From d12fdfd40039ac4c1c5094fdb1a63e4db039f6eb Mon Sep 17 00:00:00 2001 From: gman Date: Mon, 25 Feb 2019 18:09:21 +0100 Subject: [PATCH 24/89] rbd: fixed metadata idempotency in DeleteVolume; DeleteSnapshot should assume the snapshot to be already deleted if metadata doesn't exist --- pkg/rbd/controllerserver.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index b1eddf29e..69b237693 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -18,7 +18,6 @@ package rbd import ( 
"fmt" - "os" "os/exec" "syscall" @@ -29,7 +28,6 @@ import ( "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" "github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/pborman/uuid" - "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -252,9 +250,11 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol rbdVol := &rbdVolume{} if err := cs.MetadataStore.Get(volumeID, rbdVol); err != nil { - if os.IsNotExist(errors.Cause(err)) { + if err, ok := err.(*util.CacheEntryNotFound); ok { + klog.V(3).Infof("metadata for volume %s not found, assuming the volume to be already deleted (%v)", volumeID, err) return &csi.DeleteVolumeResponse{}, nil } + return nil, err } @@ -471,6 +471,11 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS rbdSnap := &rbdSnapshot{} if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil { + if err, ok := err.(*util.CacheEntryNotFound); ok { + klog.V(3).Infof("metadata for snapshot %s not found, assuming the snapshot to be already deleted (%v)", snapshotID, err) + return &csi.DeleteSnapshotResponse{}, nil + } + return nil, err } From 60588d89685f1741bf8621e5dd328021d437129b Mon Sep 17 00:00:00 2001 From: gman Date: Tue, 26 Feb 2019 11:06:16 +0100 Subject: [PATCH 25/89] cephfs/volume: create/delete-volume idempotency checks --- pkg/cephfs/util.go | 73 +++++++++++----------------- pkg/cephfs/volume.go | 111 ++++++++++++++++++++++++------------------- 2 files changed, 90 insertions(+), 94 deletions(-) diff --git a/pkg/cephfs/util.go b/pkg/cephfs/util.go index 323b965f5..19928c0fa 100644 --- a/pkg/cephfs/util.go +++ b/pkg/cephfs/util.go @@ -17,11 +17,11 @@ limitations under the License. package cephfs import ( + "bytes" "encoding/json" "errors" "fmt" - "io" - "io/ioutil" + "os" "os/exec" "google.golang.org/grpc/codes" @@ -30,61 +30,41 @@ import ( "github.com/ceph/ceph-csi/pkg/util" "github.com/container-storage-interface/spec/lib/go/csi" + "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" ) type volumeID string +func mustUnlock(m keymutex.KeyMutex, key string) { + if err := m.UnlockKey(key); err != nil { + klog.Fatalf("failed to unlock mutex for %s: %v", key, err) + } +} + func makeVolumeID(volName string) volumeID { return volumeID("csi-cephfs-" + volName) } -func closePipeOnError(pipe io.Closer, err error) { - if err != nil { - if err = pipe.Close(); err != nil { - klog.Warningf("failed to close pipe: %v", err) - } - } -} - func execCommand(program string, args ...string) (stdout, stderr []byte, err error) { - cmd := exec.Command(program, args...) // nolint: gosec - stripArgs := util.StripSecretInArgs(args) - klog.V(4).Infof("cephfs: EXEC %s %s", program, stripArgs) + var ( + cmd = exec.Command(program, args...) 
// nolint: gosec + sanitizedArgs = util.StripSecretInArgs(args) + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) - stdoutPipe, err := cmd.StdoutPipe() - if err != nil { - return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, stripArgs, err) + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + + klog.V(4).Infof("cephfs: EXEC %s %s", program, sanitizedArgs) + + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("an error occurred while running (%d) %s %v: %v: %s", + cmd.Process.Pid, program, sanitizedArgs, err, stderrBuf.Bytes()) } - defer closePipeOnError(stdoutPipe, err) - - stderrPipe, err := cmd.StderrPipe() - if err != nil { - return nil, nil, fmt.Errorf("cannot open stdout pipe for %s %v: %v", program, stripArgs, err) - } - - defer closePipeOnError(stderrPipe, err) - - if err = cmd.Start(); err != nil { - return nil, nil, fmt.Errorf("failed to run %s %v: %v", program, stripArgs, err) - } - - stdout, err = ioutil.ReadAll(stdoutPipe) - if err != nil { - return nil, nil, fmt.Errorf("failed to read from stdout for %s %v: %v", program, stripArgs, err) - } - - stderr, err = ioutil.ReadAll(stderrPipe) - if err != nil { - return nil, nil, fmt.Errorf("failed to read from stderr for %s %v: %v", program, stripArgs, err) - } - - if waitErr := cmd.Wait(); waitErr != nil { - return nil, nil, fmt.Errorf("an error occurred while running %s %v: %v: %s", program, stripArgs, waitErr, stderr) - } - - return + return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil } func execCommandErr(program string, args ...string) error { @@ -117,6 +97,11 @@ func isMountPoint(p string) (bool, error) { return !notMnt, nil } +func pathExists(p string) bool { + _, err := os.Stat(p) + return err == nil +} + // Controller service request validation func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index 683677b53..3103aa74e 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -52,79 +52,66 @@ func setVolumeAttribute(root, attrName, attrValue string) error { } func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error { - cephRoot := getCephRootPathLocal(volID) - - if err := createMountPoint(cephRoot); err != nil { + if err := mountCephRoot(volID, volOptions, adminCr); err != nil { return err } + defer unmountCephRoot(volID) - // RootPath is not set for a dynamically provisioned volume - // Access to cephfs's / is required - volOptions.RootPath = "/" + var ( + volRoot = getCephRootVolumePathLocal(volID) + volRootCreating = volRoot + "-creating" + ) - m, err := newMounter(volOptions) - if err != nil { - return fmt.Errorf("failed to create mounter: %v", err) + if pathExists(volRoot) { + klog.V(4).Infof("cephfs: volume %s already exists, skipping creation", volID) + return nil } - if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil { - return fmt.Errorf("error mounting ceph root: %v", err) - } - - defer unmountAndRemove(cephRoot) - - volOptions.RootPath = getVolumeRootPathCeph(volID) - localVolRoot := getCephRootVolumePathLocal(volID) - - if err := createMountPoint(localVolRoot); err != nil { + if err := createMountPoint(volRootCreating); err != nil { return err } if bytesQuota > 0 { - if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil { + 
if err := setVolumeAttribute(volRootCreating, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil { return err } } - if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool", volOptions.Pool); err != nil { + if err := setVolumeAttribute(volRootCreating, "ceph.dir.layout.pool", volOptions.Pool); err != nil { return fmt.Errorf("%v\ncephfs: Does pool '%s' exist?", err, volOptions.Pool) } - if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil { + if err := setVolumeAttribute(volRootCreating, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil { return err } + if err := os.Rename(volRootCreating, volRoot); err != nil { + return fmt.Errorf("couldn't mark volume %s as created: %v", volID, err) + } + return nil } func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions) error { + if err := mountCephRoot(volID, volOptions, adminCr); err != nil { + return err + } + defer unmountCephRoot(volID) + var ( - cephRoot = getCephRootPathLocal(volID) volRoot = getCephRootVolumePathLocal(volID) volRootDeleting = volRoot + "-deleting" ) - if err := createMountPoint(cephRoot); err != nil { - return err - } - - // Root path is not set for dynamically provisioned volumes - // Access to cephfs's / is required - volOptions.RootPath = "/" - - m, err := newMounter(volOptions) - if err != nil { - return fmt.Errorf("failed to create mounter: %v", err) - } - - if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil { - return fmt.Errorf("error mounting ceph root: %v", err) - } - - defer unmountAndRemove(cephRoot) - - if err := os.Rename(volRoot, volRootDeleting); err != nil { - return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err) + if pathExists(volRoot) { + if err := os.Rename(volRoot, volRootDeleting); err != nil { + return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err) + } + } else { + if !pathExists(volRootDeleting) { + klog.V(4).Infof("cephfs: volume %s not found, assuming it to be already deleted", volID) + return nil + } } if err := os.RemoveAll(volRootDeleting); err != nil { @@ -134,13 +121,37 @@ func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions return nil } -func unmountAndRemove(mountPoint string) { - var err error - if err = unmountVolume(mountPoint); err != nil { - klog.Errorf("failed to unmount %s with error %s", mountPoint, err) +func mountCephRoot(volID volumeID, volOptions *volumeOptions, adminCr *credentials) error { + cephRoot := getCephRootPathLocal(volID) + + // Root path is not set for dynamically provisioned volumes + // Access to cephfs's / is required + volOptions.RootPath = "/" + + if err := createMountPoint(cephRoot); err != nil { + return err } - if err = os.Remove(mountPoint); err != nil { - klog.Errorf("failed to remove %s with error %s", mountPoint, err) + m, err := newMounter(volOptions) + if err != nil { + return fmt.Errorf("failed to create mounter: %v", err) + } + + if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil { + return fmt.Errorf("error mounting ceph root: %v", err) + } + + return nil +} + +func unmountCephRoot(volID volumeID) { + cephRoot := getCephRootPathLocal(volID) + + if err := unmountVolume(cephRoot); err != nil { + klog.Errorf("failed to unmount %s with error %s", cephRoot, err) + } + + if err := os.Remove(cephRoot); err != nil { + klog.Errorf("failed to remove %s with error %s", cephRoot, err) } } From 143003bcfdacd4b4ef32dd90b43b2a4d7055c2db 
Mon Sep 17 00:00:00 2001 From: gman Date: Tue, 26 Feb 2019 11:06:25 +0100 Subject: [PATCH 26/89] cephfs: added locks for {Create,Delete}Volume, NodeStageVolume --- pkg/cephfs/controllerserver.go | 11 +++++++++++ pkg/cephfs/nodeserver.go | 10 +++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go index 67a342040..a3ea1290e 100644 --- a/pkg/cephfs/controllerserver.go +++ b/pkg/cephfs/controllerserver.go @@ -24,6 +24,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/drivers/pkg/csi-common" + "k8s.io/kubernetes/pkg/util/keymutex" "github.com/ceph/ceph-csi/pkg/util" ) @@ -40,6 +41,10 @@ type controllerCacheEntry struct { VolumeID volumeID } +var ( + mtxControllerVolumeID = keymutex.NewHashed(0) +) + // CreateVolume creates the volume in backend and store the volume metadata func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { if err := cs.validateCreateVolumeRequest(req); err != nil { @@ -58,6 +63,9 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol volID := makeVolumeID(req.GetName()) + mtxControllerVolumeID.LockKey(string(volID)) + defer mustUnlock(mtxControllerVolumeID, string(volID)) + // Create a volume in case the user didn't provide one if volOptions.ProvisionVolume { @@ -143,6 +151,9 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol return nil, status.Error(codes.InvalidArgument, err.Error()) } + mtxControllerVolumeID.LockKey(string(volID)) + defer mustUnlock(mtxControllerVolumeID, string(volID)) + if err = purgeVolume(volID, cr, &ce.VolOptions); err != nil { klog.Errorf("failed to delete volume %s: %v", volID, err) return nil, status.Error(codes.Internal, err.Error()) diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index b9ec7284c..a5ffe1ad3 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -24,6 +24,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/keymutex" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-csi/drivers/pkg/csi-common" @@ -35,6 +36,10 @@ type NodeServer struct { *csicommon.DefaultNodeServer } +var ( + mtxNodeVolumeID = keymutex.NewHashed(0) +) + func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) { var ( cr *credentials @@ -44,7 +49,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi if volOptions.ProvisionVolume { // The volume is provisioned dynamically, get the credentials directly from Ceph - // First, store admin credentials - those are needed for retrieving the user credentials + // First, get admin credentials - those are needed for retrieving the user credentials adminCr, err := getAdminCredentials(secrets) if err != nil { @@ -100,6 +105,9 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol return nil, status.Error(codes.Internal, err.Error()) } + mtxNodeVolumeID.LockKey(string(volID)) + defer mustUnlock(mtxNodeVolumeID, string(volID)) + // Check if the volume is already mounted isMnt, err := isMountPoint(stagingTargetPath) From 2f8931315a570a7183d18db6578af6803fd36783 Mon Sep 17 00:00:00 2001 From: gman Date: Tue, 26 Feb 2019 14:46:21 +0100 Subject: [PATCH 27/89] don't attempt to delete mountpoint if unmount failed 
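On Linux, rmdir on a directory that is still a mountpoint fails with
EBUSY, so attempting the removal after a failed unmount only adds a
second error to the log. A minimal sketch of the intended cleanup order
(the helper name is hypothetical; it assumes this package's
unmountVolume and klog as shown in the diff below, where the real
change lands in unmountCephRoot):

```go
// cleanupMountPoint removes the mountpoint directory only after a
// successful unmount; on unmount failure it logs and leaves the
// directory in place so a later retry can still unmount it.
func cleanupMountPoint(path string) {
	if err := unmountVolume(path); err != nil {
		klog.Errorf("failed to unmount %s: %v", path, err)
		return
	}
	if err := os.Remove(path); err != nil {
		klog.Errorf("failed to remove %s: %v", path, err)
	}
}
```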
--- pkg/cephfs/volume.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index 3103aa74e..7b8dea03a 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -149,9 +149,9 @@ func unmountCephRoot(volID volumeID) { if err := unmountVolume(cephRoot); err != nil { klog.Errorf("failed to unmount %s with error %s", cephRoot, err) - } - - if err := os.Remove(cephRoot); err != nil { - klog.Errorf("failed to remove %s with error %s", cephRoot, err) + } else { + if err := os.Remove(cephRoot); err != nil { + klog.Errorf("failed to remove %s with error %s", cephRoot, err) + } } } From 3b320ef19e54dc89e04b734d4e04acbe802ee51a Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 18 Feb 2019 13:52:52 +0530 Subject: [PATCH 28/89] Add support of RBD list volumes currently all the created volumes are stored in the metadata store, so we can use this information to support list volumes. Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 40 +++++++++++++++++++++++++++++++++++++ pkg/rbd/rbd.go | 1 + pkg/rbd/rbd_util.go | 31 ++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 69b237693..d7d1c566c 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -19,6 +19,7 @@ package rbd import ( "fmt" "os/exec" + "strconv" "syscall" "github.com/ceph/ceph-csi/pkg/util" @@ -275,6 +276,45 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol return &csi.DeleteVolumeResponse{}, nil } +// ListVolumes returns a list of volumes stored in memory +func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { + + if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_VOLUMES); err != nil { + klog.Warningf("invalid list volume req: %v", req) + return nil, err + } + + //validate starting token if present + if len(req.GetStartingToken()) > 0 { + i, parseErr := strconv.ParseUint(req.StartingToken, 10, 32) + if parseErr != nil { + return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error()) + } + //check starting Token is greater than list of rbd volumes + if len(rbdVolumes) < int(i) { + return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error()) + } + } + + var entries []*csi.ListVolumesResponse_Entry + + for _, vol := range rbdVolumes { + entries = append(entries, &csi.ListVolumesResponse_Entry{ + Volume: &csi.Volume{ + VolumeId: vol.VolID, + CapacityBytes: vol.VolSize, + VolumeContext: extractStoredVolOpt(vol), + }, + }) + } + + resp := &csi.ListVolumesResponse{ + Entries: entries, + } + + return resp, nil +} + // ValidateVolumeCapabilities checks whether the volume capabilities requested // are supported. 
 func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go
index 8740e8c26..bdb97be24 100644
--- a/pkg/rbd/rbd.go
+++ b/pkg/rbd/rbd.go
@@ -98,6 +98,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, ca
 	r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
 		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
 		csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
+		csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
 		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
 		csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
 		csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go
index 7df546553..e2784ddcd 100644
--- a/pkg/rbd/rbd_util.go
+++ b/pkg/rbd/rbd_util.go
@@ -383,6 +383,37 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]
 	return nil
 }
 
+func extractStoredVolOpt(r *rbdVolume) map[string]string {
+	volOptions := make(map[string]string)
+	volOptions["pool"] = r.Pool
+
+	if len(r.Monitors) > 0 {
+		volOptions["monitors"] = r.Monitors
+	}
+
+	if len(r.MonValueFromSecret) > 0 {
+		volOptions["monValueFromSecret"] = r.MonValueFromSecret
+	}
+
+	volOptions["imageFormat"] = r.ImageFormat
+
+	if len(r.ImageFeatures) > 0 {
+		volOptions["imageFeatures"] = r.ImageFeatures
+	}
+
+	if len(r.AdminID) > 0 {
+		volOptions["adminid"] = r.AdminID
+	}
+
+	if len(r.UserID) > 0 {
+		volOptions["userid"] = r.UserID
+	}
+	if len(r.Mounter) > 0 {
+		volOptions["mounter"] = r.Mounter
+	}
+	return volOptions
+}
+
 func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
 	var output []byte
 
From e861f12a607bced02704dc86614fcef9a818fb92 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Tue, 26 Feb 2019 16:39:39 +0530
Subject: [PATCH 29/89] Fix review comments

Signed-off-by: Madhu Rajanna
---
 pkg/rbd/rbd_util.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go
index e2784ddcd..8c1bd16eb 100644
--- a/pkg/rbd/rbd_util.go
+++ b/pkg/rbd/rbd_util.go
@@ -402,11 +402,11 @@ func extractStoredVolOpt(r *rbdVolume) map[string]string {
 	}
 
 	if len(r.AdminID) > 0 {
-		volOptions["adminid"] = r.AdminID
+		volOptions["adminId"] = r.AdminID
 	}
 
 	if len(r.UserID) > 0 {
-		volOptions["userid"] = r.UserID
+		volOptions["userId"] = r.UserID
 	}
 	if len(r.Mounter) > 0 {
 		volOptions["mounter"] = r.Mounter
From 1cedbf0eee6e325da441010f3961289b71b4f8bb Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Tue, 26 Feb 2019 18:49:00 +0530
Subject: [PATCH 30/89] sort volume IDs for rbd volume list

Looping over a map is not guaranteed to be ordered, so we need to
sort the volume IDs for the ListVolumes RPC in the rbd plugin.
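A self-contained sketch of the pattern (volume names illustrative, not
taken from the plugin): Go deliberately randomizes map iteration order,
so a stable, pageable ListVolumes response has to collect and sort the
keys before walking the map.

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical volume map, keyed by volume ID.
	volumes := map[string]int64{
		"csi-rbd-c": 3 << 30,
		"csi-rbd-a": 1 << 30,
		"csi-rbd-b": 2 << 30,
	}

	// Ranging over the map directly may yield a different order on
	// every run; sorting the keys gives a deterministic listing, which
	// is what makes a numeric starting token usable for pagination.
	keys := make([]string, 0, len(volumes))
	for k := range volumes {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s: %d bytes\n", k, volumes[k])
	}
}
```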
Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index d7d1c566c..f8a6dd7db 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -19,6 +19,7 @@ package rbd import ( "fmt" "os/exec" + "sort" "strconv" "syscall" @@ -278,7 +279,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol // ListVolumes returns a list of volumes stored in memory func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { - + var startToken int if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_VOLUMES); err != nil { klog.Warningf("invalid list volume req: %v", req) return nil, err @@ -294,16 +295,26 @@ func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume if len(rbdVolumes) < int(i) { return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error()) } + startToken = int(i) } var entries []*csi.ListVolumesResponse_Entry - for _, vol := range rbdVolumes { + keys := make([]string, 0) + for k := range rbdVolumes { + keys = append(keys, k) + } + sort.Strings(keys) + + for index, k := range keys { + if index < startToken { + continue + } entries = append(entries, &csi.ListVolumesResponse_Entry{ Volume: &csi.Volume{ - VolumeId: vol.VolID, - CapacityBytes: vol.VolSize, - VolumeContext: extractStoredVolOpt(vol), + VolumeId: rbdVolumes[k].VolID, + CapacityBytes: rbdVolumes[k].VolSize, + VolumeContext: extractStoredVolOpt(rbdVolumes[k]), }, }) } From 3a0d048186c8a9fce33b2a23ad3b81d1ce358339 Mon Sep 17 00:00:00 2001 From: gman Date: Tue, 26 Feb 2019 17:57:24 +0100 Subject: [PATCH 31/89] cephfs/volumemounter: unmountVolume now waits till the ceph-fuse daemon exits --- pkg/cephfs/volumemounter.go | 60 +++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/pkg/cephfs/volumemounter.go b/pkg/cephfs/volumemounter.go index 3119a2474..7d923d4a5 100644 --- a/pkg/cephfs/volumemounter.go +++ b/pkg/cephfs/volumemounter.go @@ -22,6 +22,10 @@ import ( "fmt" "os" "os/exec" + "strconv" + "sync" + + "k8s.io/klog" ) const ( @@ -31,6 +35,10 @@ const ( var ( availableMounters []string + + // maps a mountpoint to PID of its FUSE daemon + fusePidMap = make(map[string]int) + fusePidMapMtx sync.Mutex ) // Load available ceph mounters installed on system into availableMounters @@ -116,10 +124,36 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, vo return err } - if !bytes.Contains(stderr, []byte("starting fuse")) { + // Parse the output: + // We need "starting fuse" meaning the mount is ok + // and PID of the ceph-fuse daemon for unmount + + idx := bytes.Index(stderr, []byte("starting fuse")) + if idx < 0 { return fmt.Errorf("ceph-fuse failed: %s", stderr) } + pidParseErr := fmt.Errorf("failed to read FUSE daemon PID: %s", stderr) + + pidEnd := bytes.LastIndexByte(stderr[:idx], ']') + if pidEnd < 0 { + return pidParseErr + } + + pidStart := bytes.LastIndexByte(stderr[:pidEnd], '[') + if pidStart < 0 { + return pidParseErr + } + + pid, err := strconv.Atoi(string(stderr[pidStart+1 : pidEnd])) + if err != nil { + return fmt.Errorf("failed to parse FUSE daemon PID: %v", err) + } + + fusePidMapMtx.Lock() + fusePidMap[mountPoint] = pid + fusePidMapMtx.Unlock() + return nil } @@ -173,7 +207,29 @@ func bindMount(from, 
to string, readOnly bool) error { } func unmountVolume(mountPoint string) error { - return execCommandErr("umount", mountPoint) + if err := execCommandErr("umount", mountPoint); err != nil { + return err + } + + fusePidMapMtx.Lock() + pid, ok := fusePidMap[mountPoint] + if ok { + delete(fusePidMap, mountPoint) + } + fusePidMapMtx.Unlock() + + if ok { + p, err := os.FindProcess(pid) + if err != nil { + klog.Warningf("failed to find process %d: %v", pid, err) + } else { + if _, err = p.Wait(); err != nil { + klog.Warningf("%d is not a child process: %v", pid, err) + } + } + } + + return nil } func createMountPoint(root string) error { From fdc0d8255a5b91eb2e05809c6e42ba69c7b5e9d2 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 18 Feb 2019 17:00:28 +0530 Subject: [PATCH 32/89] move csi-common to ceph-csi kubernetes/driver/csi-common is no longer maintained. Signed-off-by: Madhu Rajanna --- Gopkg.lock | 18 +- Gopkg.toml | 4 - pkg/cephfs/controllerserver.go | 9 +- pkg/cephfs/driver.go | 6 +- pkg/cephfs/identityserver.go | 3 +- pkg/cephfs/nodeserver.go | 6 +- .../csi-common/controllerserver-default.go | 24 +- .../drivers/pkg => pkg}/csi-common/driver.go | 30 +- .../csi-common/identityserver-default.go | 10 +- .../csi-common/nodeserver-default.go | 16 +- .../drivers/pkg => pkg}/csi-common/server.go | 33 +- .../drivers/pkg => pkg}/csi-common/utils.go | 22 +- pkg/rbd/controllerserver.go | 3 +- pkg/rbd/identityserver.go | 3 +- pkg/rbd/nodeserver.go | 18 +- pkg/rbd/rbd.go | 7 +- vendor/github.com/golang/glog/LICENSE | 191 --- vendor/github.com/golang/glog/glog.go | 1180 ----------------- vendor/github.com/golang/glog/glog_file.go | 124 -- .../github.com/kubernetes-csi/drivers/LICENSE | 201 --- 20 files changed, 102 insertions(+), 1806 deletions(-) rename {vendor/github.com/kubernetes-csi/drivers/pkg => pkg}/csi-common/controllerserver-default.go (78%) rename {vendor/github.com/kubernetes-csi/drivers/pkg => pkg}/csi-common/driver.go (73%) rename {vendor/github.com/kubernetes-csi/drivers/pkg => pkg}/csi-common/identityserver-default.go (86%) rename {vendor/github.com/kubernetes-csi/drivers/pkg => pkg}/csi-common/nodeserver-default.go (72%) rename {vendor/github.com/kubernetes-csi/drivers/pkg => pkg}/csi-common/server.go (75%) rename {vendor/github.com/kubernetes-csi/drivers/pkg => pkg}/csi-common/utils.go (76%) delete mode 100644 vendor/github.com/golang/glog/LICENSE delete mode 100644 vendor/github.com/golang/glog/glog.go delete mode 100644 vendor/github.com/golang/glog/glog_file.go delete mode 100644 vendor/github.com/kubernetes-csi/drivers/LICENSE diff --git a/Gopkg.lock b/Gopkg.lock index 0a46d8f9b..d3270adf6 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -20,14 +20,6 @@ revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7" version = "v1.2.0" -[[projects]] - branch = "master" - digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "NUT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - [[projects]] digest = "1:bff0ce7c8e3d6357fa5a8549bbe4bdb620bddc13c11ae569aa7248ea92e2139f" name = "github.com/golang/protobuf" @@ -116,14 +108,6 @@ revision = "5853414e1d4771302e0df10d1870c444c2135799" version = "v0.2.0" -[[projects]] - branch = "master" - digest = "1:0bde3fb932a1aa4e12bc43ef91157fcda27dd0fc5d9f309647544ceaec075f48" - name = "github.com/kubernetes-csi/drivers" - packages = ["pkg/csi-common"] - pruneopts = "NUT" - revision = "05e1ea84df03b90296869812fa42f4244bd5ab53" - [[projects]] digest = 
"1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" name = "github.com/modern-go/concurrent" @@ -534,10 +518,10 @@ "github.com/golang/protobuf/ptypes", "github.com/golang/protobuf/ptypes/timestamp", "github.com/kubernetes-csi/csi-lib-utils/protosanitizer", - "github.com/kubernetes-csi/drivers/pkg/csi-common", "github.com/pborman/uuid", "github.com/pkg/errors", "golang.org/x/net/context", + "google.golang.org/grpc", "google.golang.org/grpc/codes", "google.golang.org/grpc/status", "k8s.io/api/core/v1", diff --git a/Gopkg.toml b/Gopkg.toml index fe76bd62f..04deea00b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -2,10 +2,6 @@ name = "github.com/container-storage-interface/spec" version = "~1.0.0" -[[constraint]] - branch = "master" - name = "github.com/kubernetes-csi/drivers" - [[override]] revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d" name = "github.com/docker/distribution" diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go index a3ea1290e..c2e596cbf 100644 --- a/pkg/cephfs/controllerserver.go +++ b/pkg/cephfs/controllerserver.go @@ -17,16 +17,15 @@ limitations under the License. package cephfs import ( + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" + "github.com/ceph/ceph-csi/pkg/util" + + "github.com/container-storage-interface/spec/lib/go/csi" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/klog" - - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/drivers/pkg/csi-common" "k8s.io/kubernetes/pkg/util/keymutex" - - "github.com/ceph/ceph-csi/pkg/util" ) // ControllerServer struct of CEPH CSI driver with supported methods of CSI diff --git a/pkg/cephfs/driver.go b/pkg/cephfs/driver.go index d69d11aa7..43a25b740 100644 --- a/pkg/cephfs/driver.go +++ b/pkg/cephfs/driver.go @@ -19,10 +19,10 @@ package cephfs import ( "k8s.io/klog" - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/drivers/pkg/csi-common" - + "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/util" + + "github.com/container-storage-interface/spec/lib/go/csi" ) const ( diff --git a/pkg/cephfs/identityserver.go b/pkg/cephfs/identityserver.go index 9f3a6b4fd..cf343ca89 100644 --- a/pkg/cephfs/identityserver.go +++ b/pkg/cephfs/identityserver.go @@ -19,8 +19,9 @@ package cephfs import ( "context" + "github.com/ceph/ceph-csi/pkg/csi-common" + "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/drivers/pkg/csi-common" ) // IdentityServer struct of ceph CSI driver with supported methods of CSI diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index a5ffe1ad3..4c97a80f2 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -21,13 +21,13 @@ import ( "fmt" "os" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" + + "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/klog" "k8s.io/kubernetes/pkg/util/keymutex" - - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/drivers/pkg/csi-common" ) // NodeServer struct of ceph CSI driver with supported methods of CSI diff --git a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/controllerserver-default.go b/pkg/csi-common/controllerserver-default.go similarity index 78% rename from vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/controllerserver-default.go rename to pkg/csi-common/controllerserver-default.go 
index db72dc2e9..0424021b7 100644
--- a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/controllerserver-default.go
+++ b/pkg/csi-common/controllerserver-default.go
@@ -18,40 +18,33 @@ package csicommon
 
 import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
-	"github.com/golang/glog"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"k8s.io/klog"
 )
 
+// DefaultControllerServer points to the default driver
 type DefaultControllerServer struct {
 	Driver *CSIDriver
 }
 
-func (cs *DefaultControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "")
-}
-
-func (cs *DefaultControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "")
-}
-
+// ControllerPublishVolume publishes a volume on a node
 func (cs *DefaultControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
+// ControllerUnpublishVolume unpublishes a volume from a node
 func (cs *DefaultControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
-func (cs *DefaultControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "")
-}
-
+// ListVolumes lists volumes
 func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
+// GetCapacity gets volume capacity
 func (cs *DefaultControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
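To see how these defaults are meant to be consumed, here is a brief sketch (not part of the patch; `mydriver` and `myControllerServer` are illustrative names): a concrete driver embeds DefaultControllerServer, supplies the RPCs this change removes from the defaults (CreateVolume and friends), and inherits the Unimplemented stubs for everything else.

package mydriver

import (
	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"golang.org/x/net/context"
)

// myControllerServer inherits the remaining Unimplemented stubs and only
// fills in the calls this hypothetical driver supports.
type myControllerServer struct {
	*csicommon.DefaultControllerServer
}

// CreateVolume must now come from the driver itself, since this patch
// drops the stub from DefaultControllerServer.
func (cs *myControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	// real provisioning logic would go here
	return &csi.CreateVolumeResponse{}, nil
}

@@ -59,21 +52,24 @@ func (cs *DefaultControllerServer) GetCapacity(ctx context.Context, req *csi.Get
 
 // ControllerGetCapabilities implements the default GRPC callout.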
 // Default supports all capabilities
 func (cs *DefaultControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
-	glog.V(5).Infof("Using default ControllerGetCapabilities")
+	klog.V(5).Infof("Using default ControllerGetCapabilities")
 
 	return &csi.ControllerGetCapabilitiesResponse{
 		Capabilities: cs.Driver.cap,
 	}, nil
 }
 
+// CreateSnapshot creates snapshot
 func (cs *DefaultControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
+// DeleteSnapshot deletes snapshot
 func (cs *DefaultControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
+// ListSnapshots lists snapshots
 func (cs *DefaultControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
diff --git a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/driver.go b/pkg/csi-common/driver.go
similarity index 73%
rename from vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/driver.go
rename to pkg/csi-common/driver.go
index d20c6c808..f479fca9c 100644
--- a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/driver.go
+++ b/pkg/csi-common/driver.go
@@ -19,13 +19,13 @@ package csicommon
 import (
 	"fmt"
 
-	"github.com/golang/glog"
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-
-	"github.com/container-storage-interface/spec/lib/go/csi"
+	"k8s.io/klog"
 )
 
+// CSIDriver stores driver information
 type CSIDriver struct {
 	name   string
 	nodeID string
@@ -34,21 +34,22 @@ type CSIDriver struct {
 	vc []*csi.VolumeCapability_AccessMode
 }
 
-// Creates a NewCSIDriver object. Assumes vendor version is equal to driver version &
-// does not support optional driver plugin info manifest field. Refer to CSI spec for more details.
+// NewCSIDriver creates a new CSIDriver object. Assumes vendor
+// version is equal to driver version & does not support optional
+// driver plugin info manifest field. Refer to CSI spec for more details.
func NewCSIDriver(name string, v string, nodeID string) *CSIDriver { if name == "" { - glog.Errorf("Driver name missing") + klog.Errorf("Driver name missing") return nil } if nodeID == "" { - glog.Errorf("NodeID missing") + klog.Errorf("NodeID missing") return nil } // TODO version format and validation if len(v) == 0 { - glog.Errorf("Version argument missing") + klog.Errorf("Version argument missing") return nil } @@ -61,6 +62,8 @@ func NewCSIDriver(name string, v string, nodeID string) *CSIDriver { return &driver } +// ValidateControllerServiceRequest validates the controller +// plugin capabilities func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error { if c == csi.ControllerServiceCapability_RPC_UNKNOWN { return nil @@ -71,32 +74,35 @@ func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapa return nil } } - return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c)) + return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c)) //nolint } +// AddControllerServiceCapabilities stores the controller capabilities +// in driver object func (d *CSIDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) { var csc []*csi.ControllerServiceCapability for _, c := range cl { - glog.Infof("Enabling controller service capability: %v", c.String()) + klog.Infof("Enabling controller service capability: %v", c.String()) csc = append(csc, NewControllerServiceCapability(c)) } d.cap = csc - return } +// AddVolumeCapabilityAccessModes stores volume access modes func (d *CSIDriver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode { var vca []*csi.VolumeCapability_AccessMode for _, c := range vc { - glog.Infof("Enabling volume access mode: %v", c.String()) + klog.Infof("Enabling volume access mode: %v", c.String()) vca = append(vca, NewVolumeCapabilityAccessMode(c)) } d.vc = vca return vca } +// GetVolumeCapabilityAccessModes returns access modes func (d *CSIDriver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode { return d.vc } diff --git a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/identityserver-default.go b/pkg/csi-common/identityserver-default.go similarity index 86% rename from vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/identityserver-default.go rename to pkg/csi-common/identityserver-default.go index b16b67c09..a206836ae 100644 --- a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/identityserver-default.go +++ b/pkg/csi-common/identityserver-default.go @@ -18,18 +18,20 @@ package csicommon import ( "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/glog" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "k8s.io/klog" ) +// DefaultIdentityServer stores driver object type DefaultIdentityServer struct { Driver *CSIDriver } +// GetPluginInfo returns plugin information func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { - glog.V(5).Infof("Using default GetPluginInfo") + klog.V(5).Infof("Using default GetPluginInfo") if ids.Driver.name == "" { return nil, status.Error(codes.Unavailable, "Driver name not configured") @@ -45,12 +47,14 @@ func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.Ge }, nil } +// Probe returns empty response func (ids *DefaultIdentityServer) Probe(ctx context.Context, req 
*csi.ProbeRequest) (*csi.ProbeResponse, error) {
 	return &csi.ProbeResponse{}, nil
 }
 
+// GetPluginCapabilities returns plugin capabilities
 func (ids *DefaultIdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
-	glog.V(5).Infof("Using default capabilities")
+	klog.V(5).Infof("Using default capabilities")
 	return &csi.GetPluginCapabilitiesResponse{
 		Capabilities: []*csi.PluginCapability{
 			{
diff --git a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/nodeserver-default.go b/pkg/csi-common/nodeserver-default.go
similarity index 72%
rename from vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/nodeserver-default.go
rename to pkg/csi-common/nodeserver-default.go
index cd2355bf2..f24b63d6b 100644
--- a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/nodeserver-default.go
+++ b/pkg/csi-common/nodeserver-default.go
@@ -18,34 +18,39 @@ package csicommon
 
 import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
-	"github.com/golang/glog"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"k8s.io/klog"
 )
 
+// DefaultNodeServer stores driver object
 type DefaultNodeServer struct {
 	Driver *CSIDriver
 }
 
-func (ns *DefaultNodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
+// NodeStageVolume returns unimplemented response
+func (ns *DefaultNodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
-func (ns *DefaultNodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
+// NodeUnstageVolume returns unimplemented response
+func (ns *DefaultNodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
 
+// NodeGetInfo returns node ID
 func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
-	glog.V(5).Infof("Using default NodeGetInfo")
+	klog.V(5).Infof("Using default NodeGetInfo")
 
 	return &csi.NodeGetInfoResponse{
 		NodeId: ns.Driver.nodeID,
 	}, nil
 }
 
+// NodeGetCapabilities returns RPC unknown capability
 func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
-	glog.V(5).Infof("Using default NodeGetCapabilities")
+	klog.V(5).Infof("Using default NodeGetCapabilities")
 
 	return &csi.NodeGetCapabilitiesResponse{
 		Capabilities: []*csi.NodeServiceCapability{
 			{
@@ -60,6 +65,7 @@ func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.N
 	}, nil
 }
 
+// NodeGetVolumeStats returns volume stats
 func (ns *DefaultNodeServer) NodeGetVolumeStats(ctx context.Context, in *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
 	return nil, status.Error(codes.Unimplemented, "")
 }
diff --git a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/server.go b/pkg/csi-common/server.go
similarity index 75%
rename from vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/server.go
rename to pkg/csi-common/server.go
index 9d3c99523..caaaa8bb8 100644
--- a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/server.go
+++ b/pkg/csi-common/server.go
@@ -21,13 +21,12 @@ import (
 	"os"
 	"sync"
 
-	"github.com/golang/glog"
"github.com/golang/glog" - "google.golang.org/grpc" - "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + "k8s.io/klog" ) -// Defines Non blocking GRPC server interfaces +// NonBlockingGRPCServer defines Non blocking GRPC server interfaces type NonBlockingGRPCServer interface { // Start services at the endpoint Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) @@ -39,6 +38,7 @@ type NonBlockingGRPCServer interface { ForceStop() } +// NewNonBlockingGRPCServer return non-blocking GRPC func NewNonBlockingGRPCServer() NonBlockingGRPCServer { return &nonBlockingGRPCServer{} } @@ -49,44 +49,45 @@ type nonBlockingGRPCServer struct { server *grpc.Server } +// Start start service on endpoint func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { s.wg.Add(1) - go s.serve(endpoint, ids, cs, ns) - - return } +// Wait blocks until the WaitGroup counter func (s *nonBlockingGRPCServer) Wait() { s.wg.Wait() } +// GracefulStop stops the gRPC server gracefully. func (s *nonBlockingGRPCServer) Stop() { s.server.GracefulStop() } +// Stop stops the gRPC server. func (s *nonBlockingGRPCServer) ForceStop() { s.server.Stop() } func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { - proto, addr, err := ParseEndpoint(endpoint) + proto, addr, err := parseEndpoint(endpoint) if err != nil { - glog.Fatal(err.Error()) + klog.Fatal(err.Error()) } if proto == "unix" { addr = "/" + addr - if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", addr, err.Error()) + if e := os.Remove(addr); e != nil && !os.IsNotExist(e) { + klog.Fatalf("Failed to remove %s, error: %s", addr, e.Error()) } } listener, err := net.Listen(proto, addr) if err != nil { - glog.Fatalf("Failed to listen: %v", err) + klog.Fatalf("Failed to listen: %v", err) } opts := []grpc.ServerOption{ @@ -105,8 +106,10 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c csi.RegisterNodeServer(server, ns) } - glog.Infof("Listening for connections on address: %#v", listener.Addr()) - - server.Serve(listener) + klog.Infof("Listening for connections on address: %#v", listener.Addr()) + err = server.Serve(listener) + if err != nil { + klog.Fatalf("Failed to server: %v", err) + } } diff --git a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/utils.go b/pkg/csi-common/utils.go similarity index 76% rename from vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/utils.go rename to pkg/csi-common/utils.go index b39132e0d..c6ebb5ac8 100644 --- a/vendor/github.com/kubernetes-csi/drivers/pkg/csi-common/utils.go +++ b/pkg/csi-common/utils.go @@ -21,44 +21,49 @@ import ( "strings" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/glog" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" "golang.org/x/net/context" "google.golang.org/grpc" + "k8s.io/klog" ) -func ParseEndpoint(ep string) (string, string, error) { +func parseEndpoint(ep string) (string, string, error) { if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") { s := strings.SplitN(ep, "://", 2) if s[1] != "" { return s[0], s[1], nil } } - return "", "", fmt.Errorf("Invalid endpoint: %v", ep) + return "", "", fmt.Errorf("invalid endpoint: %v", ep) } +// NewVolumeCapabilityAccessMode returns volume access mode func 
NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode {
 	return &csi.VolumeCapability_AccessMode{Mode: mode}
 }
 
+// NewDefaultNodeServer initializes default node server
 func NewDefaultNodeServer(d *CSIDriver) *DefaultNodeServer {
 	return &DefaultNodeServer{
 		Driver: d,
 	}
 }
 
+// NewDefaultIdentityServer initializes default identity server
 func NewDefaultIdentityServer(d *CSIDriver) *DefaultIdentityServer {
 	return &DefaultIdentityServer{
 		Driver: d,
 	}
 }
 
+// NewDefaultControllerServer initializes default controller server
 func NewDefaultControllerServer(d *CSIDriver) *DefaultControllerServer {
 	return &DefaultControllerServer{
 		Driver: d,
 	}
 }
 
+// NewControllerServiceCapability returns controller capabilities
 func NewControllerServiceCapability(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability {
 	return &csi.ControllerServiceCapability{
 		Type: &csi.ControllerServiceCapability_Rpc{
@@ -69,6 +74,7 @@ func NewControllerServiceCapability(cap csi.ControllerServiceCapability_RPC_Type
 	}
 }
 
+// RunNodePublishServer starts node server
 func RunNodePublishServer(endpoint string, d *CSIDriver, ns csi.NodeServer) {
 	ids := NewDefaultIdentityServer(d)
 
@@ -77,6 +83,7 @@ func RunNodePublishServer(endpoint string, d *CSIDriver, ns csi.NodeServer) {
 	s.Wait()
 }
 
+// RunControllerPublishServer starts controller server
 func RunControllerPublishServer(endpoint string, d *CSIDriver, cs csi.ControllerServer) {
 	ids := NewDefaultIdentityServer(d)
 
@@ -85,6 +92,7 @@ func RunControllerPublishServer(endpoint string, d *CSIDriver, cs csi.Controller
 	s.Wait()
 }
 
+// RunControllerandNodePublishServer starts both controller and node server
 func RunControllerandNodePublishServer(endpoint string, d *CSIDriver, cs csi.ControllerServer, ns csi.NodeServer) {
 	ids := NewDefaultIdentityServer(d)
 
@@ -94,13 +102,13 @@ func RunControllerandNodePublishServer(endpoint string, d *CSIDriver, cs csi.Con
 }
 
 func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	glog.V(3).Infof("GRPC call: %s", info.FullMethod)
-	glog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
+	klog.V(3).Infof("GRPC call: %s", info.FullMethod)
+	klog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
 	resp, err := handler(ctx, req)
 	if err != nil {
-		glog.Errorf("GRPC error: %v", err)
+		klog.Errorf("GRPC error: %v", err)
 	} else {
-		glog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
+		klog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
 	}
 	return resp, err
 }
diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go
index f8a6dd7db..f5eb1400a 100644
--- a/pkg/rbd/controllerserver.go
+++ b/pkg/rbd/controllerserver.go
@@ -23,12 +23,13 @@ import (
 	"strconv"
 	"syscall"
 
+	"github.com/ceph/ceph-csi/pkg/csi-common"
 	"github.com/ceph/ceph-csi/pkg/util"
+
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/timestamp"
 	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
-	"github.com/kubernetes-csi/drivers/pkg/csi-common"
 	"github.com/pborman/uuid"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
diff --git a/pkg/rbd/identityserver.go b/pkg/rbd/identityserver.go
index 856bc50a8..155586b31 100644
--- a/pkg/rbd/identityserver.go
+++ b/pkg/rbd/identityserver.go
@@ -19,8 +19,9 @@ package rbd
 import (
 	"context"
 
+	"github.com/ceph/ceph-csi/pkg/csi-common"
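Taken together, the helpers above form the bootstrap surface of the relocated package. A minimal sketch of how a driver could wire them up (the driver name, version, node ID and endpoint below are illustrative placeholders, not values from this patch):

package main

import (
	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// NewCSIDriver returns nil when the name, version or node ID is empty.
	d := csicommon.NewCSIDriver("example.csi.driver", "0.1.0", "node-1")
	if d == nil {
		panic("failed to initialize CSI driver")
	}

	d.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
	})
	d.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
		csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
	})

	// A concrete driver embeds DefaultControllerServer/DefaultNodeServer,
	// provides the RPCs removed from the defaults, and then serves them with:
	//
	//	csicommon.RunControllerandNodePublishServer(endpoint, d, cs, ns)
	//
	// logGRPC above is installed as the unary interceptor by serve().
}

+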
"github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/drivers/pkg/csi-common" ) // IdentityServer struct of rbd CSI driver with supported methods of CSI diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go index d1eb374c1..21d7ae829 100644 --- a/pkg/rbd/nodeserver.go +++ b/pkg/rbd/nodeserver.go @@ -23,16 +23,14 @@ import ( "regexp" "strings" - "golang.org/x/net/context" - "k8s.io/klog" + "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/container-storage-interface/spec/lib/go/csi" + "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" - - "github.com/kubernetes-csi/drivers/pkg/csi-common" ) // NodeServer struct of ceph rbd driver with supported methods of CSI @@ -45,16 +43,6 @@ type NodeServer struct { //TODO remove both stage and unstage methods //once https://github.com/kubernetes-csi/drivers/pull/145 is merged -// NodeStageVolume returns unimplemented response -func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - -// NodeUnstageVolume returns unimplemented response -func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") -} - // NodePublishVolume mounts the volume mounted to the device path to the target // path func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index bdb97be24..73911aec4 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -17,12 +17,11 @@ limitations under the License. package rbd import ( - "k8s.io/klog" - + "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/util" - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/drivers/pkg/csi-common" + "github.com/container-storage-interface/spec/lib/go/csi" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/nsenter" "k8s.io/utils/exec" diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. 
- -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. 
Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 54bd7afdc..000000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. 
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. 
-func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. 
- logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. 
- t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. 
-// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' 
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. 
- l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. 
-func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. 
- if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. 
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d281..000000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/vendor/github.com/kubernetes-csi/drivers/LICENSE b/vendor/github.com/kubernetes-csi/drivers/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/kubernetes-csi/drivers/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
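The documentation in the glog sources deleted above explains the `V(level)` guard idiom; that idiom carries over unchanged to `k8s.io/klog`, which the following patch switches the drivers to. A minimal, hypothetical sketch of the same idiom under klog (illustration only, not part of any patch in this series):

```
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags (-v, -vmodule, -logtostderr, ...) on the
	// default FlagSet, then parse them.
	klog.InitFlags(nil)
	flag.Parse()

	// Guarded form: the argument list is only evaluated when -v >= 2,
	// which keeps disabled verbose logging cheap.
	if klog.V(2) {
		klog.Info("attach: verbose details")
	}

	// Short form: equivalent output, but arguments are always evaluated.
	klog.V(2).Infof("mapped image %s", "rbd/foo")

	klog.Flush()
}
```

Run the binary with `-v=2` to see both messages; with verbosity off, the guarded form skips argument evaluation entirely.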
From 6f4f148d3bf8601223eee9d0cae72afa832baf8b Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 19 Feb 2019 19:19:40 +0530 Subject: [PATCH 33/89] remove glog Signed-off-by: Madhu Rajanna --- cmd/cephfs/main.go | 10 +++++++++- cmd/rbd/main.go | 10 +++++++++- pkg/util/log.go | 45 --------------------------------------------- 3 files changed, 18 insertions(+), 47 deletions(-) delete mode 100644 pkg/util/log.go diff --git a/cmd/cephfs/main.go b/cmd/cephfs/main.go index 719887763..8ee9141e6 100644 --- a/cmd/cephfs/main.go +++ b/cmd/cephfs/main.go @@ -22,6 +22,7 @@ import ( "github.com/ceph/ceph-csi/pkg/cephfs" "github.com/ceph/ceph-csi/pkg/util" + "k8s.io/klog" ) var ( @@ -32,8 +33,15 @@ var ( metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") ) +func init() { + klog.InitFlags(nil) + if err := flag.Set("logtostderr", "true"); err != nil { + klog.Exitf("failed to set logtostderr flag: %v", err) + } + flag.Parse() +} + func main() { - util.InitLogging() cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName) if err != nil { diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go index 6aec53ea7..6f61c637f 100644 --- a/cmd/rbd/main.go +++ b/cmd/rbd/main.go @@ -22,6 +22,7 @@ import ( "github.com/ceph/ceph-csi/pkg/rbd" "github.com/ceph/ceph-csi/pkg/util" + "k8s.io/klog" ) var ( @@ -32,8 +33,15 @@ var ( metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") ) +func init() { + klog.InitFlags(nil) + if err := flag.Set("logtostderr", "true"); err != nil { + klog.Exitf("failed to set logtostderr flag: %v", err) + } + flag.Parse() +} + func main() { - util.InitLogging() cp, err := util.CreatePersistanceStorage(rbd.PluginFolder, *metadataStorage, *driverName) if err != nil { diff --git a/pkg/util/log.go b/pkg/util/log.go deleted file mode 100644 index 3d500aaeb..000000000 --- a/pkg/util/log.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "flag" - "os" - - "k8s.io/klog" -) - -// InitLogging initializes klog alongside glog -// XXX: This is just a temporary solution till all deps move to klog -func InitLogging() { - if err := flag.Set("logtostderr", "true"); err != nil { - klog.Errorf("failed to set logtostderr flag: %v", err) - os.Exit(1) - } - - flag.Parse() - - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - - // Sync klog flags with glog - flag.CommandLine.VisitAll(func(f1 *flag.Flag) { - if f2 := klogFlags.Lookup(f1.Name); f2 != nil { - f2.Value.Set(f1.Value.String()) // nolint: errcheck, gosec - } - }) -} From c9815e99a969ada12c8b27a10b7b2fe5e82d2ef7 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 27 Feb 2019 16:38:20 +0530 Subject: [PATCH 34/89] Fix rbac issue in cephfs plugin remove unwanted rules and update rbac to have permission to modify endpoints and configmaps in the current namespace. 
Signed-off-by: Madhu Rajanna --- .../cephfs/kubernetes/csi-attacher-rbac.yaml | 3 -- .../kubernetes/csi-nodeplugin-rbac.yaml | 3 -- .../kubernetes/csi-provisioner-rbac.yaml | 38 ++++++++++++++++--- 3 files changed, 32 insertions(+), 12 deletions(-) diff --git a/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml b/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml index 94dc8f7a8..e0bcbd81a 100644 --- a/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml @@ -10,9 +10,6 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: cephfs-external-attacher-runner rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "update"] - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "update"] diff --git a/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml b/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml index 7e6b075b6..cc2919b0e 100644 --- a/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml @@ -22,9 +22,6 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] --- kind: ClusterRoleBinding diff --git a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml index 796dc86b0..7bf0da300 100644 --- a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml @@ -22,12 +22,6 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "create", "delete"] --- kind: ClusterRoleBinding @@ -42,3 +36,35 @@ roleRef: kind: ClusterRole name: cephfs-external-provisioner-runner apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # replace with non-default namespace name + namespace: default + name: cephfs-external-provisioner-cfg +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "create", "delete"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role-cfg + # replace with non-default namespace name + namespace: default +subjects: + - kind: ServiceAccount + name: cephfs-csi-provisioner + # replace with non-default namespace name + namespace: default +roleRef: + kind: Role + name: cephfs-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io From 119504c0045dee877b4b0298ee3f532d3ca690ec Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 27 Feb 2019 16:44:46 +0530 Subject: [PATCH 35/89] Add role and rolebinding for cephfs Signed-off-by: Madhu Rajanna --- .../helm/templates/provisioner-role.yaml | 19 +++++++++++++++++ .../templates/provisioner-rolebinding.yaml | 21 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 deploy/cephfs/helm/templates/provisioner-role.yaml create mode 100644 deploy/cephfs/helm/templates/provisioner-rolebinding.yaml diff --git a/deploy/cephfs/helm/templates/provisioner-role.yaml b/deploy/cephfs/helm/templates/provisioner-role.yaml new file mode 100644 index 000000000..c6f28c40e --- /dev/null +++ 
b/deploy/cephfs/helm/templates/provisioner-role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "delete"] +{{- end -}} diff --git a/deploy/cephfs/helm/templates/provisioner-rolebinding.yaml b/deploy/cephfs/helm/templates/provisioner-rolebinding.yaml new file mode 100644 index 000000000..63dc9503b --- /dev/null +++ b/deploy/cephfs/helm/templates/provisioner-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + labels: + app: {{ include "ceph-csi-cephfs.name" . }} + chart: {{ include "ceph-csi-cephfs.chart" . }} + component: {{ .Values.provisioner.name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }} + apiGroup: rbac.authorization.k8s.io + namespace: {{ .Release.Namespace }} +{{- end -}} From f4a0726226c4180c5173dd36b024fabab699c5c8 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 27 Feb 2019 12:15:40 +0530 Subject: [PATCH 36/89] Fix rbac issue in rbd plugin remove unwanted rules and update rbac to have permission to modify endpoints and configmaps in the current namespace. 
Signed-off-by: Madhu Rajanna --- deploy/rbd/kubernetes/csi-attacher-rbac.yaml | 3 -- .../rbd/kubernetes/csi-provisioner-rbac.yaml | 41 +++++++++++++++---- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/deploy/rbd/kubernetes/csi-attacher-rbac.yaml b/deploy/rbd/kubernetes/csi-attacher-rbac.yaml index 7160e293e..aaa596721 100644 --- a/deploy/rbd/kubernetes/csi-attacher-rbac.yaml +++ b/deploy/rbd/kubernetes/csi-attacher-rbac.yaml @@ -10,9 +10,6 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: rbd-external-attacher-runner rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "update"] - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "update"] diff --git a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml index 71ef4f160..cfd113591 100644 --- a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml @@ -22,18 +22,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "create", "update"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshots"] verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "create", "delete"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] verbs: ["create", "get", "list", "watch", "update", "delete"] @@ -57,3 +48,35 @@ roleRef: kind: ClusterRole name: rbd-external-provisioner-runner apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # replace with non-default namespace name + namespace: default + name: rbd-external-provisioner-cfg +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "delete"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role-cfg + # replace with non-default namespace name + namespace: default +subjects: + - kind: ServiceAccount + name: rbd-csi-provisioner + # replace with non-default namespace name + namespace: default +roleRef: + kind: Role + name: rbd-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io From b629b22cf065b05654c8a45f5a43322d74406b29 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 27 Feb 2019 19:29:11 +0530 Subject: [PATCH 37/89] Add csinodeinfos rules Signed-off-by: Madhu Rajanna --- deploy/cephfs/helm/templates/attacher-clusterrole.yaml | 3 +++ deploy/cephfs/helm/templates/provisioner-clusterrole.yaml | 3 +++ deploy/cephfs/kubernetes/csi-attacher-rbac.yaml | 3 +++ deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml | 3 +++ 4 files changed, 12 insertions(+) diff --git a/deploy/cephfs/helm/templates/attacher-clusterrole.yaml b/deploy/cephfs/helm/templates/attacher-clusterrole.yaml index 2f70448e2..a66256500 100644 --- a/deploy/cephfs/helm/templates/attacher-clusterrole.yaml +++ b/deploy/cephfs/helm/templates/attacher-clusterrole.yaml @@ -22,4 +22,7 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", 
"list", "watch"] {{- end -}} diff --git a/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml b/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml index 590521ab2..6e6721cbf 100644 --- a/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml +++ b/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml @@ -28,4 +28,7 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list", "create", "delete"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] {{- end -}} diff --git a/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml b/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml index e0bcbd81a..3b16a8ea2 100644 --- a/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-attacher-rbac.yaml @@ -19,6 +19,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding diff --git a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml index 7bf0da300..823b16a9b 100644 --- a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml @@ -22,6 +22,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding From 2ab1f3e82d97c57fe4e4590cb5109808f05410e0 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 27 Feb 2019 19:32:07 +0530 Subject: [PATCH 38/89] add csinodeinfos rules Signed-off-by: Madhu Rajanna --- deploy/rbd/helm/templates/attacher-clusterrole.yaml | 3 +++ deploy/rbd/helm/templates/provisioner-clusterrole.yaml | 3 +++ deploy/rbd/kubernetes/csi-attacher-rbac.yaml | 3 +++ deploy/rbd/kubernetes/csi-provisioner-rbac.yaml | 3 +++ 4 files changed, 12 insertions(+) diff --git a/deploy/rbd/helm/templates/attacher-clusterrole.yaml b/deploy/rbd/helm/templates/attacher-clusterrole.yaml index 3ebc0438d..59507abc3 100644 --- a/deploy/rbd/helm/templates/attacher-clusterrole.yaml +++ b/deploy/rbd/helm/templates/attacher-clusterrole.yaml @@ -22,4 +22,7 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] {{- end -}} diff --git a/deploy/rbd/helm/templates/provisioner-clusterrole.yaml b/deploy/rbd/helm/templates/provisioner-clusterrole.yaml index 1891dc6b1..1c473e6e9 100644 --- a/deploy/rbd/helm/templates/provisioner-clusterrole.yaml +++ b/deploy/rbd/helm/templates/provisioner-clusterrole.yaml @@ -43,4 +43,7 @@ rules: - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["create"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] {{- end -}} diff --git a/deploy/rbd/kubernetes/csi-attacher-rbac.yaml b/deploy/rbd/kubernetes/csi-attacher-rbac.yaml index aaa596721..e502da5c9 100644 --- a/deploy/rbd/kubernetes/csi-attacher-rbac.yaml +++ b/deploy/rbd/kubernetes/csi-attacher-rbac.yaml @@ -19,6 +19,9 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", 
"watch"] --- kind: ClusterRoleBinding diff --git a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml index cfd113591..c465aebd6 100644 --- a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml @@ -34,6 +34,9 @@ rules: - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["create"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding From b7790097705105290157ec40db50b00fda4fc96d Mon Sep 17 00:00:00 2001 From: gman Date: Wed, 27 Feb 2019 20:29:20 +0100 Subject: [PATCH 39/89] parse ceph-fuse PID with regexp --- pkg/cephfs/volumemounter.go | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/pkg/cephfs/volumemounter.go b/pkg/cephfs/volumemounter.go index 7d923d4a5..035a161a4 100644 --- a/pkg/cephfs/volumemounter.go +++ b/pkg/cephfs/volumemounter.go @@ -17,11 +17,11 @@ limitations under the License. package cephfs import ( - "bytes" "errors" "fmt" "os" "os/exec" + "regexp" "strconv" "sync" @@ -39,6 +39,8 @@ var ( // maps a mountpoint to PID of its FUSE daemon fusePidMap = make(map[string]int) fusePidMapMtx sync.Mutex + + fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`) ) // Load available ceph mounters installed on system into availableMounters @@ -128,24 +130,12 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, vo // We need "starting fuse" meaning the mount is ok // and PID of the ceph-fuse daemon for unmount - idx := bytes.Index(stderr, []byte("starting fuse")) - if idx < 0 { + match := fusePidRx.FindSubmatch(stderr) + if len(match) != 2 { return fmt.Errorf("ceph-fuse failed: %s", stderr) } - pidParseErr := fmt.Errorf("failed to read FUSE daemon PID: %s", stderr) - - pidEnd := bytes.LastIndexByte(stderr[:idx], ']') - if pidEnd < 0 { - return pidParseErr - } - - pidStart := bytes.LastIndexByte(stderr[:pidEnd], '[') - if pidStart < 0 { - return pidParseErr - } - - pid, err := strconv.Atoi(string(stderr[pidStart+1 : pidEnd])) + pid, err := strconv.Atoi(string(match[1])) if err != nil { return fmt.Errorf("failed to parse FUSE daemon PID: %v", err) } From eb14742874681ebc15e0cafe3e8bf1449df48726 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 28 Feb 2019 13:41:11 +0530 Subject: [PATCH 40/89] bump helm chat version from 0.4.0 to 0.5.0 Signed-off-by: Madhu Rajanna --- deploy/cephfs/helm/Chart.yaml | 2 +- deploy/rbd/helm/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/cephfs/helm/Chart.yaml b/deploy/cephfs/helm/Chart.yaml index d3e94d26d..40b9f3cc5 100644 --- a/deploy/cephfs/helm/Chart.yaml +++ b/deploy/cephfs/helm/Chart.yaml @@ -4,7 +4,7 @@ appVersion: "1.0.0" description: "Container Storage Interface (CSI) driver, provisioner, and attacher for Ceph cephfs" name: ceph-csi-cephfs -version: 0.4.0 +version: 0.5.0 keywords: - ceph - cephfs diff --git a/deploy/rbd/helm/Chart.yaml b/deploy/rbd/helm/Chart.yaml index d91c4e041..26185a91e 100644 --- a/deploy/rbd/helm/Chart.yaml +++ b/deploy/rbd/helm/Chart.yaml @@ -4,7 +4,7 @@ appVersion: "1.0.0" description: "Container Storage Interface (CSI) driver, provisioner, snapshotter, and attacher for Ceph RBD" name: ceph-csi-rbd -version: 0.4.0 +version: 0.5.0 keywords: - ceph - rbd From b5b8e4646094d0ec1f3dfe96060a183c863d7d1a Mon Sep 17 00:00:00 2001 From: j-griffith Date: Fri, 1 Mar 2019 10:45:27 -0700 Subject: [PATCH 
41/89] Add multiNodeWritable option for RBD Volumes

This change adds the ability to define a `multiNodeWritable` option in
the Storage Class.

This change does a number of things:

1. Allow multi-node-multi-writer access modes if the SC option is enabled
2. Bypass the watcher checks for MultiNodeMultiWriter Volumes
3. Maintain existing watcher checks for SingleNodeWriter access modes
   regardless of the StorageClass option.

fix lint-errors
---
 Makefile                       |   2 +-
 docs/deploy-rbd.md             |  15 +++++
 examples/README.md             | 102 +++++++++++++++++++++++++++++++++
 examples/rbd/storageclass.yaml |   3 +
 pkg/rbd/controllerserver.go    |  27 +++++++--
 pkg/rbd/nodeserver.go          |  10 +++-
 pkg/rbd/rbd.go                 |   7 ++-
 pkg/rbd/rbd_attach.go          |   8 +++
 pkg/rbd/rbd_util.go            |   9 ++-
 9 files changed, 175 insertions(+), 8 deletions(-)

diff --git a/Makefile b/Makefile
index 734a761b8..825ed8b66 100644
--- a/Makefile
+++ b/Makefile
@@ -31,7 +31,7 @@ go-test:
 	./scripts/test-go.sh

 static-check:
-	./scripts/lint-go.sh
+	./scripts/lint-go.sh
 	./scripts/lint-text.sh

 rbdplugin:
diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md
index bfaa46bb9..acfde7d46 100644
--- a/docs/deploy-rbd.md
+++ b/docs/deploy-rbd.md
@@ -58,6 +58,21 @@ Parameter | Required | Description
 `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
 `csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | namespaces of the above Secret objects
 `mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images
+`fsType` | no | allows setting to `ext3`, `ext4`, or `xfs`; default is `ext4`
+`multiNodeWritable` | no | if set to `enabled` allows RBD volumes with MultiNode Access Modes to bypass watcher checks. By default multiple attachments of an RBD volume are NOT allowed. Even if this option is set in the StorageClass, it's ignored if a standard SingleNodeWriter Access Mode is requested
+
+**Warning for multiNodeWritable:**
+
+*NOTE* the `multiNodeWritable` setting is NOT safe for use by workloads
+that are not designed to coordinate access. This does NOT add any sort
+of a clustered filesystem or write synchronization, it's specifically for
+special workloads that handle access coordination on their own
+(i.e. Active/Passive scenarios).
+
+Using this mode for general purposes *WILL RESULT IN DATA CORRUPTION*.
+We attempt to limit exposure to trouble by ignoring the Storage Class
+setting unless your Volume explicitly asks for multi-node access, and
+assume you know what you're doing.

 **Required secrets:**
diff --git a/examples/README.md b/examples/README.md
index d309cdcaf..94b239b26 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -114,3 +114,105 @@ To restore the snapshot to a new PVC, deploy
 kubectl create -f pvc-restore.yaml
 kubectl create -f pod-restore.yaml
 ```
+
+## How to enable multi-node attach support for RBD
+
+*WARNING* This feature is strictly for workloads that know how to deal
+with concurrent access to the Volume (e.g. Active/Passive applications).
+Using RWX modes on non-clustered file systems with applications trying
+to simultaneously access the Volume will likely result in data corruption!
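+
+Note that the bypass only applies when the claim actually requests a
+multi-node access mode; a standard `ReadWriteOnce` claim against the
+same storage class keeps the usual single-writer watcher checks. As a
+hypothetical illustration (the claim name `pvc-rwo` below is made up
+for this sketch):
+
+```
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-rwo
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd
+```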
+
+### Example process to test the multiNodeWritable feature
+
+Modify your current storage class, or create a new storage class specifically
+for multi-node writers by adding the `multiNodeWritable: "enabled"` entry to
+your parameters. Here's an example:
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: csi-rbd
+provisioner: csi-rbdplugin
+parameters:
+  monitors: rook-ceph-mon-b.rook-ceph.svc.cluster.local:6789
+  pool: rbd
+  imageFormat: "2"
+  imageFeatures: layering
+  csiProvisionerSecretName: csi-rbd-secret
+  csiProvisionerSecretNamespace: default
+  csiNodePublishSecretName: csi-rbd-secret
+  csiNodePublishSecretNamespace: default
+  adminid: admin
+  userid: admin
+  fsType: xfs
+  multiNodeWritable: "enabled"
+reclaimPolicy: Delete
+```
+
+Now, you can request Claims from the configured storage class that include
+the `ReadWriteMany` access mode:
+
+```
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-1
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd
+```
+
+Create a POD that uses this PVC:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-1
+spec:
+  containers:
+    - name: web-server
+      image: nginx
+      volumeMounts:
+        - name: mypvc
+          mountPath: /var/lib/www/html
+  volumes:
+    - name: mypvc
+      persistentVolumeClaim:
+        claimName: pvc-1
+        readOnly: false
+```
+
+Wait for the POD to enter Running state, write some data to
+`/var/lib/www/html`
+
+Now, we can create a second POD (ensure the POD is scheduled on a different
+node; multiwriter single node works without this feature) that also uses this
+PVC at the same time:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-2
+spec:
+  containers:
+    - name: web-server
+      image: nginx
+      volumeMounts:
+        - name: mypvc
+          mountPath: /var/lib/www/html
+  volumes:
+    - name: mypvc
+      persistentVolumeClaim:
+        claimName: pvc-1
+        readOnly: false
+```
+
+If you access the pod you can check that your data is available at
+`/var/lib/www/html`
diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml
index 320a489a8..f7de85f61 100644
--- a/examples/rbd/storageclass.yaml
+++ b/examples/rbd/storageclass.yaml
@@ -35,4 +35,7 @@ parameters:
   userid: kubernetes
   # uncomment the following to use rbd-nbd as mounter on supported nodes
   # mounter: rbd-nbd
+  # fsType: xfs
+  # uncomment the following line to enable multi-attach on RBD volumes
+  # multiNodeWritable: enabled
 reclaimPolicy: Delete
diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go
index f5eb1400a..598451f9e 100644
--- a/pkg/rbd/controllerserver.go
+++ b/pkg/rbd/controllerserver.go
@@ -21,6 +21,7 @@ import (
 	"os/exec"
 	"sort"
 	"strconv"
+	"strings"
 	"syscall"

 	"github.com/ceph/ceph-csi/pkg/csi-common"
@@ -92,7 +93,16 @@ func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) erro
 func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) {
 	// TODO (sbezverk) Last check for not exceeding total storage capacity
-	rbdVol, err := getRBDVolumeOptions(req.GetParameters())
+	// MultiNodeWriters are accepted but they're only for special cases, and we skip the watcher checks for them which isn't the greatest
+	// let's make sure we ONLY skip that if the user is requesting a MULTI Node accessible mode
+	disableMultiWriter := true
+	for _, am := range req.VolumeCapabilities {
+		if am.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
+			disableMultiWriter = false
+		}
+	}
+
+	rbdVol, err :=
getRBDVolumeOptions(req.GetParameters(), disableMultiWriter) if err != nil { return nil, err } @@ -330,11 +340,20 @@ func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume // ValidateVolumeCapabilities checks whether the volume capabilities requested // are supported. func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { - for _, cap := range req.VolumeCapabilities { - if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { - return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil + params := req.GetParameters() + multiWriter := params["multiNodeWritable"] + if strings.ToLower(multiWriter) == "enabled" { + klog.V(3).Info("detected multiNodeWritable parameter in Storage Class, allowing multi-node access modes") + + } else { + for _, cap := range req.VolumeCapabilities { + if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { + return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil + } } + } + return &csi.ValidateVolumeCapabilitiesResponse{ Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ VolumeCapabilities: req.VolumeCapabilities, diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go index 21d7ae829..6f6160a4b 100644 --- a/pkg/rbd/nodeserver.go +++ b/pkg/rbd/nodeserver.go @@ -70,10 +70,18 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis if !notMnt { return &csi.NodePublishVolumeResponse{}, nil } - volOptions, err := getRBDVolumeOptions(req.GetVolumeContext()) + + ignoreMultiWriterEnabled := true + if req.VolumeCapability.AccessMode.Mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { + ignoreMultiWriterEnabled = false + } + + volOptions, err := getRBDVolumeOptions(req.GetVolumeContext(), ignoreMultiWriterEnabled) if err != nil { return nil, err } + // Check access mode settings in the request, even if SC is RW-Many, if the request is a normal Single Writer volume, we ignore this setting and proceed as normal + volOptions.VolName = volName // Mapping RBD image devicePath, err := attachRBDImage(volOptions, volOptions.UserID, req.GetSecrets()) diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index 73911aec4..c7b2eab89 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -102,7 +102,12 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, ca csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, csi.ControllerServiceCapability_RPC_CLONE_VOLUME, }) - r.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER}) + + // TODO: JDG Should also look at remaining modes like MULT_NODE_READER (SINGLE_READER) + r.cd.AddVolumeCapabilityAccessModes( + []csi.VolumeCapability_AccessMode_Mode{ + csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}) // Create GRPC servers r.ids = NewIdentityServer(r.cd) diff --git a/pkg/rbd/rbd_attach.go b/pkg/rbd/rbd_attach.go index 354554d12..88834757b 100644 --- a/pkg/rbd/rbd_attach.go +++ b/pkg/rbd/rbd_attach.go @@ -313,8 +313,16 @@ func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string, if err != nil { return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) } + // In the case of multiattach we want to short circuit the retries when used (so r`if used; return used`) + // 
otherwise we report false, which translates to !ok, meaning back off and try again
+		// NOTE: we ONLY do this if a multi-node access mode is requested for this volume
+		if (strings.ToLower(volOptions.MultiNodeWritable) == "enabled") && (used) {
+			klog.V(2).Info("detected MultiNodeWritable enabled, ignoring watcher in-use result")
+			return used, nil
+		}
 		return !used, nil
 	})
+
 	// return error if rbd image has not become available for the specified timeout
 	if err == wait.ErrWaitTimeout {
 		return fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
 	}
diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go
index 8c1bd16eb..36f655ec5 100644
--- a/pkg/rbd/rbd_util.go
+++ b/pkg/rbd/rbd_util.go
@@ -51,6 +51,7 @@ type rbdVolume struct {
 	AdminID            string `json:"adminId"`
 	UserID             string `json:"userId"`
 	Mounter            string `json:"mounter"`
+	MultiNodeWritable  string `json:"multiNodeWritable"`
 }
 
 type rbdSnapshot struct {
@@ -226,7 +227,7 @@ func execCommand(command string, args []string) ([]byte, error) {
 	return cmd.CombinedOutput()
 }
 
-func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
+func getRBDVolumeOptions(volOptions map[string]string, ignoreMultiNodeWritable bool) (*rbdVolume, error) {
 	var ok bool
 	rbdVol := &rbdVolume{}
 	rbdVol.Pool, ok = volOptions["pool"]
@@ -260,6 +261,12 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
 	}
 
 	getCredsFromVol(rbdVol, volOptions)
+
+	klog.V(3).Infof("ignoreMultiNodeWritable flag in parse getRBDVolumeOptions is: %v", ignoreMultiNodeWritable)
+	// if the volume is NOT requesting multi-node attach, don't treat it specially: ignore the setting in the SC and keep the watcher checks
+	if !ignoreMultiNodeWritable {
+		rbdVol.MultiNodeWritable = volOptions["multiNodeWritable"]
+	}
 	return rbdVol, nil
 }

From 72edf069160d935aed38950ac77f3aad7d25bc3c Mon Sep 17 00:00:00 2001
From: j-griffith
Date: Fri, 1 Mar 2019 15:07:07 -0700
Subject: [PATCH 42/89] Fix obsolete comment

Had a stray comment that got left behind during refactoring; move it back
to where it's relevant and reword it a bit to fit the finished product.

---
 pkg/rbd/nodeserver.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go
index 6f6160a4b..2faed49c1 100644
--- a/pkg/rbd/nodeserver.go
+++ b/pkg/rbd/nodeserver.go
@@ -71,6 +71,8 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
 
+	// if the requested access mode is a plain SINGLE_NODE_WRITER, ignore the SC
+	// directive and keep using the watcher
 	ignoreMultiWriterEnabled := true
 	if req.VolumeCapability.AccessMode.Mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
 		ignoreMultiWriterEnabled = false
@@ -80,7 +82,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	if err != nil {
 		return nil, err
 	}
-	// Check access mode settings in the request, even if SC is RW-Many, if the request is a normal Single Writer volume, we ignore this setting and proceed as normal
 
 	volOptions.VolName = volName
 	// Mapping RBD image

From 1018eda27a3303e853f0cce708edd93de7c6c440 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Mon, 4 Mar 2019 09:16:31 +0530
Subject: [PATCH 43/89] replace gometalinter with golangci

gometalinter is being deprecated in favor of golangci. 
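A local run can reproduce what CI does (assuming golangci-lint
${GOLANGCI_VERSION} from .travis.yml is installed into $GOPATH/bin, as the
install step below does), using the same invocation as scripts/lint-go.sh:

    golangci-lint --config=scripts/golangci.yml run ./... -v
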
switching to golangci in ci and make test Signed-off-by: Madhu Rajanna --- .travis.yml | 12 ++-- scripts/golangci.yml | 142 +++++++++++++++++++++++++++++++++++++++++++ scripts/lint-go.sh | 9 +-- 3 files changed, 151 insertions(+), 12 deletions(-) create mode 100644 scripts/golangci.yml diff --git a/.travis.yml b/.travis.yml index 9c4d7f76a..8175a7ef2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,7 +18,7 @@ go: 1.11.x env: global: - - GO_METALINTER_VERSION="v3.0.0" + - GOLANGCI_VERSION="v1.15.0" - TEST_COVERAGE=stdout - GO_METALINTER_THREADS=1 - GO_COVER_DIR=_output @@ -30,10 +30,10 @@ jobs: - gem install mdl - pip install --user --upgrade pip - pip install --user yamllint - # install gometalinter - - curl -L - "https://raw.githubusercontent.com/alecthomas/gometalinter/"${GO_METALINTER_VERSION}"/scripts/install.sh" - | bash -s -- -b $GOPATH/bin "${GO_METALINTER_VERSION}" + # install golangci-lint + - curl -sf + "https://install.goreleaser.com/github.com/golangci/golangci-lint.sh" + | bash -s -- -b $GOPATH/bin "${GOLANGCI_VERSION}" script: - scripts/lint-text.sh --require-all - scripts/lint-go.sh @@ -49,6 +49,6 @@ jobs: deploy: - provider: script - on: # yamllint disable-line rule:truthy + on: # yamllint disable-line rule:truthy all_branches: true script: ./deploy.sh diff --git a/scripts/golangci.yml b/scripts/golangci.yml new file mode 100644 index 000000000..3675e4b89 --- /dev/null +++ b/scripts/golangci.yml @@ -0,0 +1,142 @@ +--- +# https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml +# This file contains all available configuration options +# with their default values. + +# options for analysis running +run: + # default concurrency is a available CPU number + concurrency: 4 + + # timeout for analysis, e.g. 30s, 5m, default is 1m + deadline: 10m + + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + + # include test files or not, default is true + tests: true + + # which dirs to skip: they won't be analyzed; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but next dirs are always skipped independently + # from this option's value: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs: + - vendor$ + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + skip-files: + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: true + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. 
+    check-blank: true
+
+    # path to a file containing a list of functions to exclude from checking
+    # see https://github.com/kisielk/errcheck#excluding-functions for details
+    # exclude: /path/to/file.txt
+  govet:
+    # report about shadowed variables
+    check-shadowing: true
+  golint:
+    # minimal confidence for issues, default is 0.8
+    min-confidence: 0
+  gofmt:
+    # simplify code: gofmt with `-s` option, true by default
+    simplify: true
+  goimports:
+    # put imports beginning with prefix after 3rd-party packages;
+    # it's a comma-separated list of prefixes
+    local-prefixes: github.com/ceph/ceph-csi
+  gocyclo:
+    # minimal code complexity to report, 30 by default (but we recommend 10-20)
+    min-complexity: 20
+  maligned:
+    # print struct with more effective memory layout or not, false by default
+    suggest-new: true
+  dupl:
+    # tokens count to trigger issue, 150 by default
+    threshold: 100
+  goconst:
+    # minimal length of string constant, 3 by default
+    min-len: 3
+    # minimal occurrences count to trigger, 3 by default
+    min-occurrences: 3
+  depguard:
+    list-type: blacklist
+    include-go-root: false
+    packages:
+      - github.com/davecgh/go-spew/spew
+  misspell:
+    # Correct spellings using locale preferences for US or UK.
+    # Default is to use a neutral variety of English.
+    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
+    locale: US
+    ignore-words:
+      - someword
+  lll:
+    # max line length, lines longer will be reported. Default is 120.
+    # '\t' is counted as 1 character by default, and can be changed with the
+    # tab-width option
+    # TODO make line length to 120 char
+    line-length: 180
+    # tab width in spaces. Defaults to 1.
+    tab-width: 1
+  unused:
+    # treat code as a program (not a library) and report unused exported identifiers; default is false.
+    # XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
+    # if it's called for subdir of a project it can't find funcs usages. All text editor integrations
+    # with golangci-lint call it on a directory with the changed file.
+    check-exported: false
+  unparam:
+    # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
+    # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
+    # if it's called for subdir of a project it can't find external interfaces. All text editor integrations
+    # with golangci-lint call it on a directory with the changed file.
+    check-exported: false
+  nakedret:
+    # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
+    max-func-lines: 30
+
+linters:
+  enable:
+    - megacheck
+    - govet
+    - golint
+    - stylecheck
+    - interfacer
+    - unconvert
+    - gofmt
+    - gocyclo
+    - maligned
+    - lll
+    - nakedret
+  enable-all: false
+  disable:
+    - prealloc
+  disable-all: false
+  presets:
+    - bugs
+    - unused
+  fast: false
diff --git a/scripts/lint-go.sh b/scripts/lint-go.sh
index 0f9d49d18..c55112749 100755
--- a/scripts/lint-go.sh
+++ b/scripts/lint-go.sh
@@ -2,11 +2,8 @@
 
 set -o pipefail
 
-if [[ -x "$(command -v gometalinter)" ]]; then
-  gometalinter -j "${GO_METALINTER_THREADS:-1}" \
-    --sort path --sort line --sort column --deadline=10m \
-    --enable=misspell --enable=staticcheck \
-    --vendor "${@-./...}"
+if [[ -x "$(command -v golangci-lint)" ]]; then
+  golangci-lint --config=scripts/golangci.yml run ./... 
-v else - echo "WARNING: gometalinter not found, skipping lint tests" >&2 + echo "WARNING: golangci-lint not found, skipping lint tests" >&2 fi From 8f07c9efcc5132051667ae6333b8c0dfb9026dd9 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 4 Mar 2019 09:40:44 +0530 Subject: [PATCH 44/89] remove unused param from function Signed-off-by: Madhu Rajanna --- pkg/cephfs/nodeserver.go | 2 +- pkg/cephfs/volume.go | 2 +- pkg/cephfs/volumemounter.go | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index 4c97a80f2..51c44933a 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -150,7 +150,7 @@ func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequ klog.V(4).Infof("cephfs: mounting volume %s with %s", volID, m.name()) - if err = m.mount(stagingTargetPath, cr, volOptions, volID); err != nil { + if err = m.mount(stagingTargetPath, cr, volOptions); err != nil { klog.Errorf("failed to mount volume %s: %v", volID, err) return status.Error(codes.Internal, err.Error()) } diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index 7b8dea03a..caf9887dc 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -137,7 +137,7 @@ func mountCephRoot(volID volumeID, volOptions *volumeOptions, adminCr *credentia return fmt.Errorf("failed to create mounter: %v", err) } - if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil { + if err = m.mount(cephRoot, adminCr, volOptions); err != nil { return fmt.Errorf("error mounting ceph root: %v", err) } diff --git a/pkg/cephfs/volumemounter.go b/pkg/cephfs/volumemounter.go index 035a161a4..6a91afafc 100644 --- a/pkg/cephfs/volumemounter.go +++ b/pkg/cephfs/volumemounter.go @@ -67,7 +67,7 @@ func loadAvailableMounters() error { } type volumeMounter interface { - mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error + mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error name() string } @@ -111,7 +111,7 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) { type fuseMounter struct{} -func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error { +func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions) error { args := [...]string{ mountPoint, "-m", volOptions.Monitors, @@ -147,19 +147,19 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, vo return nil } -func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error { +func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error { if err := createMountPoint(mountPoint); err != nil { return err } - return mountFuse(mountPoint, cr, volOptions, volID) + return mountFuse(mountPoint, cr, volOptions) } func (m *fuseMounter) name() string { return "Ceph FUSE driver" } type kernelMounter struct{} -func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error { +func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions) error { if err := execCommandErr("modprobe", "ceph"); err != nil { return err } @@ -172,12 +172,12 @@ func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, ) } -func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error { +func (m *kernelMounter) mount(mountPoint string, cr 
*credentials, volOptions *volumeOptions) error { if err := createMountPoint(mountPoint); err != nil { return err } - return mountKernel(mountPoint, cr, volOptions, volID) + return mountKernel(mountPoint, cr, volOptions) } func (m *kernelMounter) name() string { return "Ceph kernel client" } From 57cea727fa98e024701371942ae42769c5a01d26 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 4 Mar 2019 09:58:37 +0530 Subject: [PATCH 45/89] Fix yaml lint errors Signed-off-by: Madhu Rajanna --- .travis.yml | 2 +- scripts/golangci.yml | 35 +++++++++++++++++++++++------------ 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8175a7ef2..bd1ccc0c2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -49,6 +49,6 @@ jobs: deploy: - provider: script - on: # yamllint disable-line rule:truthy + on: # yamllint disable-line rule:truthy all_branches: true script: ./deploy.sh diff --git a/scripts/golangci.yml b/scripts/golangci.yml index 3675e4b89..1aa82525b 100644 --- a/scripts/golangci.yml +++ b/scripts/golangci.yml @@ -33,7 +33,8 @@ run: # output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + # colored-line-number|line-number|json|tab|checkstyle|code-climate, + # default is "colored-line-number" format: colored-line-number # print lines of code with issue, default is true @@ -45,17 +46,19 @@ output: # all available settings of specific linters linters-settings: errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # report about not checking of errors in type assetions: + # `a := b.(MyStruct)`; # default is false: such cases aren't reported by default. check-type-assertions: true - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # report about assignment of errors to blank identifier: + # `num, _ := strconv.Atoi(numStr)`; # default is false: such cases aren't reported by default. check-blank: true # path to a file containing a list of functions to exclude from checking # see https://github.com/kisielk/errcheck#excluding-functions for details - #exclude: /path/to/file.txt + # exclude: /path/to/file.txt govet: # report about shadowed variables check-shadowing: true @@ -91,7 +94,8 @@ linters-settings: misspell: # Correct spellings using locale preferences for US or UK. # Default is to use a neutral variety of English. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. + # Setting locale to US will correct the British spelling of 'colour' to + # 'color'. locale: US ignore-words: - someword @@ -104,19 +108,26 @@ linters-settings: # tab width in spaces. Default to 1. tab-width: 1 unused: - # treat code as a program (not a library) and report unused exported identifiers; default is false. - # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find funcs usages. All text editor integrations + # treat code as a program (not a library) and report unused exported + # identifiers; default is false. + # XXX: if you enable this setting, unused will report a lot of + # false-positives in text editors: + # if it's called for subdir of a project it can't find funcs usages. + # All text editor integrations # with golangci-lint call it on a directory with the changed file. check-exported: false unparam: - # Inspect exported functions, default is false. 
Set to true if no external program/library imports your code. - # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # Inspect exported functions, default is false. Set to true if no external + # program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of + # false-positives in text editors: + # if it's called for subdir of a project it can't find external + # interfaces. All text editor integrations # with golangci-lint call it on a directory with the changed file. check-exported: false nakedret: - # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + # make an issue if func has more lines of code than this setting and + # it has naked returns; default is 30 max-func-lines: 30 linters: From 0fd091fa7f1c34ff05af053b1ed83466a939f8f5 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 4 Mar 2019 19:02:10 +0530 Subject: [PATCH 46/89] skip errcheck Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 598451f9e..c4808e72d 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -24,7 +24,7 @@ import ( "strings" "syscall" - "github.com/ceph/ceph-csi/pkg/csi-common" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/util" "github.com/container-storage-interface/spec/lib/go/csi" @@ -201,7 +201,8 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeGB int) error { var err error // Check if there is already RBD image with requested name - found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets()) // #nosec + //nolint + found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets()) if !found { // if VolumeContentSource is not nil, this request is for snapshot if req.VolumeContentSource != nil { From 16279eda787dc7b203c6233a703a4fefef38b4ff Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Fri, 1 Mar 2019 17:38:17 +0530 Subject: [PATCH 47/89] Roundup volume size to Mib for rbd Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 15 +++++++-------- pkg/rbd/rbd_util.go | 8 ++++---- pkg/util/util.go | 21 +++++++++++++++++++++ 3 files changed, 32 insertions(+), 12 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 598451f9e..6206aef57 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -24,7 +24,7 @@ import ( "strings" "syscall" - "github.com/ceph/ceph-csi/pkg/csi-common" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/util" "github.com/container-storage-interface/spec/lib/go/csi" @@ -118,7 +118,8 @@ func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { if req.GetCapacityRange() != nil { volSizeBytes = req.GetCapacityRange().GetRequiredBytes() } - rbdVol.VolSize = volSizeBytes + + rbdVol.VolSize = util.RoundUpToMiB(volSizeBytes) return rbdVol, nil } @@ -175,10 +176,8 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, err } - volSizeGB := int(rbdVol.VolSize / 1024 / 1024 / 1024) - // Check if there is already RBD image with requested name - err = 
cs.checkRBDStatus(rbdVol, req, volSizeGB) + err = cs.checkRBDStatus(rbdVol, req, int(rbdVol.VolSize)) if err != nil { return nil, err } @@ -192,13 +191,13 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ VolumeId: rbdVol.VolID, - CapacityBytes: rbdVol.VolSize, + CapacityBytes: rbdVol.VolSize * util.MiB, VolumeContext: req.GetParameters(), }, }, nil } -func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeGB int) error { +func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeMiB int) error { var err error // Check if there is already RBD image with requested name found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets()) // #nosec @@ -209,7 +208,7 @@ func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVol return err } } else { - err = createRBDImage(rbdVol, volSizeGB, rbdVol.AdminID, req.GetSecrets()) + err = createRBDImage(rbdVol, volSizeMiB, rbdVol.AdminID, req.GetSecrets()) if err != nil { klog.Warningf("failed to create volume: %v", err) return err diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 36f655ec5..25c7a7b27 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -121,18 +121,18 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map } image := pOpts.VolName - volSzGB := fmt.Sprintf("%dG", volSz) + volSzMiB := fmt.Sprintf("%dM", volSz) key, err := getRBDKey(adminID, credentials) if err != nil { return err } if pOpts.ImageFormat == rbdImageFormat2 { - klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool) + klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzMiB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool) } else { - klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool) + klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzMiB, pOpts.ImageFormat, mon, pOpts.Pool) } - args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat} + args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat} if pOpts.ImageFormat == rbdImageFormat2 { args = append(args, "--image-feature", pOpts.ImageFeatures) } diff --git a/pkg/util/util.go b/pkg/util/util.go index 2382f87d7..ef8f37d50 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -23,6 +23,27 @@ import ( "k8s.io/klog" ) +// remove this once kubernetes v1.14.0 release is done +// https://github.com/kubernetes/cloud-provider/blob/master/volume/helpers/rounding.go +const ( + // MiB - MebiByte size + MiB = 1024 * 1024 +) + +// RoundUpToMiB rounds up given quantity upto chunks of MiB +func RoundUpToMiB(size int64) int64 { + requestBytes := size + return roundUpSize(requestBytes, MiB) +} + +func roundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { + roundedUp := volumeSizeBytes / allocationUnitBytes + if volumeSizeBytes%allocationUnitBytes > 0 { + roundedUp++ + } + return roundedUp +} + // CreatePersistanceStorage creates storage path and initializes new cache func CreatePersistanceStorage(sPath, metaDataStore, 
driverName string) (CachePersister, error) { var err error From c0745486a765dcbadec7d458c85c4a16936d37f5 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 4 Mar 2019 08:32:01 +0530 Subject: [PATCH 48/89] add event rules for provisioner Fixes: #https://github.com/ceph/ceph-csi/pull/234#issuecomment-468967752 Signed-off-by: Madhu Rajanna --- deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml | 3 +++ deploy/rbd/kubernetes/csi-provisioner-rbac.yaml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml index 823b16a9b..2e76defb3 100644 --- a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml @@ -13,6 +13,9 @@ rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "create", "delete"] diff --git a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml index c465aebd6..bf2aaa1af 100644 --- a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml @@ -13,6 +13,9 @@ rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "create", "delete"] From ef74050af2f11952e301ca38d00c6ee2dc4d55fb Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 5 Mar 2019 17:46:53 +0530 Subject: [PATCH 49/89] add examples for raw block pvc and pod to mount it Signed-off-by: Madhu Rajanna --- examples/rbd/raw-block-pod.yaml | 18 ++++++++++++++++++ examples/rbd/raw-block-pvc.yaml | 13 +++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 examples/rbd/raw-block-pod.yaml create mode 100644 examples/rbd/raw-block-pvc.yaml diff --git a/examples/rbd/raw-block-pod.yaml b/examples/rbd/raw-block-pod.yaml new file mode 100644 index 000000000..c433e98cc --- /dev/null +++ b/examples/rbd/raw-block-pod.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: pod-with-raw-block-volume +spec: + containers: + - name: fc-container + image: fedora:26 + command: ["/bin/sh", "-c"] + args: ["tail -f /dev/null"] + volumeDevices: + - name: data + devicePath: /dev/xvda + volumes: + - name: data + persistentVolumeClaim: + claimName: raw-block-pvc diff --git a/examples/rbd/raw-block-pvc.yaml b/examples/rbd/raw-block-pvc.yaml new file mode 100644 index 000000000..ee37ff33c --- /dev/null +++ b/examples/rbd/raw-block-pvc.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: raw-block-pvc +spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + resources: + requests: + storage: 1Gi + storageClassName: csi-rbd From 9bebd829ab8d5c37dec31bd416cecea86032db46 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 5 Mar 2019 17:46:17 +0530 Subject: [PATCH 50/89] update readme for raw block pvc Signed-off-by: Madhu Rajanna --- examples/README.md | 62 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/examples/README.md b/examples/README.md index 94b239b26..e1632b1c4 100644 --- a/examples/README.md +++ b/examples/README.md @@ -216,3 +216,65 @@ spec: If you access the pod you can check that your data is 
available at
 `/var/lib/www/html`
+
+## Testing the Raw Block feature in Kubernetes with RBD volumes
+
+CSI block volume support is feature-gated and turned off by default. To run CSI
+with block volume support enabled, a cluster administrator must enable the
+feature for each Kubernetes component using the following feature gate flags:
+
+`--feature-gates=BlockVolume=true,CSIBlockVolume=true`
+
+These feature gates must be enabled on both the API server and the kubelet.
+
+### Create a raw block PVC
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: raw-block-pvc
+spec:
+  accessModes:
+    - ReadWriteOnce
+  volumeMode: Block
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd
+```
+
+Create the raw block PVC:
+
+```console
+kubectl create -f raw-block-pvc.yaml
+```
+
+### Create a pod to mount the raw block PVC
+
+```yaml
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-with-raw-block-volume
+spec:
+  containers:
+    - name: fc-container
+      image: fedora:26
+      command: ["/bin/sh", "-c"]
+      args: [ "tail -f /dev/null" ]
+      volumeDevices:
+        - name: data
+          devicePath: /dev/xvda
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: raw-block-pvc
+```
+
+Create a pod that uses the raw block PVC:
+
+```console
+kubectl create -f raw-block-pod.yaml
+```

From 53dfdb63b5403068e4e8ad6080da7b7bd8109a5f Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Tue, 5 Mar 2019 17:54:05 +0530
Subject: [PATCH 51/89] correct yaml examples template

Signed-off-by: Madhu Rajanna
---
 examples/README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/README.md b/examples/README.md
index e1632b1c4..16238fd98 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -128,7 +128,7 @@ Modify your current storage class, or create a new storage class specifically
 for multi node writers by adding the `multiNodeWritable: "enabled"` entry to
 your parameters. 
Here's an example: -``` +```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: @@ -153,7 +153,7 @@ reclaimPolicy: Delete Now, you can request Claims from the configured storage class that include the `ReadWriteMany` access mode: -``` +```yaml apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -169,7 +169,7 @@ spec: Create a POD that uses this PVC: -``` +```yaml apiVersion: v1 kind: Pod metadata: @@ -195,7 +195,7 @@ Now, we can create a second POD (ensure the POD is scheduled on a different node; multiwriter single node works without this feature) that also uses this PVC at the same time -``` +```yaml apiVersion: v1 kind: Pod metadata: From 9f5ac5eeaa6662b4e6abccaa758840b8b107bb18 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 7 Mar 2019 18:26:47 +0530 Subject: [PATCH 52/89] Fix return error codes Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 23 ++++++++++++----------- pkg/rbd/rbd_util.go | 8 ++++---- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 4313576f7..6af065ea3 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -32,6 +32,7 @@ import ( "github.com/golang/protobuf/ptypes/timestamp" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" "github.com/pborman/uuid" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -104,7 +105,7 @@ func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { rbdVol, err := getRBDVolumeOptions(req.GetParameters(), disableMultiWriter) if err != nil { - return nil, err + return nil, status.Error(codes.InvalidArgument, err.Error()) } // Generating Volume Name and Volume ID, as according to CSI spec they MUST be different @@ -212,7 +213,7 @@ func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVol err = createRBDImage(rbdVol, volSizeMiB, rbdVol.AdminID, req.GetSecrets()) if err != nil { klog.Warningf("failed to create volume: %v", err) - return err + return status.Error(codes.Internal, err.Error()) } klog.V(4).Infof("create volume %s", rbdVol.VolName) @@ -233,12 +234,12 @@ func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol * rbdSnap := &rbdSnapshot{} if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil { - return err + return status.Error(codes.NotFound, err.Error()) } err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminID, req.GetSecrets()) if err != nil { - return err + return status.Error(codes.Internal, err.Error()) } klog.V(4).Infof("create volume %s from snapshot %s", req.GetName(), rbdSnap.SnapName) return nil @@ -277,11 +278,11 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol if err := deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil { // TODO: can we detect "already deleted" situations here and proceed? 
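 		// until then, log the failure and return an error so the caller can retry the delete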
klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err) - return nil, err + return nil, status.Error(codes.Internal, err.Error()) } if err := cs.MetadataStore.Delete(volumeID); err != nil { - return nil, err + return nil, status.Error(codes.Internal, err.Error()) } delete(rbdVolumes, volumeID) @@ -412,7 +413,7 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS rbdSnap, err := getRBDSnapshotOptions(req.GetParameters()) if err != nil { - return nil, err + return nil, status.Error(codes.InvalidArgument, err.Error()) } // Generating Snapshot Name and Snapshot ID, as according to CSI spec they MUST be different @@ -423,7 +424,7 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Errorf(codes.NotFound, "Source Volume ID %s cannot found", req.GetSourceVolumeId()) } if !hasSnapshotFeature(rbdVolume.ImageFeatures) { - return nil, fmt.Errorf("volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId()) + return nil, status.Errorf(codes.InvalidArgument, "volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId()) } rbdSnap.VolName = rbdVolume.VolName @@ -436,7 +437,7 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS err = cs.doSnapshot(rbdSnap, req.GetSecrets()) // if we already have the snapshot, return the snapshot if err != nil { - return nil, err + return nil, status.Error(codes.Internal, err.Error()) } rbdSnap.CreatedAt = ptypes.TimestampNow().GetSeconds() @@ -514,7 +515,7 @@ func (cs *ControllerServer) doSnapshot(rbdSnap *rbdSnapshot, secret map[string]s if err != nil { return fmt.Errorf("snapshot is created but failed to protect and delete snapshot: %v", err) } - return fmt.Errorf("snapshot is created but failed to protect snapshot") + return errors.New("snapshot is created but failed to protect snapshot") } } return nil @@ -563,7 +564,7 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS } if err := cs.MetadataStore.Delete(snapshotID); err != nil { - return nil, err + return nil, status.Error(codes.Internal, err.Error()) } delete(rbdSnapshots, snapshotID) diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 25c7a7b27..5f7f16c89 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -232,13 +232,13 @@ func getRBDVolumeOptions(volOptions map[string]string, ignoreMultiNodeWritable b rbdVol := &rbdVolume{} rbdVol.Pool, ok = volOptions["pool"] if !ok { - return nil, fmt.Errorf("missing required parameter pool") + return nil, errors.New("missing required parameter pool") } rbdVol.Monitors, ok = volOptions["monitors"] if !ok { // if mons are not set in options, check if they are set in secret if rbdVol.MonValueFromSecret, ok = volOptions["monValueFromSecret"]; !ok { - return nil, fmt.Errorf("either monitors or monValueFromSecret must be set") + return nil, errors.New("either monitors or monValueFromSecret must be set") } } rbdVol.ImageFormat, ok = volOptions["imageFormat"] @@ -290,13 +290,13 @@ func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) rbdSnap := &rbdSnapshot{} rbdSnap.Pool, ok = snapOptions["pool"] if !ok { - return nil, fmt.Errorf("missing required parameter pool") + return nil, errors.New("missing required parameter pool") } rbdSnap.Monitors, ok = snapOptions["monitors"] if !ok { // if mons are not set in options, check if they are set in secret if rbdSnap.MonValueFromSecret, ok = snapOptions["monValueFromSecret"]; !ok { - return nil, 
fmt.Errorf("either monitors or monValueFromSecret must be set") + return nil, errors.New("either monitors or monValueFromSecret must be set") } } rbdSnap.AdminID, ok = snapOptions["adminid"] From dd4129590098fc69d5f29a1ccf4a69428fd9f2b4 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 12 Mar 2019 18:37:10 +0530 Subject: [PATCH 53/89] store volume size in bytes in configmap during volume creation we check volume size in bytes, and even during listing of volumes and snapshots we need to check size in bytes Signed-off-by: Madhu Rajanna --- pkg/rbd/controllerserver.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 4313576f7..d3bda5ce6 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -181,6 +181,9 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol if err != nil { return nil, err } + // store volume size in bytes (snapshot and check existing volume needs volume + // size in bytes) + rbdVol.VolSize = rbdVol.VolSize * util.MiB rbdVolumes[rbdVol.VolID] = rbdVol @@ -191,7 +194,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ VolumeId: rbdVol.VolID, - CapacityBytes: rbdVol.VolSize * util.MiB, + CapacityBytes: rbdVol.VolSize, VolumeContext: req.GetParameters(), }, }, nil From d61a87b42e453d901635df8b1f5f84fb02c2f5fe Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 13 Mar 2019 10:39:58 +0530 Subject: [PATCH 54/89] Fix driver name as per CSI spec Signed-off-by: Madhu Rajanna --- cmd/cephfs/main.go | 9 +++++++- cmd/rbd/main.go | 9 +++++++- .../helm/templates/nodeplugin-daemonset.yaml | 10 ++++++-- .../templates/provisioner-statefulset.yaml | 4 +++- deploy/cephfs/helm/values.yaml | 4 ++-- .../kubernetes/csi-cephfsplugin-attacher.yaml | 6 ++--- .../csi-cephfsplugin-provisioner.yaml | 12 +++++----- .../cephfs/kubernetes/csi-cephfsplugin.yaml | 10 ++++---- .../helm/templates/nodeplugin-daemonset.yaml | 10 ++++++-- .../templates/provisioner-statefulset.yaml | 4 +++- deploy/rbd/helm/values.yaml | 3 ++- .../kubernetes/csi-rbdplugin-attacher.yaml | 6 ++--- .../kubernetes/csi-rbdplugin-provisioner.yaml | 16 ++++++------- deploy/rbd/kubernetes/csi-rbdplugin.yaml | 14 +++++------ docs/deploy-cephfs.md | 2 +- docs/deploy-rbd.md | 2 +- examples/README.md | 4 ++-- examples/cephfs/storageclass.yaml | 2 +- examples/rbd/snapshotclass.yaml | 2 +- examples/rbd/storageclass.yaml | 2 +- pkg/cephfs/driver.go | 8 ++++--- pkg/cephfs/identityserver.go | 2 +- pkg/cephfs/volume.go | 5 +++- pkg/rbd/identityserver.go | 2 +- pkg/rbd/rbd.go | 6 +++-- pkg/util/k8scmcache.go | 2 +- pkg/util/util.go | 23 +++++++++++++++++++ 27 files changed, 120 insertions(+), 59 deletions(-) diff --git a/cmd/cephfs/main.go b/cmd/cephfs/main.go index 8ee9141e6..fc5c0dbcc 100644 --- a/cmd/cephfs/main.go +++ b/cmd/cephfs/main.go @@ -27,7 +27,7 @@ import ( var ( endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") - driverName = flag.String("drivername", "csi-cephfsplugin", "name of the driver") + driverName = flag.String("drivername", "cephfs.csi.ceph.com", "name of the driver") nodeID = flag.String("nodeid", "", "node id") volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") @@ -43,6 +43,13 @@ func init() { func main() { 
+ err := util.ValidateDriverName(*driverName) + if err != nil { + klog.Fatalln(err) + } + //update plugin name + cephfs.PluginFolder = cephfs.PluginFolder + *driverName + cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName) if err != nil { os.Exit(1) diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go index 6f61c637f..35cf08390 100644 --- a/cmd/rbd/main.go +++ b/cmd/rbd/main.go @@ -27,7 +27,7 @@ import ( var ( endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") - driverName = flag.String("drivername", "csi-rbdplugin", "name of the driver") + driverName = flag.String("drivername", "rbd.csi.ceph.com", "name of the driver") nodeID = flag.String("nodeid", "", "node id") containerized = flag.Bool("containerized", true, "whether run as containerized") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") @@ -43,6 +43,13 @@ func init() { func main() { + err := util.ValidateDriverName(*driverName) + if err != nil { + klog.Fatalln(err) + } + //update plugin name + rbd.PluginFolder = rbd.PluginFolder + *driverName + cp, err := util.CreatePersistanceStorage(rbd.PluginFolder, *metadataStorage, *driverName) if err != nil { os.Exit(1) diff --git a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml index 20ac11933..9181d6102 100644 --- a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml +++ b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml @@ -39,7 +39,11 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi-cephfsplugin /registration/csi-cephfsplugin-reg.sock"] + command: [ + "/bin/sh", "-c", + 'rm -rf /registration/{{ .Values.driverName }} + /registration/{{ .Values.driverName }}-reg.sock' + ] env: - name: KUBE_NODE_NAME valueFrom: @@ -64,11 +68,13 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-cephfsplugin" + - "--drivername=$(DRIVER_NAME)" - "--metadatastorage=k8s_configmap" env: - name: HOST_ROOTFS value: "/rootfs" + - name: DRIVER_NAME + value: {{ .Values.driverName }} - name: NODE_ID valueFrom: fieldRef: diff --git a/deploy/cephfs/helm/templates/provisioner-statefulset.yaml b/deploy/cephfs/helm/templates/provisioner-statefulset.yaml index 2f5c48c8a..fe4fc6428 100644 --- a/deploy/cephfs/helm/templates/provisioner-statefulset.yaml +++ b/deploy/cephfs/helm/templates/provisioner-statefulset.yaml @@ -52,11 +52,13 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-cephfsplugin" + - "--drivername=$(DRIVER_NAME)" - "--metadatastorage=k8s_configmap" env: - name: HOST_ROOTFS value: "/rootfs" + - name: DRIVER_NAME + value: {{ .Values.driverName }} - name: NODE_ID valueFrom: fieldRef: diff --git a/deploy/cephfs/helm/values.yaml b/deploy/cephfs/helm/values.yaml index f662c8849..b31c9733e 100644 --- a/deploy/cephfs/helm/values.yaml +++ b/deploy/cephfs/helm/values.yaml @@ -13,11 +13,11 @@ serviceAccounts: create: true name: -socketDir: /var/lib/kubelet/plugins/csi-cephfsplugin +socketDir: /var/lib/kubelet/plugins/cephfs.csi.ceph.com socketFile: csi.sock registrationDir: /var/lib/kubelet/plugins_registry volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices - +driverName: cephfs.csi.ceph.com attacher: name: attacher diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin-attacher.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin-attacher.yaml index 06f27ca80..1cd97126b 100644 --- 
a/deploy/cephfs/kubernetes/csi-cephfsplugin-attacher.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin-attacher.yaml @@ -34,13 +34,13 @@ spec: - "--csi-address=$(ADDRESS)" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock + value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin + mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com volumes: - name: socket-dir hostPath: - path: /var/lib/kubelet/plugins/csi-cephfsplugin + path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com type: DirectoryOrCreate diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml index b3c2ffc0d..8d515aacc 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml @@ -34,11 +34,11 @@ spec: - "--v=5" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi-provisioner.sock + value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin + mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com - name: csi-cephfsplugin securityContext: privileged: true @@ -49,7 +49,7 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-cephfsplugin" + - "--drivername=cephfs.csi.ceph.com" - "--metadatastorage=k8s_configmap" env: - name: NODE_ID @@ -61,11 +61,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi-provisioner.sock + value: unix://var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin + mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com - name: host-sys mountPath: /sys - name: lib-modules @@ -76,7 +76,7 @@ spec: volumes: - name: socket-dir hostPath: - path: /var/lib/kubelet/plugins/csi-cephfsplugin + path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com type: DirectoryOrCreate - name: host-sys hostPath: diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml index 9e482c75a..0452efa3d 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml @@ -23,7 +23,7 @@ spec: args: - "--v=5" - "--csi-address=/csi/csi.sock" - - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock" lifecycle: preStop: exec: @@ -53,7 +53,7 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-cephfsplugin" + - "--drivername=cephfs.csi.ceph.com" - "--metadatastorage=k8s_configmap" env: - name: NODE_ID @@ -65,11 +65,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock + value: unix://var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: plugin-dir - mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin + mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com - name: csi-plugins-dir mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi mountPropagation: 
"Bidirectional" @@ -86,7 +86,7 @@ spec: volumes: - name: plugin-dir hostPath: - path: /var/lib/kubelet/plugins/csi-cephfsplugin/ + path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/ type: DirectoryOrCreate - name: csi-plugins-dir hostPath: diff --git a/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml b/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml index 31ffa1db5..355983b49 100644 --- a/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml +++ b/deploy/rbd/helm/templates/nodeplugin-daemonset.yaml @@ -39,7 +39,11 @@ spec: lifecycle: preStop: exec: - command: ["/bin/sh", "-c", "rm -rf /registration/csi-rbdplugin /registration/csi-rbdplugin-reg.sock"] + command: [ + "/bin/sh", "-c", + 'rm -rf /registration/{{ .Values.driverName }} + /registration/{{ .Values.driverName }}-reg.sock' + ] env: - name: KUBE_NODE_NAME valueFrom: @@ -64,12 +68,14 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-rbdplugin" + - "--drivername=$(DRIVER_NAME)" - "--containerized=true" - "--metadatastorage=k8s_configmap" env: - name: HOST_ROOTFS value: "/rootfs" + - name: DRIVER_NAME + value: {{ .Values.driverName }} - name: NODE_ID valueFrom: fieldRef: diff --git a/deploy/rbd/helm/templates/provisioner-statefulset.yaml b/deploy/rbd/helm/templates/provisioner-statefulset.yaml index 2a455ee50..269cb0a44 100644 --- a/deploy/rbd/helm/templates/provisioner-statefulset.yaml +++ b/deploy/rbd/helm/templates/provisioner-statefulset.yaml @@ -69,12 +69,14 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-rbdplugin" + - "--drivername=$(DRIVER_NAME)" - "--containerized=true" - "--metadatastorage=k8s_configmap" env: - name: HOST_ROOTFS value: "/rootfs" + - name: DRIVER_NAME + value: {{ .Values.driverName }} - name: NODE_ID valueFrom: fieldRef: diff --git a/deploy/rbd/helm/values.yaml b/deploy/rbd/helm/values.yaml index bf4c2fb5b..fdeb5d6d5 100644 --- a/deploy/rbd/helm/values.yaml +++ b/deploy/rbd/helm/values.yaml @@ -13,10 +13,11 @@ serviceAccounts: create: true name: -socketDir: /var/lib/kubelet/plugins/csi-rbdplugin +socketDir: /var/lib/kubelet/plugins/rbd.csi.ceph.com socketFile: csi.sock registrationDir: /var/lib/kubelet/plugins_registry volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices +driverName: rbd.csi.ceph.com attacher: name: attacher diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml index 4b7c15e83..a328f105f 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml @@ -34,13 +34,13 @@ spec: - "--csi-address=$(ADDRESS)" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock + value: /var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-rbdplugin + mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com volumes: - name: socket-dir hostPath: - path: /var/lib/kubelet/plugins/csi-rbdplugin + path: /var/lib/kubelet/plugins/rbd.csi.ceph.com type: DirectoryOrCreate diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml index cc647b7b9..61ab47495 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml @@ -34,11 +34,11 @@ spec: - "--v=5" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock + 
value: /var/lib/kubelet/plugins/rbd.csi.ceph.com/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-rbdplugin + mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com - name: csi-snapshotter image: quay.io/k8scsi/csi-snapshotter:v1.0.1 args: @@ -47,13 +47,13 @@ spec: - "--v=5" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock + value: /var/lib/kubelet/plugins/rbd.csi.ceph.com/csi-provisioner.sock imagePullPolicy: Always securityContext: privileged: true volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-rbdplugin + mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com - name: csi-rbdplugin securityContext: privileged: true @@ -64,7 +64,7 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-rbdplugin" + - "--drivername=rbd.csi.ceph.com" - "--containerized=true" - "--metadatastorage=k8s_configmap" env: @@ -79,11 +79,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock + value: unix://var/lib/kubelet/plugins/rbd.csi.ceph.com/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/csi-rbdplugin + mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com - mountPath: /dev name: host-dev - mountPath: /rootfs @@ -108,5 +108,5 @@ spec: path: /lib/modules - name: socket-dir hostPath: - path: /var/lib/kubelet/plugins/csi-rbdplugin + path: /var/lib/kubelet/plugins/rbd.csi.ceph.com type: DirectoryOrCreate diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml index 86291dc7e..dbe7539bc 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml @@ -24,14 +24,14 @@ spec: args: - "--v=5" - "--csi-address=/csi/csi.sock" - - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-rbdplugin/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock" lifecycle: preStop: exec: command: [ "/bin/sh", "-c", - "rm -rf /registration/csi-rbdplugin \ - /registration/csi-rbdplugin-reg.sock" + "rm -rf /registration/rbd.csi.ceph.com \ + /registration/rbd.csi.ceph.com-reg.sock" ] env: - name: KUBE_NODE_NAME @@ -54,7 +54,7 @@ spec: - "--nodeid=$(NODE_ID)" - "--endpoint=$(CSI_ENDPOINT)" - "--v=5" - - "--drivername=csi-rbdplugin" + - "--drivername=rbd.csi.ceph.com" - "--containerized=true" - "--metadatastorage=k8s_configmap" env: @@ -69,11 +69,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock + value: unix://var/lib/kubelet/plugins_registry/rbd.csi.ceph.com/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: plugin-dir - mountPath: /var/lib/kubelet/plugins_registry/csi-rbdplugin + mountPath: /var/lib/kubelet/plugins_registry/rbd.csi.ceph.com - name: pods-mount-dir mountPath: /var/lib/kubelet/pods mountPropagation: "Bidirectional" @@ -92,7 +92,7 @@ spec: volumes: - name: plugin-dir hostPath: - path: /var/lib/kubelet/plugins/csi-rbdplugin + path: /var/lib/kubelet/plugins/rbd.csi.ceph.com type: DirectoryOrCreate - name: plugin-mount-dir hostPath: diff --git a/docs/deploy-cephfs.md b/docs/deploy-cephfs.md index 659407d31..a88293a14 100644 --- a/docs/deploy-cephfs.md +++ b/docs/deploy-cephfs.md @@ -30,7 +30,7 @@ make image-cephfsplugin Option | Default value | Description 
--------------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket -`--drivername` | `csi-cephfsplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) +`--drivername` | `cephfs.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) `--nodeid` | _empty_ | This node's ID `--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in system's path and will choose Ceph kernel client if probing failed. `--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index acfde7d46..02a9e1d19 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -29,7 +29,7 @@ make image-rbdplugin Option | Default value | Description ------ | ------------- | ----------- `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket -`--drivername` | `csi-cephfsplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) +`--drivername` | `rbd.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) `--nodeid` | _empty_ | This node's ID `--containerized` | true | Whether running in containerized mode `--metadatastorage` | _empty_ | Whether should metadata be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) diff --git a/examples/README.md b/examples/README.md index 94b239b26..08bf0ad80 100644 --- a/examples/README.md +++ b/examples/README.md @@ -108,7 +108,7 @@ one of your Ceph pod. To restore the snapshot to a new PVC, deploy [pvc-restore.yaml](./rbd/pvc-restore.yaml) and a testing pod -[pod-restore.yaml](./rbd/pvc-restore.yaml): +[pod-restore.yaml](./rbd/pod-restore.yaml): ```bash kubectl create -f pvc-restore.yaml @@ -133,7 +133,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: csi-rbd -provisioner: csi-rbdplugin +provisioner: rbd.csi.ceph.com parameters: monitors: rook-ceph-mon-b.rook-ceph.svc.cluster.local:6789 pool: rbd diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml index fb8dd0aa9..771c33998 100644 --- a/examples/cephfs/storageclass.yaml +++ b/examples/cephfs/storageclass.yaml @@ -3,7 +3,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: csi-cephfs -provisioner: csi-cephfsplugin +provisioner: cephfs.csi.ceph.com parameters: # Comma separated list of Ceph monitors # if using FQDN, make sure csi plugin's dns policy is appropriate. diff --git a/examples/rbd/snapshotclass.yaml b/examples/rbd/snapshotclass.yaml index 778f2b084..5e3b332f4 100644 --- a/examples/rbd/snapshotclass.yaml +++ b/examples/rbd/snapshotclass.yaml @@ -3,7 +3,7 @@ apiVersion: snapshot.storage.k8s.io/v1alpha1 kind: VolumeSnapshotClass metadata: name: csi-rbdplugin-snapclass -snapshotter: csi-rbdplugin +snapshotter: rbd.csi.ceph.com parameters: pool: rbd monitors: mon1:port,mon2:port,... 
diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml index f7de85f61..9751ed741 100644 --- a/examples/rbd/storageclass.yaml +++ b/examples/rbd/storageclass.yaml @@ -3,7 +3,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: csi-rbd -provisioner: csi-rbdplugin +provisioner: rbd.csi.ceph.com parameters: # Comma separated list of Ceph monitors # if using FQDN, make sure csi plugin's dns policy is appropriate. diff --git a/pkg/cephfs/driver.go b/pkg/cephfs/driver.go index 43a25b740..b2272e853 100644 --- a/pkg/cephfs/driver.go +++ b/pkg/cephfs/driver.go @@ -19,19 +19,21 @@ package cephfs import ( "k8s.io/klog" - "github.com/ceph/ceph-csi/pkg/csi-common" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/util" "github.com/container-storage-interface/spec/lib/go/csi" ) const ( - // PluginFolder defines the location of ceph plugin - PluginFolder = "/var/lib/kubelet/plugins/csi-cephfsplugin" + // version of ceph driver version = "1.0.0" ) +// PluginFolder defines the location of ceph plugin +var PluginFolder = "/var/lib/kubelet/plugins/" + // Driver contains the default identity,node and controller struct type Driver struct { cd *csicommon.CSIDriver diff --git a/pkg/cephfs/identityserver.go b/pkg/cephfs/identityserver.go index cf343ca89..c8d5edc52 100644 --- a/pkg/cephfs/identityserver.go +++ b/pkg/cephfs/identityserver.go @@ -19,7 +19,7 @@ package cephfs import ( "context" - "github.com/ceph/ceph-csi/pkg/csi-common" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/container-storage-interface/spec/lib/go/csi" ) diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index caf9887dc..c8a77f335 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -25,12 +25,15 @@ import ( ) const ( - cephRootPrefix = PluginFolder + "/controller/volumes/root-" cephVolumesRoot = "csi-volumes" namespacePrefix = "ns-" ) +var ( + cephRootPrefix = PluginFolder + "/controller/volumes/root-" +) + func getCephRootPathLocal(volID volumeID) string { return cephRootPrefix + string(volID) } diff --git a/pkg/rbd/identityserver.go b/pkg/rbd/identityserver.go index 155586b31..891759f7c 100644 --- a/pkg/rbd/identityserver.go +++ b/pkg/rbd/identityserver.go @@ -19,7 +19,7 @@ package rbd import ( "context" - "github.com/ceph/ceph-csi/pkg/csi-common" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/container-storage-interface/spec/lib/go/csi" ) diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index c7b2eab89..a5b6cc975 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -17,7 +17,7 @@ limitations under the License. 
package rbd import ( - "github.com/ceph/ceph-csi/pkg/csi-common" + csicommon "github.com/ceph/ceph-csi/pkg/csi-common" "github.com/ceph/ceph-csi/pkg/util" "github.com/container-storage-interface/spec/lib/go/csi" @@ -29,11 +29,13 @@ import ( // PluginFolder defines the location of rbdplugin const ( - PluginFolder = "/var/lib/kubelet/plugins/csi-rbdplugin" rbdDefaultAdminID = "admin" rbdDefaultUserID = rbdDefaultAdminID ) +// PluginFolder defines the location of ceph plugin +var PluginFolder = "/var/lib/kubelet/plugins/" + // Driver contains the default identity,node and controller struct type Driver struct { cd *csicommon.CSIDriver diff --git a/pkg/util/k8scmcache.go b/pkg/util/k8scmcache.go index 10a6d7ef5..5982602e2 100644 --- a/pkg/util/k8scmcache.go +++ b/pkg/util/k8scmcache.go @@ -25,7 +25,7 @@ import ( "github.com/pkg/errors" "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8s "k8s.io/client-go/kubernetes" diff --git a/pkg/util/util.go b/pkg/util/util.go index 2382f87d7..8e70b78bf 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -19,7 +19,10 @@ package util import ( "os" "path" + "strings" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/klog" ) @@ -47,3 +50,23 @@ func CreatePersistanceStorage(sPath, metaDataStore, driverName string) (CachePer func createPersistentStorage(persistentStoragePath string) error { return os.MkdirAll(persistentStoragePath, os.FileMode(0755)) } + +// ValidateDriverName validates the driver name +func ValidateDriverName(driverName string) error { + if len(driverName) == 0 { + return errors.New("driver name is empty") + } + + if len(driverName) > 63 { + return errors.New("driver name length should not exceed 63 characters") + } + var err error + for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) { + if err == nil { + err = errors.New(msg) + continue + } + err = errors.Wrap(err, msg) + } + return err +} From 497411b26c35b4dcbca799ec48d8a8c28d09d864 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 13 Mar 2019 11:53:54 +0530 Subject: [PATCH 55/89] update readme to delete namespace Signed-off-by: Madhu Rajanna --- deploy/cephfs/helm/README.md | 6 ++++++ deploy/rbd/helm/README.md | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/deploy/cephfs/helm/README.md b/deploy/cephfs/helm/README.md index 0e4a1f223..fcd4a98a8 100644 --- a/deploy/cephfs/helm/README.md +++ b/deploy/cephfs/helm/README.md @@ -21,3 +21,9 @@ If you want to delete your Chart, use this command ```bash helm delete --purge "ceph-csi-cephfs" ``` + +If you want to delete the namespace, use this command + +```bash +kubectl delete namespace ceph-csi-cephfs +``` diff --git a/deploy/rbd/helm/README.md b/deploy/rbd/helm/README.md index 35ce742a0..250c10add 100644 --- a/deploy/rbd/helm/README.md +++ b/deploy/rbd/helm/README.md @@ -21,3 +21,9 @@ If you want to delete your Chart, use this command ```bash helm delete --purge "ceph-csi-rbd" ``` + +If you want to delete the namespace, use this command + +```bash +kubectl delete namespace ceph-csi-rbd +``` From a164169fd30db4419c9ff04c9e5cc688b41688eb Mon Sep 17 00:00:00 2001 From: j-griffith Date: Wed, 13 Mar 2019 12:19:14 -0600 Subject: [PATCH 56/89] Revert "Add multiNodeWritable option for RBD Volumes" This reverts commit b5b8e4646094d0ec1f3dfe96060a183c863d7d1a.
--- Makefile | 2 +- docs/deploy-rbd.md | 15 --- examples/README.md | 164 --------------------------------- examples/rbd/storageclass.yaml | 3 - pkg/rbd/controllerserver.go | 27 +----- pkg/rbd/nodeserver.go | 11 +-- pkg/rbd/rbd.go | 7 +- pkg/rbd/rbd_attach.go | 8 -- pkg/rbd/rbd_util.go | 9 +- 9 files changed, 8 insertions(+), 238 deletions(-) diff --git a/Makefile b/Makefile index 825ed8b66..734a761b8 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ go-test: ./scripts/test-go.sh static-check: - ./scripts/lint-go.sh + ./scripts/lint-go.sh ./scripts/lint-text.sh rbdplugin: diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index acfde7d46..bfaa46bb9 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -58,21 +58,6 @@ Parameter | Required | Description `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value `csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | namespaces of the above Secret objects `mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images -`fsType` | no | allows setting to `ext3 | ext-4 | xfs`, default is `ext-4` -`multiNodeWritable` | no | if set to `enabled` allows RBD volumes with MultiNode Access Modes to bypass watcher checks. By default multiple attachments of an RBD volume are NOT allowed. Even if this option is set in the StorageClass, it's ignored if a standard SingleNodeWriter Access Mode is requested - -**Warning for multiNodeWritable:** - -*NOTE* the `multiNodeWritable` setting is NOT safe for use by workloads -that are not designed to coordinate access. This does NOT add any sort -of a clustered filesystem or write syncronization, it's specifically for -special workloads that handle access coordination on their own -(ie Active/Passive scenarios). - -Using this mode for general purposes *WILL RESULT IN DATA CORRUPTION*. -We attempt to limit exposure to trouble here but ignoring the Storage Class -setting unless your Volume explicitly asks for multi node access, and assume -you know what you're doing. **Required secrets:** diff --git a/examples/README.md b/examples/README.md index 16238fd98..d309cdcaf 100644 --- a/examples/README.md +++ b/examples/README.md @@ -114,167 +114,3 @@ To restore the snapshot to a new PVC, deploy kubectl create -f pvc-restore.yaml kubectl create -f pod-restore.yaml ``` - -## How to enable multi node attach support for RBD - -*WARNING* This feature is strictly for workloads that know how to deal -with concurrent acces to the Volume (eg Active/Passive applications). -Using RWX modes on non clustered file systems with applications trying -to simultaneously access the Volume will likely result in data corruption! - -### Example process to test the multiNodeWritable feature - -Modify your current storage class, or create a new storage class specifically -for multi node writers by adding the `multiNodeWritable: "enabled"` entry to -your parameters. 
Here's an example: - -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: csi-rbd -provisioner: csi-rbdplugin -parameters: - monitors: rook-ceph-mon-b.rook-ceph.svc.cluster.local:6789 - pool: rbd - imageFormat: "2" - imageFeatures: layering - csiProvisionerSecretName: csi-rbd-secret - csiProvisionerSecretNamespace: default - csiNodePublishSecretName: csi-rbd-secret - csiNodePublishSecretNamespace: default - adminid: admin - userid: admin - fsType: xfs - multiNodeWritable: "enabled" -reclaimPolicy: Delete -``` - -Now, you can request Claims from the configured storage class that include -the `ReadWriteMany` access mode: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: pvc-1 -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - storageClassName: csi-rbd -``` - -Create a POD that uses this PVC: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: test-1 -spec: - containers: - - name: web-server - image: nginx - volumeMounts: - - name: mypvc - mountPath: /var/lib/www/html - volumes: - - name: mypvc - persistentVolumeClaim: - claimName: pvc-1 - readOnly: false -``` - -Wait for the POD to enter Running state, write some data to -`/var/lib/www/html` - -Now, we can create a second POD (ensure the POD is scheduled on a different -node; multiwriter single node works without this feature) that also uses this -PVC at the same time - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: test-2 -spec: - containers: - - name: web-server - image: nginx - volumeMounts: - - name: mypvc - mountPath: /var/lib/www/html - volumes: - - name: mypvc - persistentVolumeClaim: - claimName: pvc-1 - readOnly: false -``` - -If you access the pod you can check that your data is avaialable at -`/var/lib/www/html` - -## Testing Raw Block feature in kubernetes with RBD volumes - -CSI block volume support is feature-gated and turned off by default. 
To run CSI -with block volume support enabled, a cluster administrator must enable the -feature for each Kubernetes component using the following feature gate flags: - ---feature-gates=BlockVolume=true,CSIBlockVolume=true - -these feature-gates must be enabled on both api-server and kubelet - -### create a raw-block PVC - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: raw-block-pvc -spec: - accessModes: - - ReadWriteOnce - volumeMode: Block - resources: - requests: - storage: 1Gi - storageClassName: csi-rbd -``` - -create raw block pvc - -```console -kubectl create -f raw-block-pvc.yaml -``` - -### create a pod to mount raw-block PVC - -```yaml ---- -apiVersion: v1 -kind: Pod -metadata: - name: pod-with-raw-block-volume -spec: - containers: - - name: fc-container - image: fedora:26 - command: ["/bin/sh", "-c"] - args: [ "tail -f /dev/null" ] - volumeDevices: - - name: data - devicePath: /dev/xvda - volumes: - - name: data - persistentVolumeClaim: - claimName: raw-block-pvc -``` - -Create a POD that uses raw block PVC - -```console -kubectl create -f raw-block-pod.yaml -``` diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml index f7de85f61..320a489a8 100644 --- a/examples/rbd/storageclass.yaml +++ b/examples/rbd/storageclass.yaml @@ -35,7 +35,4 @@ parameters: userid: kubernetes # uncomment the following to use rbd-nbd as mounter on supported nodes # mounter: rbd-nbd - # fsType: xfs - # uncomment the following line to enable multi-attach on RBD volumes - # multiNodeWritable: enabled reclaimPolicy: Delete diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index 41195f15c..ba7cb9092 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -21,7 +21,6 @@ import ( "os/exec" "sort" "strconv" - "strings" "syscall" csicommon "github.com/ceph/ceph-csi/pkg/csi-common" @@ -94,16 +93,7 @@ func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) erro func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { // TODO (sbezverk) Last check for not exceeding total storage capacity - // MultiNodeWriters are accepted but they're only for special cases, and we skip the watcher checks for them which isn't the greatest - // let's make sure we ONLY skip that if the user is requesting a MULTI Node accessible mode - disableMultiWriter := true - for _, am := range req.VolumeCapabilities { - if am.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { - disableMultiWriter = false - } - } - - rbdVol, err := getRBDVolumeOptions(req.GetParameters(), disableMultiWriter) + rbdVol, err := getRBDVolumeOptions(req.GetParameters()) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } @@ -344,20 +334,11 @@ func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume // ValidateVolumeCapabilities checks whether the volume capabilities requested // are supported. 
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { - params := req.GetParameters() - multiWriter := params["multiNodeWritable"] - if strings.ToLower(multiWriter) == "enabled" { - klog.V(3).Info("detected multiNodeWritable parameter in Storage Class, allowing multi-node access modes") - - } else { - for _, cap := range req.VolumeCapabilities { - if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { - return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil - } + for _, cap := range req.VolumeCapabilities { + if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { + return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil } - } - return &csi.ValidateVolumeCapabilitiesResponse{ Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ VolumeCapabilities: req.VolumeCapabilities, diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go index 2faed49c1..21d7ae829 100644 --- a/pkg/rbd/nodeserver.go +++ b/pkg/rbd/nodeserver.go @@ -70,19 +70,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis if !notMnt { return &csi.NodePublishVolumeResponse{}, nil } - - // if our access mode is a simple SINGLE_NODE_WRITER we're going to ignore the SC directive and use the - // watcher still - ignoreMultiWriterEnabled := true - if req.VolumeCapability.AccessMode.Mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { - ignoreMultiWriterEnabled = false - } - - volOptions, err := getRBDVolumeOptions(req.GetVolumeContext(), ignoreMultiWriterEnabled) + volOptions, err := getRBDVolumeOptions(req.GetVolumeContext()) if err != nil { return nil, err } - volOptions.VolName = volName // Mapping RBD image devicePath, err := attachRBDImage(volOptions, volOptions.UserID, req.GetSecrets()) diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index c7b2eab89..73911aec4 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -102,12 +102,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, ca csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, csi.ControllerServiceCapability_RPC_CLONE_VOLUME, }) - - // TODO: JDG Should also look at remaining modes like MULT_NODE_READER (SINGLE_READER) - r.cd.AddVolumeCapabilityAccessModes( - []csi.VolumeCapability_AccessMode_Mode{ - csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}) + r.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER}) // Create GRPC servers r.ids = NewIdentityServer(r.cd) diff --git a/pkg/rbd/rbd_attach.go b/pkg/rbd/rbd_attach.go index 88834757b..354554d12 100644 --- a/pkg/rbd/rbd_attach.go +++ b/pkg/rbd/rbd_attach.go @@ -313,16 +313,8 @@ func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string, if err != nil { return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) } - // In the case of multiattach we want to short circuit the retries when used (so r`if used; return used`) - // otherwise we're setting this to false which translates to !ok, which means backoff and try again - // NOTE: we ONLY do this if an multi-node access mode is requested for this volume - if (strings.ToLower(volOptions.MultiNodeWritable) == "enabled") && (used) { - klog.V(2).Info("detected MultiNodeWritable enabled, ignoring watcher in-use 
result") - return used, nil - } return !used, nil }) - // return error if rbd image has not become available for the specified timeout if err == wait.ErrWaitTimeout { return fmt.Errorf("rbd image %s is still being used", imagePath) diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 5f7f16c89..5f8215496 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -51,7 +51,6 @@ type rbdVolume struct { AdminID string `json:"adminId"` UserID string `json:"userId"` Mounter string `json:"mounter"` - MultiNodeWritable string `json:"multiNodeWritable"` } type rbdSnapshot struct { @@ -227,7 +226,7 @@ func execCommand(command string, args []string) ([]byte, error) { return cmd.CombinedOutput() } -func getRBDVolumeOptions(volOptions map[string]string, ignoreMultiNodeWritable bool) (*rbdVolume, error) { +func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) { var ok bool rbdVol := &rbdVolume{} rbdVol.Pool, ok = volOptions["pool"] @@ -261,12 +260,6 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) { } getCredsFromVol(rbdVol, volOptions) - - klog.V(3).Infof("ignoreMultiNodeWritable flag in parse getRBDVolumeOptions is: %v", ignoreMultiNodeWritable) - // If the volume we're working with is NOT requesting multi-node attach then don't treat it special, ignore the setting in the SC and just keep our watcher checks - if !ignoreMultiNodeWritable { - rbdVol.MultiNodeWritable = volOptions["multiNodeWritable"] - } return rbdVol, nil } From 6ec1196f478ebc07dadc87c351b411f4e028de7f Mon Sep 17 00:00:00 2001 From: j-griffith Date: Wed, 13 Mar 2019 18:18:04 -0600 Subject: [PATCH 57/89] Rework multi-node-multi-writer feature This commit reverts the initial implementation of the multi-node-multi-writer feature: commit: b5b8e4646094d0ec1f3dfe96060a183c863d7d1a It replaces that implementation with a more restrictive version that only allows multi-node-multi-writer for volumes of type `block`. With this change there are no volume parameters required in the storage class; we also fail any attempt to create a file based device with multi-node-multi-writer being specified, so a user doesn't have to wait until they attempt the publish before realizing it doesn't work. --- Makefile | 2 +- examples/README.md | 99 +++++++++++++++++++++++++++++++++++++ pkg/rbd/controllerserver.go | 20 +++++++- pkg/rbd/nodeserver.go | 15 +++++- pkg/rbd/rbd.go | 9 +++- pkg/rbd/rbd_attach.go | 5 ++ pkg/rbd/rbd_util.go | 7 ++- 7 files changed, 152 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 734a761b8..825ed8b66 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ go-test: ./scripts/test-go.sh static-check: - ./scripts/lint-go.sh + ./scripts/lint-go.sh ./scripts/lint-text.sh rbdplugin: diff --git a/examples/README.md b/examples/README.md index d309cdcaf..d27f9777a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -114,3 +114,102 @@ To restore the snapshot to a new PVC, deploy kubectl create -f pvc-restore.yaml kubectl create -f pod-restore.yaml ``` + +## How to test RBD MULTI_NODE_MULTI_WRITER BLOCK feature + +Requires feature-gates: `BlockVolume=true` `CSIBlockVolume=true` + +*NOTE* The MULTI_NODE_MULTI_WRITER capability is only available for +Volumes that are of access_type `block` + +*WARNING* This feature is strictly for workloads that know how to deal +with concurrent access to the Volume (eg Active/Passive applications).
+Using RWX modes on non clustered file systems with applications trying +to simultaneously access the Volume will likely result in data corruption! + +Following are examples for issuing a request for a `Block` +`ReadWriteMany` Claim, and using the resultant Claim for a POD + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: block-pvc +spec: + accessModes: + - ReadWriteMany + volumeMode: Block + resources: + requests: + storage: 1Gi + storageClassName: csi-rbd +``` + +Create a POD that uses this PVC: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: my-pod +spec: + containers: + - name: my-container + image: debian + command: ["/bin/bash", "-c"] + args: [ "tail -f /dev/null" ] + volumeDevices: + - devicePath: /dev/rbdblock + name: my-volume + imagePullPolicy: IfNotPresent + volumes: + - name: my-volume + persistentVolumeClaim: + claimName: block-pvc + +``` + +Now, we can create a second POD (ensure the POD is scheduled on a different +node; multiwriter single node works without this feature) that also uses this +PVC at the same time, again wait for the pod to enter running state, and verify +the block device is available. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: another-pod +spec: + containers: + - name: my-container + image: debian + command: ["/bin/bash", "-c"] + args: [ "tail -f /dev/null" ] + volumeDevices: + - devicePath: /dev/rbdblock + name: my-volume + imagePullPolicy: IfNotPresent + volumes: + - name: my-volume + persistentVolumeClaim: + claimName: block-pvc +``` + +Wait for the PODs to enter Running state, check that our block device +is available in the container at `/dev/rbdblock` in both containers: + +```bash +$ kubectl exec -it my-pod -- fdisk -l /dev/rbdblock +Disk /dev/rbdblock: 1 GiB, 1073741824 bytes, 2097152 sectors +Units: sectors of 1 * 512 = 512 bytes +Sector size (logical/physical): 512 bytes / 512 bytes +I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes +``` + +```bash +$ kubectl exec -it another-pod -- fdisk -l /dev/rbdblock +Disk /dev/rbdblock: 1 GiB, 1073741824 bytes, 2097152 sectors +Units: sectors of 1 * 512 = 512 bytes +Sector size (logical/physical): 512 bytes / 512 bytes +I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes +``` diff --git a/pkg/rbd/controllerserver.go b/pkg/rbd/controllerserver.go index ba7cb9092..49af63206 100644 --- a/pkg/rbd/controllerserver.go +++ b/pkg/rbd/controllerserver.go @@ -93,7 +93,25 @@ func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) erro func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) { // TODO (sbezverk) Last check for not exceeding total storage capacity - rbdVol, err := getRBDVolumeOptions(req.GetParameters()) + isMultiNode := false + isBlock := false + for _, cap := range req.VolumeCapabilities { + // Only checking SINGLE_NODE_SINGLE_WRITER here because regardless of the other types (MULTI READER) we need to implement the same logic to ignore the in-use response + if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER { + isMultiNode = true + } + if cap.GetBlock() != nil { + isBlock = true + } + } + + // We want to fail early if the user is trying to create a RWX on a non-block type device + if isMultiNode && !isBlock { + return nil, status.Error(codes.InvalidArgument, "multi node access modes are only supported on rbd `block` type volumes") + } + + // if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks + rbdVol, err :=
getRBDVolumeOptions(req.GetParameters(), (isMultiNode && isBlock)) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } diff --git a/pkg/rbd/nodeserver.go b/pkg/rbd/nodeserver.go index 21d7ae829..3a88b456e 100644 --- a/pkg/rbd/nodeserver.go +++ b/pkg/rbd/nodeserver.go @@ -48,6 +48,7 @@ type NodeServer struct { func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { targetPath := req.GetTargetPath() targetPathMutex.LockKey(targetPath) + disableInUseChecks := false defer func() { if err := targetPathMutex.UnlockKey(targetPath); err != nil { @@ -70,7 +71,19 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis if !notMnt { return &csi.NodePublishVolumeResponse{}, nil } - volOptions, err := getRBDVolumeOptions(req.GetVolumeContext()) + + // MULTI_NODE_MULTI_WRITER is supported by default for Block access type volumes + if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { + if isBlock { + disableInUseChecks = true + } else { + klog.Warningf("MULTI_NODE_MULTI_WRITER currently only supported with volumes of access type `block`, invalid AccessMode for volume: %v", req.GetVolumeId()) + e := fmt.Errorf("rbd: MULTI_NODE_MULTI_WRITER access mode only allowed with BLOCK access type") + return nil, status.Error(codes.InvalidArgument, e.Error()) + } + } + + volOptions, err := getRBDVolumeOptions(req.GetVolumeContext(), disableInUseChecks) if err != nil { return nil, err } diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index 73911aec4..3fab59e01 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -102,7 +102,14 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, ca csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, csi.ControllerServiceCapability_RPC_CLONE_VOLUME, }) - r.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER}) + + // We only support the multi-writer option when using block, but it's a supported capability for the plugin in general + // In addition, we want to add the remaining modes like MULTI_NODE_READER_ONLY, + // MULTI_NODE_SINGLE_WRITER etc, but need to do some verification of RO modes first + // will work those as follow up features + r.cd.AddVolumeCapabilityAccessModes( + []csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}) // Create GRPC servers r.ids = NewIdentityServer(r.cd) diff --git a/pkg/rbd/rbd_attach.go b/pkg/rbd/rbd_attach.go index 354554d12..613cfa765 100644 --- a/pkg/rbd/rbd_attach.go +++ b/pkg/rbd/rbd_attach.go @@ -258,6 +258,7 @@ func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string Factor: rbdImageWatcherFactor, Steps: rbdImageWatcherSteps, } + err = waitForrbdImage(backoff, volOptions, userID, credentials) if err != nil { @@ -313,6 +314,10 @@ func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string, if err != nil { return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) } + if (volOptions.DisableInUseChecks) && (used) { + klog.V(2).Info("valid multi-node attach requested, ignoring watcher in-use result") + return used, nil + } return !used, nil }) // return error if rbd image has not become available for the specified timeout diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 
5f8215496..aa5b19f79 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -51,6 +51,7 @@ type rbdVolume struct { AdminID string `json:"adminId"` UserID string `json:"userId"` Mounter string `json:"mounter"` + DisableInUseChecks bool `json:"disableInUseChecks"` } type rbdSnapshot struct { @@ -226,7 +227,7 @@ func execCommand(command string, args []string) ([]byte, error) { return cmd.CombinedOutput() } -func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) { +func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) { var ok bool rbdVol := &rbdVolume{} rbdVol.Pool, ok = volOptions["pool"] @@ -259,6 +260,10 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) { } } + + klog.V(3).Infof("setting disableInUseChecks on rbd volume to: %v", disableInUseChecks) + rbdVol.DisableInUseChecks = disableInUseChecks + getCredsFromVol(rbdVol, volOptions) return rbdVol, nil } From 5e43e1fefad275b395cfe08ef5b15cf8ac9a7d61 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 13 Mar 2019 15:21:48 -0400 Subject: [PATCH 58/89] makefile: use a variable to choose "docker" command This change allows the use of alternatives to or wrappers around the normal docker command for container builds. Example 1: make image-rbdplugin CONTAINER_CMD=podman Example 2: CONTAINER_CMD=podman make image-rbdplugin Signed-off-by: John Mulligan --- Makefile | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 825ed8b66..11b1a67fc 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,8 @@ .PHONY: all rbdplugin cephfsplugin +CONTAINER_CMD?=docker + RBD_IMAGE_NAME=$(if $(ENV_RBD_IMAGE_NAME),$(ENV_RBD_IMAGE_NAME),quay.io/cephcsi/rbdplugin) RBD_IMAGE_VERSION=$(if $(ENV_RBD_IMAGE_VERSION),$(ENV_RBD_IMAGE_VERSION),v1.0.0) @@ -40,7 +42,7 @@ rbdplugin: image-rbdplugin: rbdplugin cp _output/rbdplugin deploy/rbd/docker - docker build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker + $(CONTAINER_CMD) build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker cephfsplugin: if [ ! 
-d ./vendor ]; then dep ensure -vendor-only; fi @@ -48,13 +50,13 @@ cephfsplugin: image-cephfsplugin: cephfsplugin cp _output/cephfsplugin deploy/cephfs/docker - docker build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker + $(CONTAINER_CMD) build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker push-image-rbdplugin: image-rbdplugin - docker push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) + $(CONTAINER_CMD) push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) push-image-cephfsplugin: image-cephfsplugin - docker push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) + $(CONTAINER_CMD) push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) clean: go clean -r -x From 6e7373cd38257d6d002ba29b27e9d6206367599d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 31 Jan 2019 18:03:31 +0100 Subject: [PATCH 59/89] add mergify as a merge engine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From now on, each PR will be merged automatically if: * there is no DNM label on the PR AND * the PR has at least one approval AND * the travis CI successfully passed Closes: https://github.com/ceph/ceph-csi/issues/154 Signed-off-by: Sébastien Han --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .mergify.yml diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 000000000..7a31c581b --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,13 @@ +pull_request_rules: + - name: automatic merge + conditions: + - label!=DNM + - '#approved-reviews-by>=1' + - 'status-success=continuous-integration/travis-ci/pr' + actions: + merge: + method: rebase + rebase_fallback: merge + strict: smart + dismiss_reviews: {} + delete_head_branch: {} From 33a2fb1b0675cfb482d7e7f7781481d30a549d87 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Tue, 19 Mar 2019 13:30:47 -0400 Subject: [PATCH 60/89] deploy.sh: use a variable to choose "docker" command This change allows the use of alternatives to or wrappers around the normal docker command when running the deploy.sh script. Example: CONTAINER_CMD=podman ./deploy.sh Signed-off-by: John Mulligan --- deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy.sh b/deploy.sh index dff25413b..1cb218d15 100755 --- a/deploy.sh +++ b/deploy.sh @@ -36,7 +36,7 @@ else fi if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then - docker login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io + "${CONTAINER_CMD:-docker}" login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io make push-image-rbdplugin push-image-cephfsplugin set -xe From 52397b4dc4d6f5b45b7d0351401d445036d3ad01 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 18 Mar 2019 12:20:06 +0530 Subject: [PATCH 61/89] rename socket directory to a common name As the socket directory will be created inside the container, there is no need to follow the plugin name for the directory creation; this will also reduce the code changes needed if we want to change the driver name.
Signed-off-by: Madhu Rajanna --- .../kubernetes/csi-cephfsplugin-provisioner.yaml | 8 ++++---- deploy/cephfs/kubernetes/csi-cephfsplugin.yaml | 4 ++-- deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml | 4 ++-- deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml | 12 ++++++------ deploy/rbd/kubernetes/csi-rbdplugin.yaml | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml index 8d515aacc..af5962933 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml @@ -34,11 +34,11 @@ spec: - "--v=5" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi-provisioner.sock + value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com + mountPath: /csi - name: csi-cephfsplugin securityContext: privileged: true @@ -61,11 +61,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi-provisioner.sock + value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com + mountPath: /csi - name: host-sys mountPath: /sys - name: lib-modules diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml index 0452efa3d..af4322fee 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml @@ -65,11 +65,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock + value: unix:///csi/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: plugin-dir - mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com + mountPath: /csi - name: csi-plugins-dir mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi mountPropagation: "Bidirectional" diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml index a328f105f..81029b733 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-attacher.yaml @@ -34,11 +34,11 @@ spec: - "--csi-address=$(ADDRESS)" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock + value: unix:///csi/csi-attacher.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com + mountPath: /csi volumes: - name: socket-dir hostPath: diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml index 61ab47495..aef25c04c 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml @@ -34,11 +34,11 @@ spec: - "--v=5" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/rbd.csi.ceph.com/csi-provisioner.sock + value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com + mountPath: /csi - name: csi-snapshotter image: quay.io/k8scsi/csi-snapshotter:v1.0.1 args: @@ -47,13 +47,13 @@ spec: - "--v=5" env: - name: ADDRESS - value: /var/lib/kubelet/plugins/rbd.csi.ceph.com/csi-provisioner.sock + value: 
unix:///csi/csi-provisioner.sock imagePullPolicy: Always securityContext: privileged: true volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com + mountPath: /csi - name: csi-rbdplugin securityContext: privileged: true @@ -79,11 +79,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins/rbd.csi.ceph.com/csi-provisioner.sock + value: unix:///csi/csi-provisioner.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir - mountPath: /var/lib/kubelet/plugins/rbd.csi.ceph.com + mountPath: /csi - mountPath: /dev name: host-dev - mountPath: /rootfs diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml index dbe7539bc..22fe06f3f 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml @@ -69,11 +69,11 @@ spec: fieldRef: fieldPath: metadata.namespace - name: CSI_ENDPOINT - value: unix://var/lib/kubelet/plugins_registry/rbd.csi.ceph.com/csi.sock + value: unix:///csi/csi.sock imagePullPolicy: "IfNotPresent" volumeMounts: - name: plugin-dir - mountPath: /var/lib/kubelet/plugins_registry/rbd.csi.ceph.com + mountPath: /csi - name: pods-mount-dir mountPath: /var/lib/kubelet/pods mountPropagation: "Bidirectional" From 593b357e1d27cbfff467464fa589342feffa0ace Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Fri, 22 Mar 2019 10:57:56 +0530 Subject: [PATCH 62/89] Fix yaml lint error in mergify Signed-off-by: Madhu Rajanna --- .mergify.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.mergify.yml b/.mergify.yml index 7a31c581b..d74b5199e 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,3 +1,4 @@ +--- pull_request_rules: - name: automatic merge conditions: - label!=DNM - '#approved-reviews-by>=1' - 'status-success=continuous-integration/travis-ci/pr' From b318964af5a361098d8cb455abda45ce02e63f57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Mon, 25 Mar 2019 22:47:39 +0800 Subject: [PATCH 63/89] issue #91 issue #217 Goal: when the CSI plugin exits unexpectedly, a pod using a CephFS PV cannot recover automatically, because the mount relation is lost until the pod is killed and rescheduled to another node. The CSI plugin can do more here: by remounting the old paths on startup, a pod can recover automatically once the plugin restarts, since the old mount paths become usable again. Non-goal: a pod should still exit and restart when the CSI plugin pod exits and the mount point is lost; if the pod does not exit, it will get **transport endpoint is not connected** errors. Implemented logic: csi-plugin start: 1. load all MountCachEntry records from the node-local dir 2. check if the volID still exists in the cluster; if not, ignore this entry, otherwise continue 3. check if the stagingPath exists; if yes, mount the path 4. check if all targetPaths exist; if yes, bind-mount them to the staging path NodeServer: 1. NodeStageVolume: add a MountCachEntry in the local dir, including the readonly attr and the ceph secret 2. NodeStagePublishVolume: add the pod bind-mount path to the MountCachEntry and persist it to the local dir 3. NodeStageunPublishVolume: remove the pod bind-mount path from the MountCachEntry and persist it to the local dir 4.
NodeStageunStageVolume: remove MountCachEntry from local dir --- pkg/cephfs/driver.go | 4 + pkg/cephfs/mountcache.go | 314 ++++++++++++++++++++++++++++++++++ pkg/cephfs/mountcache_test.go | 38 ++++ pkg/cephfs/nodeserver.go | 17 ++ pkg/util/cachepersister.go | 1 + pkg/util/nodecache.go | 29 +++- 6 files changed, 395 insertions(+), 8 deletions(-) create mode 100644 pkg/cephfs/mountcache.go create mode 100644 pkg/cephfs/mountcache_test.go diff --git a/pkg/cephfs/driver.go b/pkg/cephfs/driver.go index b2272e853..ee7b446b8 100644 --- a/pkg/cephfs/driver.go +++ b/pkg/cephfs/driver.go @@ -105,6 +105,10 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cacheP klog.Fatalf("failed to write ceph configuration file: %v", err) } + if err := remountHisMountedPath(driverName, version, nodeID, cachePersister); err != nil { + klog.Warningf("failed to remounted history mounted path: %v", err) + //ignore remount fail + } // Initialize default library driver fs.cd = csicommon.NewCSIDriver(driverName, version, nodeID) diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go new file mode 100644 index 000000000..2f7a0a5c6 --- /dev/null +++ b/pkg/cephfs/mountcache.go @@ -0,0 +1,314 @@ +package cephfs + +import ( + "encoding/base64" + "os" + "sync" + "syscall" + "time" + + "github.com/ceph/ceph-csi/pkg/util" + "github.com/pkg/errors" + "k8s.io/klog" +) + +type volumeMountEntry struct { + NodeID string `json:"nodeID"` + DriverName string `json:"driverName"` + DriverVersion string `json:"driverVersion"` + + Namespace string `json:"namespace"` + + VolumeID string `json:"volumeID"` + Secrets map[string]string `json:"secrets"` + StagingPath string `json:"stagingPath"` + TargetPaths map[string]bool `json:"targetPaths"` + CreateTime time.Time `json:"createTime"` + LastMountTime time.Time `json:"lastMountTime"` + LoadCount uint64 `json:"loadCount"` +} + +type volumeMountCacheMap struct { + DriverName string + DriverVersion string + NodeID string + MountFailNum int64 + MountSuccNum int64 + Volumes map[string]volumeMountEntry + NodeCacheStore util.NodeCache + MetadataStore util.CachePersister +} + +var ( + csiPersistentVolumeRoot = "/var/lib/kubelet/plugins/kubernetes.io/csi" + volumeMountCachePrefix = "cephfs-mount-cache-" + volumeMountCache volumeMountCacheMap + volumeMountCacheMtx sync.Mutex +) + +func remountHisMountedPath(name string, v string, nodeID string, cachePersister util.CachePersister) error { + volumeMountCache.Volumes = make(map[string]volumeMountEntry) + volumeMountCache.NodeID = nodeID + volumeMountCache.DriverName = name + volumeMountCache.DriverVersion = v + volumeMountCache.MountSuccNum = 0 + volumeMountCache.MountFailNum = 0 + + volumeMountCache.MetadataStore = cachePersister + + volumeMountCache.NodeCacheStore.BasePath = PluginFolder + volumeMountCache.NodeCacheStore.CacheDir = "volumes-mount-cache" + + if _, err := os.Stat(csiPersistentVolumeRoot); err != nil { + klog.Infof("mount-cache: csi pv root path %s stat fail %v, may not in daemonset csi plugin, exit", csiPersistentVolumeRoot, err) + return err + } + + if err := os.MkdirAll(volumeMountCache.NodeCacheStore.BasePath, 0755); err != nil { + klog.Fatalf("mount-cache: failed to create %s: %v", volumeMountCache.NodeCacheStore.BasePath, err) + return err + } + me := &volumeMountEntry{} + ce := &controllerCacheEntry{} + err := volumeMountCache.NodeCacheStore.ForAll(volumeMountCachePrefix, me, func(identifier string) error { + volID := me.VolumeID + klog.Infof("mount-cache: load %v", me) + if err := 
volumeMountCache.MetadataStore.Get(volID, ce); err != nil { + if err, ok := err.(*util.CacheEntryNotFound); ok { + klog.Infof("cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err) + if err := volumeMountCache.NodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { + klog.Infof("mount-cache: metadata nofound, delete volume cache entry for volume %s", volID) + } + } + } else { + if err := mountOneCacheEntry(ce, me); err == nil { + volumeMountCache.MountSuccNum++ + volumeMountCache.Volumes[me.VolumeID] = *me + } else { + volumeMountCache.MountFailNum++ + } + } + return nil + }) + if err != nil { + klog.Infof("mount-cache: metastore list cache fail %v", err) + return err + } + if volumeMountCache.MountFailNum > volumeMountCache.MountSuccNum { + return errors.New("mount-cache: too many volumes mount fail") + } + klog.Infof("mount-cache: succ remount %d volumes, fail remount %d volumes", volumeMountCache.MountSuccNum, volumeMountCache.MountFailNum) + return nil +} + +func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountEntry) error { + volumeMountCacheMtx.Lock() + defer volumeMountCacheMtx.Unlock() + + var err error + volID := ce.VolumeID + volOptions := ce.VolOptions + + adminCr, err := getAdminCredentials(decodeCredentials(me.Secrets)) + if err != nil { + return err + } + entity, err := getCephUser(&volOptions, adminCr, volID) + if err != nil { + klog.Infof("mount-cache: failed to get ceph user: %s %v", volID, me.StagingPath) + } + cr := entity.toCredentials() + + if volOptions.ProvisionVolume { + volOptions.RootPath = getVolumeRootPathCeph(volID) + } + + err = cleanupMountPoint(me.StagingPath) + if err != nil { + klog.Infof("mount-cache: failed to cleanup volume mount point %s, remove it: %s %v", volID, me.StagingPath, err) + return err + } + + isMnt, err := isMountPoint(me.StagingPath) + if err != nil { + isMnt = false + klog.Infof("mount-cache: failed to check volume mounted %s: %s %v", volID, me.StagingPath, err) + } + + if !isMnt { + m, err := newMounter(&volOptions) + if err != nil { + klog.Errorf("mount-cache: failed to create mounter for volume %s: %v", volID, err) + return err + } + if err := m.mount(me.StagingPath, cr, &volOptions); err != nil { + klog.Errorf("mount-cache: failed to mount volume %s: %v", volID, err) + return err + } + } + for targetPath, readOnly := range me.TargetPaths { + if err := cleanupMountPoint(targetPath); err == nil { + if err := bindMount(me.StagingPath, targetPath, readOnly); err != nil { + klog.Errorf("mount-cache: failed to bind-mount volume %s: %s %s %v %v", + volID, me.StagingPath, targetPath, readOnly, err) + } else { + klog.Infof("mount-cache: succ bind-mount volume %s: %s %s %v", + volID, me.StagingPath, targetPath, readOnly) + } + } + } + return nil +} + +func cleanupMountPoint(mountPoint string) error { + if _, err := os.Stat(mountPoint); err != nil { + if IsCorruptedMnt(err) { + klog.Infof("mount-cache: corrupted mount point %s, need unmount", mountPoint) + err := execCommandErr("umount", mountPoint) + if err != nil { + klog.Infof("mount-cache: unmount %s fail %v", mountPoint, err) + //ignore error return err + } + } + } + if _, err := os.Stat(mountPoint); err != nil { + klog.Errorf("mount-cache: mount point %s stat fail %v", mountPoint, err) + return err + } + return nil +} + +func IsCorruptedMnt(err error) bool { + if err == nil { + return false + } + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + 
underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES +} + +func genVolumeMountCacheFileName(volID string) string { + cachePath := volumeMountCachePrefix + volID + return cachePath +} + +func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath string, secrets map[string]string) error { + volumeMountCacheMtx.Lock() + defer volumeMountCacheMtx.Unlock() + + lastTargetPaths := make(map[string]bool) + me, ok := volumeMountCache.Volumes[volID] + if ok { + if me.StagingPath == stagingTargetPath { + klog.Infof("mount-cache: node stage volume last cache entry for volume %s stagingTargetPath %s equals %s, nothing to update", + volID, me.StagingPath, stagingTargetPath) + return nil + } + lastTargetPaths = me.TargetPaths + klog.Warningf("mount-cache: node stage volume ignore last cache entry for volume %s", volID) + } + + me = volumeMountEntry{NodeID: mc.NodeID, DriverName: mc.DriverName, DriverVersion: mc.DriverVersion} + + me.VolumeID = volID + me.Secrets = encodeCredentials(secrets) + me.StagingPath = stagingTargetPath + me.TargetPaths = lastTargetPaths + + curTime := time.Now() + me.CreateTime = curTime + me.LastMountTime = curTime + me.LoadCount = 0 + volumeMountCache.Volumes[volID] = me + if err := mc.NodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { + klog.Errorf("mount-cache: node stage volume failed to store a cache entry for volume %s: %v", volID, err) + return err + } + klog.Infof("mount-cache: node stage volume succ to store a cache entry for volume %s: %v", volID, me) + return nil +} + +func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string, stagingTargetPath string) error { + volumeMountCacheMtx.Lock() + defer volumeMountCacheMtx.Unlock() + delete(volumeMountCache.Volumes, volID) + if err := mc.NodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err != nil { + klog.Infof("mount-cache: node unstage volume failed to delete cache entry for volume %s: %s %v", volID, stagingTargetPath, err) + return err + } + return nil +} + +func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error { + volumeMountCacheMtx.Lock() + defer volumeMountCacheMtx.Unlock() + + _, ok := volumeMountCache.Volumes[volID] + if !ok { + klog.Errorf("mount-cache: node publish volume failed to find cache entry for volume %s", volID) + return errors.New("mount-cache: node publish volume failed to find cache entry for volume") + } + volumeMountCache.Volumes[volID].TargetPaths[targetPath] = readOnly + me := volumeMountCache.Volumes[volID] + if err := mc.NodeCacheStore.Update(genVolumeMountCacheFileName(volID), me); err != nil { + klog.Errorf("mount-cache: node publish volume failed to store a cache entry for volume %s: %v", volID, err) + return err + } + return nil +} + +func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath string) error { + volumeMountCacheMtx.Lock() + defer volumeMountCacheMtx.Unlock() + + _, ok := volumeMountCache.Volumes[volID] + if !ok { + klog.Errorf("mount-cache: node unpublish volume failed to find cache entry for volume %s", volID) + return errors.New("mount-cache: node unpublish volume failed to find cache entry for volume") + } + delete(volumeMountCache.Volumes[volID].TargetPaths, targetPath) + me := volumeMountCache.Volumes[volID] + if err :=
mc.NodeCacheStore.Update(genVolumeMountCacheFileName(volID), me); err != nil { + klog.Errorf("mount-cache: node unpublish volume failed to store a cache entry for volume %s: %v", volID, err) + return err + } + return nil +} + +func encodeCredentials(input map[string]string) (output map[string]string) { + output = make(map[string]string) + for key, value := range input { + nKey := base64.StdEncoding.EncodeToString([]byte(key)) + nValue := base64.StdEncoding.EncodeToString([]byte(value)) + output[nKey] = nValue + } + return output +} + +func decodeCredentials(input map[string]string) (output map[string]string) { + output = make(map[string]string) + for key, value := range input { + nKey, err := base64.StdEncoding.DecodeString(key) + if err != nil { + klog.Errorf("mount-cache: decode secret fail") + continue + } + nValue, err := base64.StdEncoding.DecodeString(value) + if err != nil { + klog.Errorf("mount-cache: decode secret fail") + continue + } + output[string(nKey)] = string(nValue) + } + return output +} diff --git a/pkg/cephfs/mountcache_test.go b/pkg/cephfs/mountcache_test.go new file mode 100644 index 000000000..6bba59c55 --- /dev/null +++ b/pkg/cephfs/mountcache_test.go @@ -0,0 +1,38 @@ +package cephfs + +import ( + "testing" +) + +func init() { +} + +func TestMountOneCacheEntry(t *testing.T) { +} + +func TestRemountHisMountedPath(t *testing.T) { +} + +func TestNodeStageVolume(t *testing.T) { +} + +func TestNodeUnStageVolume(t *testing.T) { +} + +func TestNodePublishVolume(t *testing.T) { +} + +func TestNodeUnpublishVolume(t *testing.T) { +} + +func TestEncodeDecodeCredentials(t *testing.T) { + secrets := make(map[string]string) + secrets["user_1"] = "value_1" + enSecrets := encodeCredentials(secrets) + deSecrets := decodeCredentials(enSecrets) + for key, value := range secrets { + if deSecrets[key] != value { + t.Errorf("key %s value %s not equal %s after encode decode", key, value, deSecrets[key]) + } + } +} diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index 51c44933a..345e4904d 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -154,6 +154,9 @@ func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequ klog.Errorf("failed to mount volume %s: %v", volID, err) return status.Error(codes.Internal, err.Error()) } + if err := volumeMountCache.nodeStageVolume(req.GetVolumeId(), stagingTargetPath, req.GetSecrets()); err != nil { + klog.Warningf("mount-cache: failed stage volume %s %s: %v", volID, stagingTargetPath, err) + } return nil } @@ -195,6 +198,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis return nil, status.Error(codes.Internal, err.Error()) } + if err := volumeMountCache.nodePublishVolume(volID, targetPath, req.GetReadonly()); err != nil { + klog.Warningf("mount-cache: failed publish volume %s %s: %v", volID, targetPath, err) + } + klog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath) return &csi.NodePublishVolumeResponse{}, nil @@ -209,6 +216,11 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu targetPath := req.GetTargetPath() + volID := req.GetVolumeId() + if err = volumeMountCache.nodeUnPublishVolume(volID, targetPath); err != nil { + klog.Warningf("mount-cache: failed unpublish volume %s %s: %v", volID, targetPath, err) + } + // Unmount the bind-mount if err = unmountVolume(targetPath); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -232,6 +244,11 @@ func (ns *NodeServer) 
NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag stagingTargetPath := req.GetStagingTargetPath() + volID := req.GetVolumeId() + if err = volumeMountCache.nodeUnStageVolume(volID, stagingTargetPath); err != nil { + klog.Warningf("mount-cache: failed unstage volume %s %s: %v", volID, stagingTargetPath, err) + } + // Unmount the volume if err = unmountVolume(stagingTargetPath); err != nil { return nil, status.Error(codes.Internal, err.Error()) diff --git a/pkg/util/cachepersister.go b/pkg/util/cachepersister.go index ba8918587..4faf6366c 100644 --- a/pkg/util/cachepersister.go +++ b/pkg/util/cachepersister.go @@ -56,6 +56,7 @@ func NewCachePersister(metadataStore, driverName string) (CachePersister, error) klog.Infof("cache-persister: using node as metadata cache persister") nc := &NodeCache{} nc.BasePath = PluginFolder + "/" + driverName + nc.CacheDir = "controller" return nc, nil } return nil, errors.New("cache-persister: couldn't parse metadatastorage flag") diff --git a/pkg/util/nodecache.go b/pkg/util/nodecache.go index 5659d4eaa..86278f4d2 100644 --- a/pkg/util/nodecache.go +++ b/pkg/util/nodecache.go @@ -32,10 +32,9 @@ import ( // NodeCache to store metadata type NodeCache struct { BasePath string + CacheDir string } -var cacheDir = "controller" - var errDec = errors.New("file not found") // EnsureCacheDirectory creates cache directory if not present @@ -52,15 +51,15 @@ func (nc *NodeCache) EnsureCacheDirectory(cacheDir string) error { //ForAll list the metadata in Nodecache and filters outs based on the pattern func (nc *NodeCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) error { - err := nc.EnsureCacheDirectory(cacheDir) + err := nc.EnsureCacheDirectory(nc.CacheDir) if err != nil { return errors.Wrap(err, "node-cache: couldn't ensure cache directory exists") } - files, err := ioutil.ReadDir(path.Join(nc.BasePath, cacheDir)) + files, err := ioutil.ReadDir(path.Join(nc.BasePath, nc.CacheDir)) if err != nil { return errors.Wrapf(err, "node-cache: failed to read %s folder", nc.BasePath) } - path := path.Join(nc.BasePath, cacheDir) + path := path.Join(nc.BasePath, nc.CacheDir) for _, file := range files { err = decodeObj(path, pattern, file, destObj) if err == errDec { @@ -102,9 +101,23 @@ func decodeObj(filepath, pattern string, file os.FileInfo, destObj interface{}) } +func (nc *NodeCache) Update(identifier string, data interface{}) error { + file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json") + identifierTmp := identifier + ".creating" + fileTmp := path.Join(nc.BasePath, nc.CacheDir, identifierTmp+".json") + os.Remove(fileTmp) + if err := nc.Create(identifierTmp, data); err != nil { + return errors.Wrapf(err, "node-cache: failed to create metadata storage file %s\n", file) + } + if err := os.Rename(fileTmp, file); err != nil { + return errors.Wrapf(err, "node-cache: couldn't rename %s as %s", fileTmp, file) + } + return nil +} + // Create creates the metadata file in cache directory with identifier name func (nc *NodeCache) Create(identifier string, data interface{}) error { - file := path.Join(nc.BasePath, cacheDir, identifier+".json") + file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json") fp, err := os.Create(file) if err != nil { return errors.Wrapf(err, "node-cache: failed to create metadata storage file %s\n", file) @@ -126,7 +139,7 @@ func (nc *NodeCache) Create(identifier string, data interface{}) error { // Get retrieves the metadata from cache directory with identifier name func (nc *NodeCache) Get(identifier string, data 
interface{}) error { - file := path.Join(nc.BasePath, cacheDir, identifier+".json") + file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json") // #nosec fp, err := os.Open(file) if err != nil { @@ -153,7 +166,7 @@ func (nc *NodeCache) Get(identifier string, data interface{}) error { // Delete deletes the metadata file from cache directory with identifier name func (nc *NodeCache) Delete(identifier string) error { - file := path.Join(nc.BasePath, cacheDir, identifier+".json") + file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json") err := os.Remove(file) if err != nil { if err == os.ErrNotExist { From 97f8c4b6774346a11124fd3a272c74b1b78e9e6a Mon Sep 17 00:00:00 2001 From: ShyamsundarR Date: Sat, 2 Mar 2019 12:29:52 -0500 Subject: [PATCH 64/89] Provide options to pass in Ceph cluster-id This commit provides the option to pass in Ceph cluster-id instead of a MON list from the storage class. This helps in moving towards a stateless CSI implementation. Tested the following, - PV provisioning and staging using cluster-id in storage class - PV provisioning and staging using MON list in storage class Did not test, - snapshot operations in either forms of the storage class Signed-off-by: ShyamsundarR --- cmd/rbd/main.go | 3 +- docs/deploy-rbd.md | 10 +- examples/README.md | 36 ++ examples/rbd/snapshotclass.yaml | 6 + examples/rbd/storageclass.yaml | 5 +- .../rbd/template-ceph-cluster-ID-config.yaml | 22 ++ ...te-ceph-cluster-ID-provisioner-secret.yaml | 19 + ...mplate-ceph-cluster-ID-publish-secret.yaml | 19 + .../rbd/template-csi-rbdplugin-patch.yaml | 33 ++ ...plate-csi-rbdplugin-provisioner-patch.yaml | 33 ++ pkg/rbd/rbd.go | 8 +- pkg/rbd/rbd_attach.go | 2 +- pkg/rbd/rbd_util.go | 154 +++++--- pkg/util/fileconfig.go | 257 +++++++++++++ pkg/util/fileconfig_test.go | 338 ++++++++++++++++++ 15 files changed, 897 insertions(+), 48 deletions(-) create mode 100644 examples/rbd/template-ceph-cluster-ID-config.yaml create mode 100644 examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml create mode 100644 examples/rbd/template-ceph-cluster-ID-publish-secret.yaml create mode 100644 examples/rbd/template-csi-rbdplugin-patch.yaml create mode 100644 examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml create mode 100644 pkg/util/fileconfig.go create mode 100644 pkg/util/fileconfig_test.go diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go index 35cf08390..cd78b6fc4 100644 --- a/cmd/rbd/main.go +++ b/cmd/rbd/main.go @@ -31,6 +31,7 @@ var ( nodeID = flag.String("nodeid", "", "node id") containerized = flag.Bool("containerized", true, "whether run as containerized") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") + configRoot = flag.String("configroot", "/etc", "Directory under which Ceph CSI configuration files will be present") ) func init() { @@ -56,7 +57,7 @@ func main() { } driver := rbd.NewDriver() - driver.Run(*driverName, *nodeID, *endpoint, *containerized, cp) + driver.Run(*driverName, *nodeID, *endpoint, *containerized, *configRoot, cp) os.Exit(0) } diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index a2f40a439..aef0ff32c 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -50,8 +50,9 @@ the configmaps to be stored Parameter | Required | Description --------- | -------- | ----------- -`monitors` | one of `monitors` and `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. 
`192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
-`monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | a string pointing the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed, the secret can be updated to pick up the new monitors.
+`monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
+`monValueFromSecret` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | a string pointing to the key in the credential secret, whose value is the mon list. This is used for the case when the monitors' IPs or hostnames are changed; the secret can be updated to pick up the new monitors.
+`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Value of Ceph cluster fsid, into which RBD images shall be created (e.g. `4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9`)
 `pool` | yes | Ceph pool into which the RBD image shall be created
 `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
 `imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
@@ -65,6 +66,11 @@ Admin credentials are required for provisioning new RBD images
 `ADMIN_NAME`: `ADMIN_PASSWORD` - note that the key of the key-value pair is the
 name of the client with admin privileges, and the value is its password
+If clusterID is specified, then a pair of secrets is required, with keys named
+`subjectid` and `credentials`, where `subjectid` is the name of the client
+with admin privileges and `credentials` contains its password. The required
+pair are the provisioner and publish secrets, and both should contain the
+same values.
+
 ## Deployment with Kubernetes
 Requires Kubernetes 1.11
diff --git a/examples/README.md b/examples/README.md
index 956f666b9..f295a3cbf 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -12,6 +12,11 @@ Once the plugin is successfully deployed, you'll need to customize
 setup. Please consult the documentation
 for info about available parameters.
+**NOTE:** See section
+[Cluster ID based configuration](#cluster-id-based-configuration) if using
+the `clusterID` instead of `monitors` or `monValueFromSecret` options in the
+storage class for RBD based provisioning before proceeding.
+
 After configuring the secrets, monitors, etc. you can deploy a
 testing Pod mounting a RBD image / CephFS volume:
@@ -213,3 +218,34 @@ Units: sectors of 1 * 512 = 512 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
 ```
+
+## Cluster ID based configuration
+
+Before creating a storage class that uses the option `clusterID` to refer to a
+Ceph cluster,
+
+**NOTE**: Substitute the output of `ceph fsid` instead of `` in
+ the mentioned template YAML files, and also the Ceph admin ID and
+ credentials in their respective options. Further, update options like
+ `monitors` and `pools` in the respective YAML files to contain the
+ appropriate information.
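+
+As an illustrative example (the fsid, monitor addresses, and pool below are
+placeholders, not values from a real cluster), if `ceph fsid` returns
+`4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9`, the `cluster-config` JSON in the
+config map template would be filled in as follows:
+
+```json
+{
+    "version": 1,
+    "cluster-config": {
+        "cluster-fsid": "4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9",
+        "monitors": [
+            "192.168.100.1:6789",
+            "192.168.100.2:6789"
+        ],
+        "pools": [
+            "rbd"
+        ]
+    }
+}
+```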
+ +Create the following config maps and secrets + +* `kubectl create -f ./rbd/template-ceph-cluster-ID-provisioner-secret.yaml` +* `kubectl create -f ./rbd/template-ceph-cluster-ID-publish-secret.yaml` +* `kubectl create -f ./rbd/template-ceph-cluster-ID-config.yaml` + +Modify the deployed CSI pods to additionally pass in the config maps and +secrets as volumes, + +* `kubectl patch daemonset csi-rbdplugin --patch "$(cat ./rbd/template-csi-rbdplugin-patch.yaml)"` +* `kubectl patch statefulset csi-rbdplugin-provisioner --patch "$(cat ./rbd/template-csi-rbdplugin-provisioner-patch.yaml)"` + +Restart the provisioner and node plugin daemonset. + +Storage class and snapshot class, using the `` as the value for + the option `clusterID`, can now be created on the cluster. + +Remaining steps to test functionality remains the same as mentioned in the +sections above. diff --git a/examples/rbd/snapshotclass.yaml b/examples/rbd/snapshotclass.yaml index 5e3b332f4..a85e707b3 100644 --- a/examples/rbd/snapshotclass.yaml +++ b/examples/rbd/snapshotclass.yaml @@ -6,6 +6,12 @@ metadata: snapshotter: rbd.csi.ceph.com parameters: pool: rbd + # Comma separated list of Ceph monitors + # if using FQDN, make sure csi plugin's dns policy is appropriate. monitors: mon1:port,mon2:port,... + # OR, + # Ceph cluster fsid, of the cluster to provision storage from + # clusterID: + csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret csi.storage.k8s.io/snapshotter-secret-namespace: default diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml index 75e95f3af..d77a39ba9 100644 --- a/examples/rbd/storageclass.yaml +++ b/examples/rbd/storageclass.yaml @@ -8,7 +8,10 @@ parameters: # Comma separated list of Ceph monitors # if using FQDN, make sure csi plugin's dns policy is appropriate. monitors: mon1:port,mon2:port,... - + # OR, + # Ceph cluster fsid, of the cluster to provision storage from + # clusterID: + # OR, # if "monitors" parameter is not set, driver to get monitors from same # secret as admin/user credentials. "monValueFromSecret" provides the # key in the secret whose value is the mons diff --git a/examples/rbd/template-ceph-cluster-ID-config.yaml b/examples/rbd/template-ceph-cluster-ID-config.yaml new file mode 100644 index 000000000..c859f22ee --- /dev/null +++ b/examples/rbd/template-ceph-cluster-ID-config.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ceph-cluster- + namespace: default +data: + cluster-config: | + { + "version": 1, + "cluster-config": { + "cluster-fsid": "", + "monitors": [ + "", + "" + ], + "pools": [ + "", + "" + ] + } + } diff --git a/examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml b/examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml new file mode 100644 index 000000000..707307596 --- /dev/null +++ b/examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + # The is used by the CSI plugin to uniquely identify and use a + # Ceph cluster, hence the value MUST match the output of the following + # command. 
+ # - Output of: `ceph fsid`
+ name: ceph-cluster--provisioner-secret
+ namespace: default
+data:
+ # Base64 encoded ID of the admin name
+ # - Typically output of: `echo -n "" | base64`
+ # Substitute the entire string including angle braces, with the base64 value
+ subjectid:
+ # Credentials of the above admin/user
+ # - Output of: `ceph auth get-key client.admin | base64`
+ # Substitute the entire string including angle braces, with the base64 value
+ credentials:
diff --git a/examples/rbd/template-ceph-cluster-ID-publish-secret.yaml b/examples/rbd/template-ceph-cluster-ID-publish-secret.yaml
new file mode 100644
index 000000000..ca31c0917
--- /dev/null
+++ b/examples/rbd/template-ceph-cluster-ID-publish-secret.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ # The is used by the CSI plugin to uniquely identify and use a
+ # Ceph cluster, hence the value MUST match the output of the following
+ # command.
+ # - Output of: `ceph fsid`
+ name: ceph-cluster--publish-secret
+ namespace: default
+data:
+ # Base64 encoded ID of the admin name
+ # - Typically output of: `echo -n "" | base64`
+ # Substitute the entire string including angle braces, with the base64 value
+ subjectid:
+ # Credentials of the above admin/user
+ # - Output of: `ceph auth get-key client.admin | base64`
+ # Substitute the entire string including angle braces, with the base64 value
+ credentials:
diff --git a/examples/rbd/template-csi-rbdplugin-patch.yaml b/examples/rbd/template-csi-rbdplugin-patch.yaml
new file mode 100644
index 000000000..016f8c711
--- /dev/null
+++ b/examples/rbd/template-csi-rbdplugin-patch.yaml
@@ -0,0 +1,33 @@
+---
+# This is a patch to the existing daemonset deployment of CSI rbdplugin.
+# This is to be used when adding a new Ceph cluster to the CSI plugin.
+# NOTE: Update csi-rbdplugin-provisioner StatefulSet as well with similar patch
+# Post substituting the in all places execute,
+# `kubectl patch daemonset csi-rbdplugin --patch\
+# "$(cat template-csi-rbdplugin-patch.yaml)"`
+# to patch the daemonset deployment.
+spec:
+ template:
+ spec:
+ containers:
+ - name: csi-rbdplugin
+ volumeMounts:
+ - name: provisioner-secret-
+ mountPath: "/etc/ceph-cluster--provisioner-secret"
+ readOnly: true
+ - name: publish-secret-
+ mountPath: "/etc/ceph-cluster--publish-secret"
+ readOnly: true
+ - name: ceph-cluster-
+ mountPath: "/etc/ceph-cluster-/"
+ readOnly: true
+ volumes:
+ - name: provisioner-secret-
+ secret:
+ secretName: ceph-cluster--provisioner-secret
+ - name: publish-secret-
+ secret:
+ secretName: ceph-cluster--publish-secret
+ - name: ceph-cluster-
+ configMap:
+ name: ceph-cluster-
diff --git a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml
new file mode 100644
index 000000000..083f14d2c
--- /dev/null
+++ b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml
@@ -0,0 +1,33 @@
+---
+# This is a patch to the existing statefulset deployment of CSI rbdplugin.
+# This is to be used when adding a new Ceph cluster to the CSI plugin.
+# NOTE: Update csi-rbdplugin DaemonSet as well with similar patch
+# Post substituting the in all places execute,
+# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\
+# "$(cat template-csi-rbdplugin-provisioner-patch.yaml)"`
+# to patch the statefulset deployment.
+spec: + template: + spec: + containers: + - name: csi-rbdplugin + volumeMounts: + - name: provisioner-secret- + mountPath: "/etc/ceph-cluster--provisioner-secret" + readOnly: true + - name: publish-secret- + mountPath: "/etc/ceph-cluster--publish-secret" + readOnly: true + - name: ceph-cluster- + mountPath: "/etc/ceph-cluster-/" + readOnly: true + volumes: + - name: provisioner-secret- + secret: + secretName: ceph-cluster--provisioner-secret + - name: publish-secret- + secret: + secretName: ceph-cluster--publish-secret + - name: ceph-cluster- + configMap: + name: ceph-cluster- diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index 3962a99f4..62983fdb2 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -47,6 +47,9 @@ type Driver struct { var ( version = "1.0.0" + // Fc is the global file config type, and stores the top level directory + // under which rest of the Ceph config files can be found + Fc util.FileConfig ) // NewDriver returns new rbd driver @@ -87,10 +90,13 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err // Run start a non-blocking grpc controller,node and identityserver for // rbd CSI driver which can serve multiple parallel requests -func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, cachePersister util.CachePersister) { +func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, configroot string, cachePersister util.CachePersister) { var err error klog.Infof("Driver: %v version: %v", driverName, version) + // Initialize fileconfig base path + Fc.BasePath = configroot + // Initialize default library driver r.cd = csicommon.NewCSIDriver(driverName, version, nodeID) if r.cd == nil { diff --git a/pkg/rbd/rbd_attach.go b/pkg/rbd/rbd_attach.go index 613cfa765..c187af562 100644 --- a/pkg/rbd/rbd_attach.go +++ b/pkg/rbd/rbd_attach.go @@ -280,7 +280,7 @@ func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (stri } klog.V(5).Infof("rbd: map mon %s", mon) - key, err := getRBDKey(userID, creds) + key, err := getRBDKey(volOpt.FsID, userID, creds) if err != nil { return "", err } diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index aa5b19f79..5ff7c5104 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -52,6 +52,7 @@ type rbdVolume struct { UserID string `json:"userId"` Mounter string `json:"mounter"` DisableInUseChecks bool `json:"disableInUseChecks"` + FsID string `json:"fsid"` } type rbdSnapshot struct { @@ -66,6 +67,7 @@ type rbdSnapshot struct { SizeBytes int64 `json:"sizeBytes"` AdminID string `json:"adminId"` UserID string `json:"userId"` + FsID string `json:"fsid"` } var ( @@ -85,12 +87,23 @@ var ( supportedFeatures = sets.NewString("layering") ) -func getRBDKey(id string, credentials map[string]string) (string, error) { - - if key, ok := credentials[id]; ok { - return key, nil +func getRBDKey(fsid string, id string, credentials map[string]string) (string, error) { + var ok bool + var err error + var key string + if key, ok = credentials[id]; !ok { + if fsid != "" { + key, err = Fc.GetCredentialForSubject(fsid, id) + if err != nil { + klog.Errorf("failed getting credentials (%s)", err) + return "", fmt.Errorf("RBD key for ID: %s not found in config store", id) + } + } else { + return "", fmt.Errorf("RBD key for ID: %s not found", id) + } } - return "", fmt.Errorf("RBD key for ID: %s not found", id) + + return key, nil } func getMon(pOpts *rbdVolume, credentials map[string]string) (string, error) { @@ -123,7 +136,7 @@ func createRBDImage(pOpts *rbdVolume, volSz 
int, adminID string, credentials map image := pOpts.VolName volSzMiB := fmt.Sprintf("%dM", volSz) - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pOpts.FsID, adminID, credentials) if err != nil { return err } @@ -154,7 +167,7 @@ func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) ( image := pOpts.VolName // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. - key, err := getRBDKey(userID, credentials) + key, err := getRBDKey(pOpts.FsID, userID, credentials) if err != nil { return false, "", err } @@ -202,7 +215,7 @@ func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]str klog.Info("rbd is still being used ", image) return fmt.Errorf("rbd %s is still being used", image) } - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pOpts.FsID, adminID, credentials) if err != nil { return err } @@ -227,24 +240,79 @@ func execCommand(command string, args []string) ([]byte, error) { return cmd.CombinedOutput() } +func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, noerr error) { + var err error + var ok bool + + monitors, ok = options["monitors"] + if !ok { + // if mons are not set in options, check if they are set in secret + if monInSecret, ok = options["monValueFromSecret"]; !ok { + // if mons are not in secret, check if we have a cluster-fsid + if fsID, ok = options["clusterID"]; !ok { + return "", "", "", fmt.Errorf("either monitors or monValueFromSecret or clusterID must be set") + } + if monitors, err = Fc.GetMons(fsID); err != nil { + klog.Errorf("failed getting mons (%s)", err) + return "", "", "", fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID) + } + } + } + + return +} + +func getIDs(options map[string]string, fsID string) (adminID, userID string, noerr error) { + var err error + var ok bool + + adminID, ok = options["adminid"] + if !ok { + if fsID != "" { + if adminID, err = Fc.GetProvisionerSubjectID(fsID); err != nil { + klog.Errorf("failed getting subject (%s)", err) + return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID) + } + } else { + adminID = rbdDefaultAdminID + } + } + + userID, ok = options["userid"] + if !ok { + if fsID != "" { + if userID, err = Fc.GetPublishSubjectID(fsID); err != nil { + klog.Errorf("failed getting subject (%s)", err) + return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID) + } + } else { + userID = rbdDefaultUserID + } + } + + return +} + func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) { var ok bool + var err error + rbdVol := &rbdVolume{} rbdVol.Pool, ok = volOptions["pool"] if !ok { return nil, errors.New("missing required parameter pool") } - rbdVol.Monitors, ok = volOptions["monitors"] - if !ok { - // if mons are not set in options, check if they are set in secret - if rbdVol.MonValueFromSecret, ok = volOptions["monValueFromSecret"]; !ok { - return nil, errors.New("either monitors or monValueFromSecret must be set") - } + + rbdVol.Monitors, rbdVol.FsID, rbdVol.MonValueFromSecret, err = getMonsAndFsID(volOptions) + if err != nil { + return nil, err } + rbdVol.ImageFormat, ok = volOptions["imageFormat"] if !ok { rbdVol.ImageFormat = rbdImageFormat2 } + if rbdVol.ImageFormat == rbdImageFormat2 { // if no image features is provided, it results in empty string // which disable all RBD image format 2 features as we expected @@ -264,48 +332,50 @@ func 
getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) klog.V(3).Infof("setting disableInUseChecks on rbd volume to: %v", disableInUseChecks) rbdVol.DisableInUseChecks = disableInUseChecks - getCredsFromVol(rbdVol, volOptions) + err = getCredsFromVol(rbdVol, volOptions) + if err != nil { + return nil, err + } + return rbdVol, nil } -func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) { +func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error { var ok bool - rbdVol.AdminID, ok = volOptions["adminid"] - if !ok { - rbdVol.AdminID = rbdDefaultAdminID - } - rbdVol.UserID, ok = volOptions["userid"] - if !ok { - rbdVol.UserID = rbdDefaultUserID + var err error + + rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.FsID) + if err != nil { + return err } + rbdVol.Mounter, ok = volOptions["mounter"] if !ok { rbdVol.Mounter = rbdDefaultMounter } + + return nil } + func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) { var ok bool + var err error + rbdSnap := &rbdSnapshot{} rbdSnap.Pool, ok = snapOptions["pool"] if !ok { return nil, errors.New("missing required parameter pool") } - rbdSnap.Monitors, ok = snapOptions["monitors"] - if !ok { - // if mons are not set in options, check if they are set in secret - if rbdSnap.MonValueFromSecret, ok = snapOptions["monValueFromSecret"]; !ok { - return nil, errors.New("either monitors or monValueFromSecret must be set") - } - } - rbdSnap.AdminID, ok = snapOptions["adminid"] - if !ok { - rbdSnap.AdminID = rbdDefaultAdminID - } - rbdSnap.UserID, ok = snapOptions["userid"] - if !ok { - rbdSnap.UserID = rbdDefaultUserID + + rbdSnap.Monitors, rbdSnap.FsID, rbdSnap.MonValueFromSecret, err = getMonsAndFsID(snapOptions) + if err != nil { + return nil, err } + rbdSnap.AdminID, rbdSnap.UserID, err = getIDs(snapOptions, rbdSnap.FsID) + if err != nil { + return nil, err + } return rbdSnap, nil } @@ -367,7 +437,7 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string] image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pOpts.FsID, adminID, credentials) if err != nil { return err } @@ -430,7 +500,7 @@ func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pOpts.FsID, adminID, credentials) if err != nil { return err } @@ -457,7 +527,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[strin image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pOpts.FsID, adminID, credentials) if err != nil { return err } @@ -484,7 +554,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pOpts.FsID, adminID, credentials) if err != nil { return err } @@ -511,7 +581,7 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string image := pVolOpts.VolName snapID := pSnapOpts.SnapID - key, err := getRBDKey(adminID, credentials) + key, err := getRBDKey(pVolOpts.FsID, adminID, credentials) if err != nil { return err } diff --git a/pkg/util/fileconfig.go b/pkg/util/fileconfig.go new file mode 100644 index 000000000..fb58dcbb8 --- /dev/null +++ b/pkg/util/fileconfig.go @@ -0,0 +1,257 @@ +/* +Copyright 2019 ceph-csi 
authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strings" +) + +/* FileConfig processes config information stored in files, mostly mapped into + the runtime container. + + The calls explicitly do not cache any information, to ensure that updated + configuration is always read from the files (for example when these are + mapped in as k8s config maps or secrets). + + The BasePath is the path where config files are found, and config files are + expected to be named in the following manner, + - BasePath/ceph-cluster-/cluster-config + - BasePath/ceph-cluster--provisioner-secret/credentials + - BasePath/ceph-cluster--provisioner-secret/subjectid + - BasePath/ceph-cluster--publish-secret/credentials + - BasePath/ceph-cluster--publish-secret/subjectid + Where, + - cluster-fsid is the Ceph cluster fsid in UUID ascii notation + - The cluster-fsid corresponds to the cluster for which the + configuration information is present in the mentioned files + - cluster-config is expected to be a JSON blob with the following + structure, + { + "version": 1, + "cluster-config": { + "cluster-fsid": "", + "monitors": [ + "IP/DNS:port", + "IP/DNS:port" + ], + "pools": [ + "", + "" + ] + } + } + - credentials is expected to contain Base64 encoded credentials for the + user encoded in subjectid + - subjectid is the username/subject to use with calls to Ceph, and is + also Base64 encoded + - Provisioner secret contains secrets to use by the provisioning system + - Publish secret contains secrets to use by the publishing/staging + system +*/ + +// FileConfig type with basepath that points to source of all config files +type FileConfig struct { + BasePath string +} + +// ClusterConfigv1 strongly typed JSON spec for cluster-config above +type ClusterConfigv1 struct { + ClusterFsID string `json:"cluster-fsid"` + Monitors []string `json:"monitors"` + Pools []string `json:"pools"` +} + +// ClusterConfigJSONv1 strongly typed JSON spec for cluster-config above +type ClusterConfigJSONv1 struct { + Version int `json:"version"` + ClusterConf *ClusterConfigv1 `json:"cluster-config"` +} + +// Constants and enum for constructPath operation +type pathType int + +const ( + clusterConfig pathType = 0 + pubSubject pathType = 1 + pubCreds pathType = 2 + provSubject pathType = 3 + provCreds pathType = 4 +) + +const ( + fNamePrefix = "ceph-cluster" + fNameSep = "-" + fNamePubPrefix = "publish-secret" + fNameProvPrefix = "provisioner-secret" + fNameCephConfig = "cluster-config" + fNamePubSubject = "subjectid" + fNameProvSubject = "subjectid" + fNamePubCred = "credentials" + fNameProvCred = "credentials" +) + +// constructPath constructs well defined paths based on the type of config +// file that needs to be accessed. 
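+// For example (illustrative values, assuming a BasePath of "/etc" and a
+// cluster fsid of "4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9"), this yields,
+//	clusterConfig: /etc/ceph-cluster-4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9/cluster-config
+//	provCreds: /etc/ceph-cluster-4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9-provisioner-secret/credentials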
+func (pType pathType) constructPath(basepath string, fsid string) (filePath string, noerr error) { + if fsid == "" || basepath == "" { + return "", fmt.Errorf("missing/empty fsid (%s) or basepath (%s) for config files", fsid, basepath) + } + + switch pType { + case clusterConfig: + filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + + "/" + fNameCephConfig + case pubSubject: + filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + + fNameSep + fNamePubPrefix + "/" + fNamePubSubject + case pubCreds: + filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + + fNameSep + fNamePubPrefix + "/" + fNamePubCred + case provSubject: + filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + + fNameSep + fNameProvPrefix + "/" + fNameProvSubject + case provCreds: + filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + + fNameSep + fNameProvPrefix + "/" + fNameProvCred + default: + return "", fmt.Errorf("invalid path type (%d) specified", pType) + } + + return +} + +// GetMons returns a comma separated MON list, that is read in from the config +// files, based on the passed in fsid +func (fc *FileConfig) GetMons(fsid string) (string, error) { + fPath, err := clusterConfig.constructPath(fc.BasePath, fsid) + if err != nil { + return "", err + } + + // #nosec + contentRaw, err := ioutil.ReadFile(fPath) + if err != nil { + return "", err + } + + var cephConfig ClusterConfigJSONv1 + + err = json.Unmarshal(contentRaw, &cephConfig) + if err != nil { + return "", err + } + + if cephConfig.ClusterConf.ClusterFsID != fsid { + return "", fmt.Errorf("mismatching Ceph cluster fsid (%s) in file, passed in (%s)", cephConfig.ClusterConf.ClusterFsID, fsid) + } + + if len(cephConfig.ClusterConf.Monitors) == 0 { + return "", fmt.Errorf("monitor list empty in configuration file") + } + + return strings.Join(cephConfig.ClusterConf.Monitors, ","), nil +} + +// GetProvisionerSubjectID returns the provisioner subject ID from the on-disk +// configuration file, based on the passed in fsid +func (fc *FileConfig) GetProvisionerSubjectID(fsid string) (string, error) { + fPath, err := provSubject.constructPath(fc.BasePath, fsid) + if err != nil { + return "", err + } + + // #nosec + contentRaw, err := ioutil.ReadFile(fPath) + if err != nil { + return "", err + } + + if string(contentRaw) == "" { + return "", fmt.Errorf("missing/empty provisioner subject ID from file (%s)", fPath) + } + + return string(contentRaw), nil +} + +// GetPublishSubjectID returns the publish subject ID from the on-disk +// configuration file, based on the passed in fsid +func (fc *FileConfig) GetPublishSubjectID(fsid string) (string, error) { + fPath, err := pubSubject.constructPath(fc.BasePath, fsid) + if err != nil { + return "", err + } + + // #nosec + contentRaw, err := ioutil.ReadFile(fPath) + if err != nil { + return "", err + } + + if string(contentRaw) == "" { + return "", fmt.Errorf("missing/empty publish subject ID from file (%s)", fPath) + } + + return string(contentRaw), nil +} + +// GetCredentialForSubject returns the credentials for the requested subject +// from the cluster config for the passed in fsid +func (fc *FileConfig) GetCredentialForSubject(fsid, subject string) (string, error) { + var fPath string + var err error + + tmpSubject, err := fc.GetPublishSubjectID(fsid) + if err != nil { + return "", err + } + + if tmpSubject != subject { + tmpSubject, err = fc.GetProvisionerSubjectID(fsid) + if err != nil { + return "", err + } + + if tmpSubject != subject { + return "", fmt.Errorf("requested subject did not 
match stored publish/provisioner subjectID") + } + + fPath, err = provCreds.constructPath(fc.BasePath, fsid) + if err != nil { + return "", err + } + } else { + fPath, err = pubCreds.constructPath(fc.BasePath, fsid) + if err != nil { + return "", err + } + } + + // #nosec + contentRaw, err := ioutil.ReadFile(fPath) + if err != nil { + return "", err + } + + if string(contentRaw) == "" { + return "", fmt.Errorf("missing/empty credentials in file (%s)", fPath) + } + + return string(contentRaw), nil +} diff --git a/pkg/util/fileconfig_test.go b/pkg/util/fileconfig_test.go new file mode 100644 index 000000000..ae86e2de0 --- /dev/null +++ b/pkg/util/fileconfig_test.go @@ -0,0 +1,338 @@ +/* +Copyright 2019 ceph-csi authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// nolint: gocyclo + +package util + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +var testFsid = "dummy-fs-id" +var basePath = "./test_artifacts" + +// nolint: gocyclo +func TestGetMons(t *testing.T) { + var fc FileConfig + var err error + + configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + defer os.RemoveAll(basePath) + + fc.BasePath = basePath + + // TEST: Empty fsid should error out + _, err = fc.GetMons("") + if err == nil { + t.Errorf("Call passed, expected to fail due to fsid missing!") + } + + // TEST: Missing file should error out + _, err = fc.GetMons(testFsid) + if err == nil { + t.Errorf("Call passed, expected to fail due to missing config file!") + } + + // TEST: Empty file should error out + err = os.MkdirAll(configFileDir, 0700) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + data := []byte{} + err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + _, err = fc.GetMons(testFsid) + if err == nil { + t.Errorf("Call passed, expected to fail due to missing config file!") + } + + /* Tests with bad JSON content should get caught due to strongly typed JSON + struct in implementation and are not tested here */ + + // TEST: Send JSON with incorrect fsid + data = []byte(` + { + "version": 1, + "cluster-config": { + "cluster-fsid": "bad_fsid", + "monitors": ["IP1:port1","IP2:port2"], + "pools": ["pool1","pool2"] + } + }`) + err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + _, err = fc.GetMons(testFsid) + if err == nil { + t.Errorf("Expected to fail on bad fsid in JSON") + } + + // TEST: Send JSON with empty mon list + data = []byte(` + { + "version": 1, + "cluster-config": { + "cluster-fsid": "` + testFsid + `", + "monitors": [], + "pools": ["pool1","pool2"] + } + }`) + err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + _, err = fc.GetMons(testFsid) + if err == nil { + t.Errorf("Expected to fail in empty MON list in JSON") + } + + // TEST: Check valid return from successful call + data = []byte(` + { + "version": 1, + "cluster-config": { + 
"cluster-fsid": "` + testFsid + `", + "monitors": ["IP1:port1","IP2:port2"], + "pools": ["pool1","pool2"] + } + }`) + err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + output, err := fc.GetMons(testFsid) + if err != nil { + t.Errorf("Call failed %s", err) + } + if output != "IP1:port1,IP2:port2" { + t.Errorf("Failed to generate correct output: expected %s, got %s", + "IP1:port1,IP2:port2", output) + } +} + +func TestGetProvisionerSubjectID(t *testing.T) { + var fc FileConfig + var err error + + configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNameProvPrefix + defer os.RemoveAll(basePath) + + fc.BasePath = basePath + + // TEST: Empty fsid should error out + _, err = fc.GetProvisionerSubjectID("") + if err == nil { + t.Errorf("Call passed, expected to fail due to fsid missing!") + } + + // TEST: Missing file should error out + _, err = fc.GetProvisionerSubjectID(testFsid) + if err == nil { + t.Errorf("Call passed, expected to fail due to missing config file!") + } + + // TEST: Empty file should error out + err = os.MkdirAll(configFileDir, 0700) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + data := []byte{} + err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + _, err = fc.GetProvisionerSubjectID(testFsid) + if err == nil { + t.Errorf("Call passed, expected to fail due to missing config file!") + } + + // TEST: Check valid return from successful call + data = []byte("admin") + err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + output, err := fc.GetProvisionerSubjectID(testFsid) + if err != nil || output != "admin" { + t.Errorf("Failed to get valid subject ID: expected %s, got %s, err %s", "admin", output, err) + } +} + +func TestGetPublishSubjectID(t *testing.T) { + var fc FileConfig + var err error + + configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNamePubPrefix + defer os.RemoveAll(basePath) + + fc.BasePath = basePath + + // TEST: Empty fsid should error out + _, err = fc.GetPublishSubjectID("") + if err == nil { + t.Errorf("Call passed, expected to fail due to fsid missing!") + } + + // TEST: Missing file should error out + _, err = fc.GetPublishSubjectID(testFsid) + if err == nil { + t.Errorf("Call passed, expected to fail due to missing config file!") + } + + // TEST: Empty file should error out + err = os.MkdirAll(configFileDir, 0700) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + data := []byte{} + err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + _, err = fc.GetPublishSubjectID(testFsid) + if err == nil { + t.Errorf("Call passed, expected to fail due to missing config file!") + } + + // TEST: Check valid return from successful call + data = []byte("admin") + err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644) + if err != nil { + t.Errorf("Test utility error %s", err) + } + + output, err := fc.GetPublishSubjectID(testFsid) + if err != nil || output != "admin" { + t.Errorf("Failed to get valid subject ID: expected %s, got %s, err %s", "admin", output, err) + } +} + +// nolint: gocyclo +func TestGetCredentialForSubject(t *testing.T) { + var fc FileConfig + var err error + + configFileDir := basePath + "/" + fNamePrefix + 
fNameSep + testFsid + fNameSep + fNamePubPrefix
+ defer os.RemoveAll(basePath)
+
+ fc.BasePath = basePath
+
+ // TEST: Empty fsid should error out
+ _, err = fc.GetCredentialForSubject("", "subject")
+ if err == nil {
+ t.Errorf("Call passed, expected to fail due to fsid missing!")
+ }
+
+ // TEST: Missing file should error out
+ _, err = fc.GetCredentialForSubject(testFsid, "")
+ if err == nil {
+ t.Errorf("Call passed, expected to fail due to missing config file!")
+ }
+
+ // TEST: Empty subject file should error out
+ err = os.MkdirAll(configFileDir, 0700)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ data := []byte{}
+ err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ _, err = fc.GetCredentialForSubject(testFsid, "adminpub")
+ if err == nil {
+ t.Errorf("Call passed, expected to fail due to empty subject file!")
+ }
+
+ // TEST: Empty subject cred file should error out
+ data = []byte("adminpub")
+ err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+ data = []byte{}
+ err = ioutil.WriteFile(configFileDir+"/"+fNamePubCred, data, 0644)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ _, err = fc.GetCredentialForSubject(testFsid, "adminpub")
+ if err == nil {
+ t.Errorf("Call passed, expected to fail due to empty cred content!")
+ }
+
+ // TEST: Success fetching pub creds
+ data = []byte("testpwd")
+ err = ioutil.WriteFile(configFileDir+"/"+fNamePubCred, data, 0644)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ output, err := fc.GetCredentialForSubject(testFsid, "adminpub")
+ if err != nil || output != "testpwd" {
+ t.Errorf("Failed to get valid Publish credentials: expected %s, got %s, err %s", "testpwd", output, err)
+ }
+
+ // TEST: Fetch missing prov creds
+ configFileDir = basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNameProvPrefix
+ err = os.MkdirAll(configFileDir, 0700)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ data = []byte("adminprov")
+ err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ fmt.Printf("Starting test")
+ _, err = fc.GetCredentialForSubject(testFsid, "adminprov")
+ if err == nil {
+ t.Errorf("Call passed, expected to fail due to missing cred content!")
+ }
+
+ // TEST: Fetch prov creds successfully
+ data = []byte("testpwd")
+ err = ioutil.WriteFile(configFileDir+"/"+fNameProvCred, data, 0644)
+ if err != nil {
+ t.Errorf("Test utility error %s", err)
+ }
+
+ output, err = fc.GetCredentialForSubject(testFsid, "adminprov")
+ if err != nil || output != "testpwd" {
+ t.Errorf("Failed to get valid Provisioner credentials: expected %s, got %s, err %s", "testpwd", output, err)
+ }
+}
From 2064e674a4912d12d327c6e4818a1357ecfd95ce Mon Sep 17 00:00:00 2001
From: ShyamsundarR
Date: Thu, 7 Mar 2019 16:03:33 -0500
Subject: [PATCH 65/89] Addressed using k8s client APIs to fetch secrets

Based on the review comments, addressed the following,

- Moved away from having to update the pod with volumes
when a new Ceph cluster is added for provisioning via the CSI driver

- The above now uses k8s APIs to fetch secrets
- TBD: Need to add a watch mechanism such that these secrets can be cached
and updated when changed

- Folded the Ceph configuration and ID/key config map and secrets
into a single secret

- Provided the ability to read the same config via mapped or created 
files within the pod Tests: - Ran PV creation/deletion/attach/use using new scheme StorageClass - Ran PV creation/deletion/attach/use using older scheme to ensure nothing is broken - Did not execute snapshot related tests Signed-off-by: ShyamsundarR --- cmd/rbd/main.go | 2 +- .../rbd/kubernetes/csi-nodeplugin-rbac.yaml | 3 + .../kubernetes/csi-rbdplugin-provisioner.yaml | 1 + deploy/rbd/kubernetes/csi-rbdplugin.yaml | 1 + docs/deploy-rbd.md | 8 +- examples/README.md | 39 +- examples/rbd/storageclass.yaml | 9 + .../rbd/template-ceph-cluster-ID-config.yaml | 22 -- ...te-ceph-cluster-ID-provisioner-secret.yaml | 19 - ...mplate-ceph-cluster-ID-publish-secret.yaml | 19 - .../rbd/template-ceph-cluster-ID-secret.yaml | 37 ++ .../rbd/template-csi-rbdplugin-patch.yaml | 18 +- ...plate-csi-rbdplugin-provisioner-patch.yaml | 18 +- pkg/rbd/rbd.go | 12 +- pkg/rbd/rbd_util.go | 52 +-- pkg/util/configstore.go | 138 +++++++ pkg/util/configstore_test.go | 161 +++++++++ pkg/util/fileconfig.go | 259 ++------------ pkg/util/fileconfig_test.go | 338 ------------------ pkg/util/k8sconfig.go | 59 +++ 20 files changed, 506 insertions(+), 709 deletions(-) delete mode 100644 examples/rbd/template-ceph-cluster-ID-config.yaml delete mode 100644 examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml delete mode 100644 examples/rbd/template-ceph-cluster-ID-publish-secret.yaml create mode 100644 examples/rbd/template-ceph-cluster-ID-secret.yaml create mode 100644 pkg/util/configstore.go create mode 100644 pkg/util/configstore_test.go delete mode 100644 pkg/util/fileconfig_test.go create mode 100644 pkg/util/k8sconfig.go diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go index cd78b6fc4..f5f727bc9 100644 --- a/cmd/rbd/main.go +++ b/cmd/rbd/main.go @@ -31,7 +31,7 @@ var ( nodeID = flag.String("nodeid", "", "node id") containerized = flag.Bool("containerized", true, "whether run as containerized") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") - configRoot = flag.String("configroot", "/etc", "Directory under which Ceph CSI configuration files will be present") + configRoot = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets") ) func init() { diff --git a/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml b/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml index c960408e6..9a5ffed1b 100644 --- a/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml +++ b/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml @@ -10,6 +10,9 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: rbd-csi-nodeplugin rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "update"] diff --git a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml index aef25c04c..bd14fa363 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml @@ -67,6 +67,7 @@ spec: - "--drivername=rbd.csi.ceph.com" - "--containerized=true" - "--metadatastorage=k8s_configmap" + - "--configroot=k8s_objects" env: - name: HOST_ROOTFS value: "/rootfs" diff --git a/deploy/rbd/kubernetes/csi-rbdplugin.yaml b/deploy/rbd/kubernetes/csi-rbdplugin.yaml index 22fe06f3f..95bd27b7d 100644 --- a/deploy/rbd/kubernetes/csi-rbdplugin.yaml +++ b/deploy/rbd/kubernetes/csi-rbdplugin.yaml @@ -57,6 +57,7 @@ spec: - 
"--drivername=rbd.csi.ceph.com" - "--containerized=true" - "--metadatastorage=k8s_configmap" + - "--configroot=k8s_objects" env: - name: HOST_ROOTFS value: "/rootfs" diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md index aef0ff32c..a0f9b2d34 100644 --- a/docs/deploy-rbd.md +++ b/docs/deploy-rbd.md @@ -33,6 +33,7 @@ Option | Default value | Description `--nodeid` | _empty_ | This node's ID `--containerized` | true | Whether running in containerized mode `--metadatastorage` | _empty_ | Whether should metadata be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) +`--configroot` | `/etc/csi-config` | Directory in which CSI specific Ceph cluster configurations are present, OR the value `k8s_objects` if present as kubernetes secrets" **Available environmental variables:** @@ -52,7 +53,7 @@ Parameter | Required | Description --------- | -------- | ----------- `monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`) `monValueFromSecret` | one of `monitors`, `clusterID` or and `monValueFromSecret` must be set | a string pointing the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed, the secret can be updated to pick up the new monitors. -`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Value of Ceph cluster fsid, into which RBD images shall be created (e.g. `4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9`) +`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Value of `ceph fsid`, into which RBD images shall be created (e.g. `4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9`) `pool` | yes | Ceph pool into which the RBD image shall be created `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format) `imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature) @@ -60,6 +61,11 @@ Parameter | Required | Description `csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | namespaces of the above Secret objects `mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images +NOTE: If `clusterID` parameter is used, then an accompanying Ceph cluster +configuration secret or config files needs to be provided to the running pods. +Refer to `examples/README.md` section titled "Cluster ID based configuration" +for more information. + **Required secrets:** Admin credentials are required for provisioning new RBD images `ADMIN_NAME`: diff --git a/examples/README.md b/examples/README.md index f295a3cbf..aa7f710d9 100644 --- a/examples/README.md +++ b/examples/README.md @@ -14,7 +14,7 @@ Please consult the documentation for info about available parameters. **NOTE:** See section [Cluster ID based configuration](#cluster-id-based-configuration) if using -the `clusterID` instead of `monitors` or `monValueFromSecret` options in the +the `clusterID` instead of `monitors` or `monValueFromSecret` option in the storage class for RBD based provisioning before proceeding. After configuring the secrets, monitors, etc. 
you can deploy a @@ -222,30 +222,29 @@ I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes ## Cluster ID based configuration Before creating a storage class that uses the option `clusterID` to refer to a -Ceph cluster, +Ceph cluster, the following actions need to be completed. -**NOTE**: Substitute the output of `ceph fsid` instead of `` in - the mentioned template YAML files, and also the Ceph admin ID and - credentials in their respective options. Further, update options like - `monitors` and `pools` in the respective YAML files to contain the - appropriate information. +Get the following information from the Ceph cluster, -Create the following config maps and secrets +* Ceph Cluster fsid + * Output of `ceph fsid` + * Used to substitute `` references in the files below +* Admin ID and key, that has privileges to perform CRUD operations on the Ceph + cluster and pools of choice + * Key is typically the output of, `ceph auth get-key client.admin` where + `admin` is the Admin ID + * Used to substitute admin/user id and key values in the files below +* Ceph monitor list + * Typically in the output of `ceph mon dump` + * Used to prepare comma separated MON list where required in the files below -* `kubectl create -f ./rbd/template-ceph-cluster-ID-provisioner-secret.yaml` -* `kubectl create -f ./rbd/template-ceph-cluster-ID-publish-secret.yaml` -* `kubectl create -f ./rbd/template-ceph-cluster-ID-config.yaml` +Update the template `rbd/template-ceph-cluster-ID-secret.yaml` with values from +a Ceph cluster and create the following secret, -Modify the deployed CSI pods to additionally pass in the config maps and -secrets as volumes, +* `kubectl create -f rbd/template-ceph-cluster-ID-secret.yaml` -* `kubectl patch daemonset csi-rbdplugin --patch "$(cat ./rbd/template-csi-rbdplugin-patch.yaml)"` -* `kubectl patch statefulset csi-rbdplugin-provisioner --patch "$(cat ./rbd/template-csi-rbdplugin-provisioner-patch.yaml)"` - -Restart the provisioner and node plugin daemonset. - -Storage class and snapshot class, using the `` as the value for - the option `clusterID`, can now be created on the cluster. +Storage class and snapshot class, using `` as the value for the + option `clusterID`, can now be created on the cluster. Remaining steps to test functionality remains the same as mentioned in the sections above. diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml index d77a39ba9..3fa497919 100644 --- a/examples/rbd/storageclass.yaml +++ b/examples/rbd/storageclass.yaml @@ -11,6 +11,9 @@ parameters: # OR, # Ceph cluster fsid, of the cluster to provision storage from # clusterID: + # If using clusterID based configuration, CSI pods need to be passed in a + # secret named ceph-cluster- that contains the cluster + # information. (as in the provided template-ceph-cluster-ID-secret.yaml) # OR, # if "monitors" parameter is not set, driver to get monitors from same # secret as admin/user credentials. "monValueFromSecret" provides the @@ -28,12 +31,18 @@ parameters: imageFeatures: layering # The secrets have to contain Ceph admin credentials. 
+ # NOTE: If using "clusterID" instead of "monitors" above, the following + # secrets MAY be added to the ceph-cluster- secret and skipped + # here csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret csi.storage.k8s.io/provisioner-secret-namespace: default csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret csi.storage.k8s.io/node-publish-secret-namespace: default # Ceph users for operating RBD + # NOTE: If using "clusterID" instead of "monitors" above, the following + # IDs MAY be added to the ceph-cluster- secret and skipped + # here adminid: admin userid: kubernetes # uncomment the following to use rbd-nbd as mounter on supported nodes diff --git a/examples/rbd/template-ceph-cluster-ID-config.yaml b/examples/rbd/template-ceph-cluster-ID-config.yaml deleted file mode 100644 index c859f22ee..000000000 --- a/examples/rbd/template-ceph-cluster-ID-config.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ceph-cluster- - namespace: default -data: - cluster-config: | - { - "version": 1, - "cluster-config": { - "cluster-fsid": "", - "monitors": [ - "", - "" - ], - "pools": [ - "", - "" - ] - } - } diff --git a/examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml b/examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml deleted file mode 100644 index 707307596..000000000 --- a/examples/rbd/template-ceph-cluster-ID-provisioner-secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - # The is used by the CSI plugin to uniquely identify and use a - # Ceph cluster, hence the value MUST match the output of the following - # command. - # - Output of: `ceph fsid` - name: ceph-cluster--provisioner-secret - namespace: default -data: - # Base64 encoded ID of the admin name - # - Typically output of: `echo -n "" | base64` - # Substitute the entire string including angle braces, with the base64 value - subjectid: - # Credentials of the above admin/user - # - Output of: `ceph auth get-key client.admin | base64` - # Substitute the entire string including angle braces, with the base64 value - credentials: diff --git a/examples/rbd/template-ceph-cluster-ID-publish-secret.yaml b/examples/rbd/template-ceph-cluster-ID-publish-secret.yaml deleted file mode 100644 index ca31c0917..000000000 --- a/examples/rbd/template-ceph-cluster-ID-publish-secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - # The is used by the CSI plugin to uniquely identify and use a - # Ceph cluster, hence the value MUST match the output of the following - # command. - # - Output of: `ceph fsid` - name: ceph-cluster--publish-secret - namespace: default -data: - # Base64 encoded ID of the admin name - # - Typically output of: `echo -n "" | base64` - # Substitute the entire string including angle braces, with the base64 value - subjectid: - # Credentials of the above admin/user - # - Output of: `ceph auth get-key client.admin | base64` - # Substitute the entire string including angle braces, with the base64 value - credentials: diff --git a/examples/rbd/template-ceph-cluster-ID-secret.yaml b/examples/rbd/template-ceph-cluster-ID-secret.yaml new file mode 100644 index 000000000..d4c70c0fb --- /dev/null +++ b/examples/rbd/template-ceph-cluster-ID-secret.yaml @@ -0,0 +1,37 @@ +--- +# This is a template secret that helps define a Ceph cluster configuration +# as required by the CSI driver. 
This is used when a StorageClass has the +# "clusterID" defined as one of the parameters, to provide the CSI instance +# Ceph cluster configuration information. +apiVersion: v1 +kind: Secret +metadata: + # The is used by the CSI plugin to uniquely identify and use a + # Ceph cluster, hence the value MUST match the output of the following + # command. + # - Output of: `ceph fsid` + name: ceph-cluster- + namespace: default +data: + # Base64 encoded and comma separated Ceph cluster monitor list + # - Typically output of: `echo -n "mon1:port,mon2:port,..." | base64` + monitors: + # Base64 encoded and comma separated list of pool names from which volumes + # can be provisioned + pools: + # Base64 encoded admin ID to use for provisioning + # - Typically output of: `echo -n "" | base64` + # Substitute the entire string including angle braces, with the base64 value + adminid: + # Base64 encoded key of the provisioner admin ID + # - Output of: `ceph auth get-key client.admin | base64` + # Substitute the entire string including angle braces, with the base64 value + adminkey: + # Base64 encoded user ID to use for publishing + # - Typically output of: `echo -n "" | base64` + # Substitute the entire string including angle braces, with the base64 value + userid: + # Base64 encoded key of the publisher user ID + # - Output of: `ceph auth get-key client.admin | base64` + # Substitute the entire string including angle braces, with the base64 value + userkey: diff --git a/examples/rbd/template-csi-rbdplugin-patch.yaml b/examples/rbd/template-csi-rbdplugin-patch.yaml index 016f8c711..4a507b2f2 100644 --- a/examples/rbd/template-csi-rbdplugin-patch.yaml +++ b/examples/rbd/template-csi-rbdplugin-patch.yaml @@ -12,22 +12,10 @@ spec: containers: - name: csi-rbdplugin volumeMounts: - - name: provisioner-secret- - mountPath: "/etc/ceph-cluster--provisioner-secret" - readOnly: true - - name: publish-secret- - mountPath: "/etc/ceph-cluster--publish-secret" - readOnly: true - name: ceph-cluster- - mountPath: "/etc/ceph-cluster-/" + mountPath: "/etc/csi-config/ceph-cluster-" readOnly: true volumes: - - name: provisioner-secret- - secret: - secretName: ceph-cluster--provisioner-secret - - name: publish-secret- - secret: - secretName: ceph-cluster--publish-secret - name: ceph-cluster- - configMap: - name: ceph-cluster- + secret: + secretName: ceph-cluster- diff --git a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml index 083f14d2c..1d12e634b 100644 --- a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml +++ b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml @@ -12,22 +12,10 @@ spec: containers: - name: csi-rbdplugin volumeMounts: - - name: provisioner-secret- - mountPath: "/etc/ceph-cluster--provisioner-secret" - readOnly: true - - name: publish-secret- - mountPath: "/etc/ceph-cluster--publish-secret" - readOnly: true - name: ceph-cluster- - mountPath: "/etc/ceph-cluster-/" + mountPath: "/etc/csi-config/ceph-cluster-" readOnly: true volumes: - - name: provisioner-secret- - secret: - secretName: ceph-cluster--provisioner-secret - - name: publish-secret- - secret: - secretName: ceph-cluster--publish-secret - name: ceph-cluster- - configMap: - name: ceph-cluster- + secret: + secretName: ceph-cluster- diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index 62983fdb2..aa8dd3930 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -47,9 +47,8 @@ type Driver struct { var ( version = "1.0.0" - // Fc is the global file config type, and stores 
the top level directory - // under which rest of the Ceph config files can be found - Fc util.FileConfig + // ConfStore is the global config store + ConfStore *util.ConfigStore ) // NewDriver returns new rbd driver @@ -94,8 +93,11 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, co var err error klog.Infof("Driver: %v version: %v", driverName, version) - // Initialize fileconfig base path - Fc.BasePath = configroot + // Initialize config store + ConfStore, err = util.NewConfigStore(configroot) + if err != nil { + klog.Fatalln("Failed to initialize config store.") + } // Initialize default library driver r.cd = csicommon.NewCSIDriver(driverName, version, nodeID) diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 5ff7c5104..f19535e03 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -91,9 +91,10 @@ func getRBDKey(fsid string, id string, credentials map[string]string) (string, e var ok bool var err error var key string + if key, ok = credentials[id]; !ok { if fsid != "" { - key, err = Fc.GetCredentialForSubject(fsid, id) + key, err = ConfStore.CredentialForUser(fsid, id) if err != nil { klog.Errorf("failed getting credentials (%s)", err) return "", fmt.Errorf("RBD key for ID: %s not found in config store", id) @@ -240,8 +241,7 @@ func execCommand(command string, args []string) ([]byte, error) { return cmd.CombinedOutput() } -func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, noerr error) { - var err error +func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, err error) { var ok bool monitors, ok = options["monitors"] @@ -250,11 +250,14 @@ func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret stri if monInSecret, ok = options["monValueFromSecret"]; !ok { // if mons are not in secret, check if we have a cluster-fsid if fsID, ok = options["clusterID"]; !ok { - return "", "", "", fmt.Errorf("either monitors or monValueFromSecret or clusterID must be set") + err = errors.New("either monitors or monValueFromSecret or clusterID must be set") + return } - if monitors, err = Fc.GetMons(fsID); err != nil { + + if monitors, err = ConfStore.Mons(fsID); err != nil { klog.Errorf("failed getting mons (%s)", err) - return "", "", "", fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID) + err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID) + return } } } @@ -262,35 +265,34 @@ func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret stri return } -func getIDs(options map[string]string, fsID string) (adminID, userID string, noerr error) { - var err error +func getIDs(options map[string]string, fsID string) (adminID, userID string, err error) { var ok bool adminID, ok = options["adminid"] - if !ok { - if fsID != "" { - if adminID, err = Fc.GetProvisionerSubjectID(fsID); err != nil { - klog.Errorf("failed getting subject (%s)", err) - return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID) - } - } else { - adminID = rbdDefaultAdminID + switch { + case ok: + case fsID != "": + if adminID, err = ConfStore.AdminID(fsID); err != nil { + klog.Errorf("failed getting subject (%s)", err) + return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID) } + default: + adminID = rbdDefaultAdminID } userID, ok = options["userid"] - if !ok { - if fsID != "" { - if userID, err = Fc.GetPublishSubjectID(fsID); err != nil { - klog.Errorf("failed getting subject (%s)", err) - 
return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID) - } - } else { - userID = rbdDefaultUserID + switch { + case ok: + case fsID != "": + if userID, err = ConfStore.UserID(fsID); err != nil { + klog.Errorf("failed getting subject (%s)", err) + return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID) } + default: + userID = rbdDefaultUserID } - return + return adminID, userID, err } func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) { diff --git a/pkg/util/configstore.go b/pkg/util/configstore.go new file mode 100644 index 000000000..5064699ef --- /dev/null +++ b/pkg/util/configstore.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "k8s.io/klog" + "path" + "strings" +) + +// StoreReader interface enables plugging different stores, that contain the +// keys and data. (e.g k8s secrets or local files) +type StoreReader interface { + DataForKey(fsid string, key string) (string, error) +} + +/* ConfigKeys contents and format, +- csMonitors: MON list, comma separated +- csAdminID: adminID, used for provisioning +- csUserID: userID, used for publishing +- csAdminKey: key, for userID in csProvisionerUser +- csUserKey: key, for userID in csPublisherUser +- csPools: Pool list, comma separated +*/ + +// Constants for various ConfigKeys +const ( + csMonitors = "monitors" + csAdminID = "adminid" + csUserID = "userid" + csAdminKey = "adminkey" + csUserKey = "userkey" + csPools = "pools" +) + +// ConfigStore provides various gettors for ConfigKeys +type ConfigStore struct { + StoreReader +} + +// dataForKey returns data from the config store for the provided key +func (dc *ConfigStore) dataForKey(fsid string, key string) (string, error) { + if dc.StoreReader != nil { + return dc.StoreReader.DataForKey(fsid, key) + } + + err := errors.New("config store location uninitialized") + return "", err +} + +// Mons returns a comma separated MON list from the cluster config represented by fsid +func (dc *ConfigStore) Mons(fsid string) (string, error) { + return dc.dataForKey(fsid, csMonitors) +} + +// Pools returns a list of pool names from the cluster config represented by fsid +func (dc *ConfigStore) Pools(fsid string) ([]string, error) { + content, err := dc.dataForKey(fsid, csPools) + if err != nil { + return nil, err + } + + return strings.Split(content, ","), nil +} + +// AdminID returns the admin ID from the cluster config represented by fsid +func (dc *ConfigStore) AdminID(fsid string) (string, error) { + return dc.dataForKey(fsid, csAdminID) +} + +// UserID returns the user ID from the cluster config represented by fsid +func (dc *ConfigStore) UserID(fsid string) (string, error) { + return dc.dataForKey(fsid, csUserID) +} + +// CredentialForUser returns the credentials for the requested user ID +// from the cluster config represented by fsid +func (dc *ConfigStore) CredentialForUser(fsid, userID string) (data 
+
+// CredentialForUser returns the credentials for the requested user ID
+// from the cluster config represented by fsid
+func (dc *ConfigStore) CredentialForUser(fsid, userID string) (data string, err error) {
+	var credkey string
+	user, err := dc.AdminID(fsid)
+	if err != nil {
+		return
+	}
+
+	if user == userID {
+		credkey = csAdminKey
+	} else {
+		user, err = dc.UserID(fsid)
+		if err != nil {
+			return
+		}
+
+		if user != userID {
+			err = fmt.Errorf("requested user (%s) not found in cluster configuration of (%s)", userID, fsid)
+			return
+		}
+
+		credkey = csUserKey
+	}
+
+	return dc.dataForKey(fsid, credkey)
+}
+
+// NewConfigStore returns a config store based on value of configRoot. If
+// configRoot is not "k8s_objects" then it is assumed to be a path to a
+// directory, under which the configuration files can be found
+func NewConfigStore(configRoot string) (*ConfigStore, error) {
+	if configRoot != "k8s_objects" {
+		klog.Infof("cache-store: using files in path (%s) as config store", configRoot)
+		fc := &FileConfig{}
+		fc.BasePath = path.Clean(configRoot)
+		dc := &ConfigStore{fc}
+		return dc, nil
+	}
+
+	klog.Infof("cache-store: using k8s objects as config store")
+	kc := &K8sConfig{}
+	kc.Client = NewK8sClient()
+	kc.Namespace = GetK8sNamespace()
+	dc := &ConfigStore{kc}
+	return dc, nil
+}
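For quick orientation, a minimal sketch of how a caller could exercise the ConfigStore API added above, assuming a file-backed config root; the path and fsid values are illustrative only and this sketch is not part of the patch:

package main

import (
	"fmt"
	"log"

	"github.com/ceph/ceph-csi/pkg/util"
)

func main() {
	// A configRoot other than "k8s_objects" selects the FileConfig reader,
	// which expects files under <configRoot>/ceph-cluster-<fsid>/.
	cs, err := util.NewConfigStore("/etc/csi-config")
	if err != nil {
		log.Fatalf("failed to initialize config store: %v", err)
	}

	fsid := "4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9"

	// Reads the "monitors" key for the given cluster.
	mons, err := cs.Mons(fsid)
	if err != nil {
		log.Fatalf("failed to fetch monitors: %v", err)
	}

	// CredentialForUser returns the adminkey when the requested ID matches
	// the stored adminid, the userkey when it matches the stored userid,
	// and an error for any other ID.
	key, err := cs.CredentialForUser(fsid, "admin")
	if err != nil {
		log.Fatalf("failed to fetch key: %v", err)
	}

	fmt.Println(mons, key)
}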
diff --git a/pkg/util/configstore_test.go b/pkg/util/configstore_test.go
new file mode 100644
index 000000000..d85cefffa
--- /dev/null
+++ b/pkg/util/configstore_test.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2019 ceph-csi authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// nolint: gocyclo
+
+package util
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+)
+
+var basePath = "./test_artifacts"
+var cs *ConfigStore
+
+func cleanupTestData() {
+	os.RemoveAll(basePath)
+}
+
+// nolint: gocyclo
+func TestConfigStore(t *testing.T) {
+	var err error
+	var data string
+	var content string
+	var testDir string
+
+	defer cleanupTestData()
+
+	cs, err = NewConfigStore(basePath)
+	if err != nil {
+		t.Errorf("Fatal, failed to get a new config store")
+	}
+
+	err = os.MkdirAll(basePath, 0700)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Should fail as fsid directory is missing
+	_, err = cs.Mons("testfsid")
+	if err == nil {
+		t.Errorf("Failed: expected error due to missing parent directory")
+	}
+
+	testDir = basePath + "/" + "ceph-cluster-testfsid"
+	err = os.MkdirAll(testDir, 0700)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Should fail as mons file is missing
+	_, err = cs.Mons("testfsid")
+	if err == nil {
+		t.Errorf("Failed: expected error due to missing mons file")
+	}
+
+	data = ""
+	err = ioutil.WriteFile(testDir+"/"+csMonitors, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Should fail as MONs is an empty string
+	content, err = cs.Mons("testfsid")
+	if err == nil {
+		t.Errorf("Failed: want (%s), got (%s)", data, content)
+	}
+
+	data = "mon1,mon2,mon3"
+	err = ioutil.WriteFile(testDir+"/"+csMonitors, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Fetching MONs should succeed
+	content, err = cs.Mons("testfsid")
+	if err != nil || content != data {
+		t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
+	}
+
+	data = "pool1,pool2"
+	err = ioutil.WriteFile(testDir+"/"+csPools, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Fetching pools should succeed
+	listContent, err := cs.Pools("testfsid")
+	if err != nil || strings.Join(listContent, ",") != data {
+		t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
+	}
+
+	data = "provuser"
+	err = ioutil.WriteFile(testDir+"/"+csAdminID, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Fetching provuser should succeed
+	content, err = cs.AdminID("testfsid")
+	if err != nil || content != data {
+		t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
+	}
+
+	data = "pubuser"
+	err = ioutil.WriteFile(testDir+"/"+csUserID, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Fetching pubuser should succeed
+	content, err = cs.UserID("testfsid")
+	if err != nil || content != data {
+		t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
+	}
+
+	data = "provkey"
+	err = ioutil.WriteFile(testDir+"/"+csAdminKey, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Fetching provkey should succeed
+	content, err = cs.CredentialForUser("testfsid", "provuser")
+	if err != nil || content != data {
+		t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
+	}
+
+	data = "pubkey"
+	err = ioutil.WriteFile(testDir+"/"+csUserKey, []byte(data), 0644)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: Fetching pubkey should succeed
+	content, err = cs.CredentialForUser("testfsid", "pubuser")
+	if err != nil || content != data {
+		t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
+	}
+ + // TEST: Fetching random user key should fail + _, err = cs.CredentialForUser("testfsid", "random") + if err == nil { + t.Errorf("Failed: Expected to fail fetching random user key") + } +} diff --git a/pkg/util/fileconfig.go b/pkg/util/fileconfig.go index fb58dcbb8..3bac9a3d0 100644 --- a/pkg/util/fileconfig.go +++ b/pkg/util/fileconfig.go @@ -17,241 +17,42 @@ limitations under the License. package util import ( - "encoding/json" - "fmt" - "io/ioutil" - "strings" + "fmt" + "io/ioutil" + "k8s.io/klog" + "path" ) -/* FileConfig processes config information stored in files, mostly mapped into - the runtime container. +/* +FileConfig is a ConfigStore interface implementation that reads configuration +information from files. - The calls explicitly do not cache any information, to ensure that updated - configuration is always read from the files (for example when these are - mapped in as k8s config maps or secrets). +BasePath defines the directory under which FileConfig will attempt to open and +read contents of various Ceph cluster configurations. - The BasePath is the path where config files are found, and config files are - expected to be named in the following manner, - - BasePath/ceph-cluster-/cluster-config - - BasePath/ceph-cluster--provisioner-secret/credentials - - BasePath/ceph-cluster--provisioner-secret/subjectid - - BasePath/ceph-cluster--publish-secret/credentials - - BasePath/ceph-cluster--publish-secret/subjectid - Where, - - cluster-fsid is the Ceph cluster fsid in UUID ascii notation - - The cluster-fsid corresponds to the cluster for which the - configuration information is present in the mentioned files - - cluster-config is expected to be a JSON blob with the following - structure, - { - "version": 1, - "cluster-config": { - "cluster-fsid": "", - "monitors": [ - "IP/DNS:port", - "IP/DNS:port" - ], - "pools": [ - "", - "" - ] - } - } - - credentials is expected to contain Base64 encoded credentials for the - user encoded in subjectid - - subjectid is the username/subject to use with calls to Ceph, and is - also Base64 encoded - - Provisioner secret contains secrets to use by the provisioning system - - Publish secret contains secrets to use by the publishing/staging - system +Each Ceph cluster configuration is stored under a directory named, +BasePath/ceph-cluster-, where is the Ceph cluster fsid. + +Under each Ceph cluster configuration directory, individual files named as per +the ConfigKeys constants in the ConfigStore interface, store the required +configuration information. 
*/ - -// FileConfig type with basepath that points to source of all config files type FileConfig struct { - BasePath string + BasePath string } -// ClusterConfigv1 strongly typed JSON spec for cluster-config above -type ClusterConfigv1 struct { - ClusterFsID string `json:"cluster-fsid"` - Monitors []string `json:"monitors"` - Pools []string `json:"pools"` -} - -// ClusterConfigJSONv1 strongly typed JSON spec for cluster-config above -type ClusterConfigJSONv1 struct { - Version int `json:"version"` - ClusterConf *ClusterConfigv1 `json:"cluster-config"` -} - -// Constants and enum for constructPath operation -type pathType int - -const ( - clusterConfig pathType = 0 - pubSubject pathType = 1 - pubCreds pathType = 2 - provSubject pathType = 3 - provCreds pathType = 4 -) - -const ( - fNamePrefix = "ceph-cluster" - fNameSep = "-" - fNamePubPrefix = "publish-secret" - fNameProvPrefix = "provisioner-secret" - fNameCephConfig = "cluster-config" - fNamePubSubject = "subjectid" - fNameProvSubject = "subjectid" - fNamePubCred = "credentials" - fNameProvCred = "credentials" -) - -// constructPath constructs well defined paths based on the type of config -// file that needs to be accessed. -func (pType pathType) constructPath(basepath string, fsid string) (filePath string, noerr error) { - if fsid == "" || basepath == "" { - return "", fmt.Errorf("missing/empty fsid (%s) or basepath (%s) for config files", fsid, basepath) - } - - switch pType { - case clusterConfig: - filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + - "/" + fNameCephConfig - case pubSubject: - filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + - fNameSep + fNamePubPrefix + "/" + fNamePubSubject - case pubCreds: - filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + - fNameSep + fNamePubPrefix + "/" + fNamePubCred - case provSubject: - filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + - fNameSep + fNameProvPrefix + "/" + fNameProvSubject - case provCreds: - filePath = basepath + "/" + fNamePrefix + fNameSep + fsid + - fNameSep + fNameProvPrefix + "/" + fNameProvCred - default: - return "", fmt.Errorf("invalid path type (%d) specified", pType) - } - - return -} - -// GetMons returns a comma separated MON list, that is read in from the config -// files, based on the passed in fsid -func (fc *FileConfig) GetMons(fsid string) (string, error) { - fPath, err := clusterConfig.constructPath(fc.BasePath, fsid) - if err != nil { - return "", err - } - - // #nosec - contentRaw, err := ioutil.ReadFile(fPath) - if err != nil { - return "", err - } - - var cephConfig ClusterConfigJSONv1 - - err = json.Unmarshal(contentRaw, &cephConfig) - if err != nil { - return "", err - } - - if cephConfig.ClusterConf.ClusterFsID != fsid { - return "", fmt.Errorf("mismatching Ceph cluster fsid (%s) in file, passed in (%s)", cephConfig.ClusterConf.ClusterFsID, fsid) - } - - if len(cephConfig.ClusterConf.Monitors) == 0 { - return "", fmt.Errorf("monitor list empty in configuration file") - } - - return strings.Join(cephConfig.ClusterConf.Monitors, ","), nil -} - -// GetProvisionerSubjectID returns the provisioner subject ID from the on-disk -// configuration file, based on the passed in fsid -func (fc *FileConfig) GetProvisionerSubjectID(fsid string) (string, error) { - fPath, err := provSubject.constructPath(fc.BasePath, fsid) - if err != nil { - return "", err - } - - // #nosec - contentRaw, err := ioutil.ReadFile(fPath) - if err != nil { - return "", err - } - - if string(contentRaw) == "" { - return "", 
fmt.Errorf("missing/empty provisioner subject ID from file (%s)", fPath) - } - - return string(contentRaw), nil -} - -// GetPublishSubjectID returns the publish subject ID from the on-disk -// configuration file, based on the passed in fsid -func (fc *FileConfig) GetPublishSubjectID(fsid string) (string, error) { - fPath, err := pubSubject.constructPath(fc.BasePath, fsid) - if err != nil { - return "", err - } - - // #nosec - contentRaw, err := ioutil.ReadFile(fPath) - if err != nil { - return "", err - } - - if string(contentRaw) == "" { - return "", fmt.Errorf("missing/empty publish subject ID from file (%s)", fPath) - } - - return string(contentRaw), nil -} - -// GetCredentialForSubject returns the credentials for the requested subject -// from the cluster config for the passed in fsid -func (fc *FileConfig) GetCredentialForSubject(fsid, subject string) (string, error) { - var fPath string - var err error - - tmpSubject, err := fc.GetPublishSubjectID(fsid) - if err != nil { - return "", err - } - - if tmpSubject != subject { - tmpSubject, err = fc.GetProvisionerSubjectID(fsid) - if err != nil { - return "", err - } - - if tmpSubject != subject { - return "", fmt.Errorf("requested subject did not match stored publish/provisioner subjectID") - } - - fPath, err = provCreds.constructPath(fc.BasePath, fsid) - if err != nil { - return "", err - } - } else { - fPath, err = pubCreds.constructPath(fc.BasePath, fsid) - if err != nil { - return "", err - } - } - - // #nosec - contentRaw, err := ioutil.ReadFile(fPath) - if err != nil { - return "", err - } - - if string(contentRaw) == "" { - return "", fmt.Errorf("missing/empty credentials in file (%s)", fPath) - } - - return string(contentRaw), nil +// DataForKey reads the appropriate config file, named using key, and returns +// the contents of the file to the caller +func (fc *FileConfig) DataForKey(fsid string, key string) (data string, err error) { + pathToKey := path.Join(fc.BasePath, "ceph-cluster-"+fsid, key) + // #nosec + content, err := ioutil.ReadFile(pathToKey) + if err != nil || string(content) == "" { + err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", fsid, err) + return + } + + data = string(content) + klog.V(3).Infof("returning data (%s) for key (%s) against cluster (%s)", data, key, fsid) + return } diff --git a/pkg/util/fileconfig_test.go b/pkg/util/fileconfig_test.go deleted file mode 100644 index ae86e2de0..000000000 --- a/pkg/util/fileconfig_test.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2019 ceph-csi authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// nolint: gocyclo - -package util - -import ( - "fmt" - "io/ioutil" - "os" - "testing" -) - -var testFsid = "dummy-fs-id" -var basePath = "./test_artifacts" - -// nolint: gocyclo -func TestGetMons(t *testing.T) { - var fc FileConfig - var err error - - configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid - defer os.RemoveAll(basePath) - - fc.BasePath = basePath - - // TEST: Empty fsid should error out - _, err = fc.GetMons("") - if err == nil { - t.Errorf("Call passed, expected to fail due to fsid missing!") - } - - // TEST: Missing file should error out - _, err = fc.GetMons(testFsid) - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - // TEST: Empty file should error out - err = os.MkdirAll(configFileDir, 0700) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - data := []byte{} - err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetMons(testFsid) - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - /* Tests with bad JSON content should get caught due to strongly typed JSON - struct in implementation and are not tested here */ - - // TEST: Send JSON with incorrect fsid - data = []byte(` - { - "version": 1, - "cluster-config": { - "cluster-fsid": "bad_fsid", - "monitors": ["IP1:port1","IP2:port2"], - "pools": ["pool1","pool2"] - } - }`) - err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetMons(testFsid) - if err == nil { - t.Errorf("Expected to fail on bad fsid in JSON") - } - - // TEST: Send JSON with empty mon list - data = []byte(` - { - "version": 1, - "cluster-config": { - "cluster-fsid": "` + testFsid + `", - "monitors": [], - "pools": ["pool1","pool2"] - } - }`) - err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetMons(testFsid) - if err == nil { - t.Errorf("Expected to fail in empty MON list in JSON") - } - - // TEST: Check valid return from successful call - data = []byte(` - { - "version": 1, - "cluster-config": { - "cluster-fsid": "` + testFsid + `", - "monitors": ["IP1:port1","IP2:port2"], - "pools": ["pool1","pool2"] - } - }`) - err = ioutil.WriteFile(configFileDir+"/"+fNameCephConfig, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - output, err := fc.GetMons(testFsid) - if err != nil { - t.Errorf("Call failed %s", err) - } - if output != "IP1:port1,IP2:port2" { - t.Errorf("Failed to generate correct output: expected %s, got %s", - "IP1:port1,IP2:port2", output) - } -} - -func TestGetProvisionerSubjectID(t *testing.T) { - var fc FileConfig - var err error - - configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNameProvPrefix - defer os.RemoveAll(basePath) - - fc.BasePath = basePath - - // TEST: Empty fsid should error out - _, err = fc.GetProvisionerSubjectID("") - if err == nil { - t.Errorf("Call passed, expected to fail due to fsid missing!") - } - - // TEST: Missing file should error out - _, err = fc.GetProvisionerSubjectID(testFsid) - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - // TEST: Empty file should error out - err = os.MkdirAll(configFileDir, 0700) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - data := []byte{} - 
err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetProvisionerSubjectID(testFsid) - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - // TEST: Check valid return from successful call - data = []byte("admin") - err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - output, err := fc.GetProvisionerSubjectID(testFsid) - if err != nil || output != "admin" { - t.Errorf("Failed to get valid subject ID: expected %s, got %s, err %s", "admin", output, err) - } -} - -func TestGetPublishSubjectID(t *testing.T) { - var fc FileConfig - var err error - - configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNamePubPrefix - defer os.RemoveAll(basePath) - - fc.BasePath = basePath - - // TEST: Empty fsid should error out - _, err = fc.GetPublishSubjectID("") - if err == nil { - t.Errorf("Call passed, expected to fail due to fsid missing!") - } - - // TEST: Missing file should error out - _, err = fc.GetPublishSubjectID(testFsid) - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - // TEST: Empty file should error out - err = os.MkdirAll(configFileDir, 0700) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - data := []byte{} - err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetPublishSubjectID(testFsid) - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - // TEST: Check valid return from successful call - data = []byte("admin") - err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - output, err := fc.GetPublishSubjectID(testFsid) - if err != nil || output != "admin" { - t.Errorf("Failed to get valid subject ID: expected %s, got %s, err %s", "admin", output, err) - } -} - -// nolint: gocyclo -func TestGetCredentialForSubject(t *testing.T) { - var fc FileConfig - var err error - - configFileDir := basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNamePubPrefix - defer os.RemoveAll(basePath) - - fc.BasePath = basePath - - // TEST: Empty fsid should error out - _, err = fc.GetCredentialForSubject("", "subject") - if err == nil { - t.Errorf("Call passed, expected to fail due to fsid missing!") - } - - // TEST: Missing file should error out - _, err = fc.GetCredentialForSubject(testFsid, "") - if err == nil { - t.Errorf("Call passed, expected to fail due to missing config file!") - } - - // TEST: Empty subject file should error out - err = os.MkdirAll(configFileDir, 0700) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - data := []byte{} - err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetCredentialForSubject(testFsid, "adminpub") - if err == nil { - t.Errorf("Call passed, expected to fail due to empty subject file!") - } - - // TEST: Empty subject cred file should error out - data = []byte("adminpub") - err = ioutil.WriteFile(configFileDir+"/"+fNamePubSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - data = []byte{} - err = ioutil.WriteFile(configFileDir+"/"+fNamePubCred, data, 0644) - if err != nil { - 
t.Errorf("Test utility error %s", err) - } - - _, err = fc.GetCredentialForSubject(testFsid, "adminpub") - if err == nil { - t.Errorf("Call passed, expected to fail due to missing cred content!") - } - - // TEST: Success fetching pub creds - data = []byte("testpwd") - err = ioutil.WriteFile(configFileDir+"/"+fNamePubCred, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - output, err := fc.GetCredentialForSubject(testFsid, "adminpub") - if err != nil || output != "testpwd" { - t.Errorf("Failed to get valid Publish credentials: expected %s, got %s, err %s", "testpwd", output, err) - } - - // TEST: Fetch missing prov creds - configFileDir = basePath + "/" + fNamePrefix + fNameSep + testFsid + fNameSep + fNameProvPrefix - err = os.MkdirAll(configFileDir, 0700) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - data = []byte("adminprov") - err = ioutil.WriteFile(configFileDir+"/"+fNameProvSubject, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - fmt.Printf("Starting test") - _, err = fc.GetCredentialForSubject(testFsid, "adminprov") - if err == nil { - t.Errorf("Call passed, expected to fail due to missing cred content!") - } - - // TEST: Fetch prov creds successfully - data = []byte("testpwd") - err = ioutil.WriteFile(configFileDir+"/"+fNameProvCred, data, 0644) - if err != nil { - t.Errorf("Test utility error %s", err) - } - - output, err = fc.GetCredentialForSubject(testFsid, "adminprov") - if err != nil || output != "testpwd" { - t.Errorf("Call passed, expected to fail due to missing cred content!") - } -} diff --git a/pkg/util/k8sconfig.go b/pkg/util/k8sconfig.go new file mode 100644 index 000000000..08e69dcca --- /dev/null +++ b/pkg/util/k8sconfig.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 ceph-csi authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8s "k8s.io/client-go/kubernetes" + "k8s.io/klog" +) + +/* +K8sConfig is a ConfigStore interface implementation that reads configuration +information from k8s secrets. + +Each Ceph cluster configuration secret is expected to be named, +ceph-cluster-, where is the Ceph cluster fsid. + +The secret is expected to contain keys, as defined by the ConfigKeys constants +in the ConfigStore interface. +*/ +type K8sConfig struct { + Client *k8s.Clientset + Namespace string +} + +// DataForKey reads the appropriate k8s secret, named using fsid, and returns +// the contents of key within the secret +func (kc *K8sConfig) DataForKey(fsid string, key string) (data string, err error) { + secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get("ceph-cluster-"+fsid, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("error fetching configuration for cluster ID (%s). 
(%s)", fsid, err) + return + } + + content, ok := secret.Data[key] + if !ok { + err = fmt.Errorf("missing data for key (%s) in cluster configuration of (%s)", key, fsid) + return + } + + data = string(content) + klog.V(3).Infof("returning data (%s) for key (%s) against cluster (%s)", data, key, fsid) + return +} From bd03d057769946ba9cfc4ecaeef8f0bb846cac79 Mon Sep 17 00:00:00 2001 From: ShyamsundarR Date: Thu, 7 Mar 2019 17:03:18 -0500 Subject: [PATCH 66/89] Address linter error Signed-off-by: ShyamsundarR --- cmd/rbd/main.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go index f5f727bc9..421da93e8 100644 --- a/cmd/rbd/main.go +++ b/cmd/rbd/main.go @@ -31,7 +31,8 @@ var ( nodeID = flag.String("nodeid", "", "node id") containerized = flag.Bool("containerized", true, "whether run as containerized") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") - configRoot = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets") + configRoot = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph"+ + " cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets") ) func init() { From c9c1c871fc54789e68de0933d088866eaf080994 Mon Sep 17 00:00:00 2001 From: ShyamsundarR Date: Thu, 7 Mar 2019 20:14:39 -0500 Subject: [PATCH 67/89] Removed a couple of debug logs Signed-off-by: ShyamsundarR --- pkg/util/fileconfig.go | 2 -- pkg/util/k8sconfig.go | 2 -- 2 files changed, 4 deletions(-) diff --git a/pkg/util/fileconfig.go b/pkg/util/fileconfig.go index 3bac9a3d0..abf388e37 100644 --- a/pkg/util/fileconfig.go +++ b/pkg/util/fileconfig.go @@ -19,7 +19,6 @@ package util import ( "fmt" "io/ioutil" - "k8s.io/klog" "path" ) @@ -53,6 +52,5 @@ func (fc *FileConfig) DataForKey(fsid string, key string) (data string, err erro } data = string(content) - klog.V(3).Infof("returning data (%s) for key (%s) against cluster (%s)", data, key, fsid) return } diff --git a/pkg/util/k8sconfig.go b/pkg/util/k8sconfig.go index 08e69dcca..0d1271169 100644 --- a/pkg/util/k8sconfig.go +++ b/pkg/util/k8sconfig.go @@ -20,7 +20,6 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8s "k8s.io/client-go/kubernetes" - "k8s.io/klog" ) /* @@ -54,6 +53,5 @@ func (kc *K8sConfig) DataForKey(fsid string, key string) (data string, err error } data = string(content) - klog.V(3).Infof("returning data (%s) for key (%s) against cluster (%s)", data, key, fsid) return } From e1c685ef3964895b34c6005b0b2e3eb7ff3030f2 Mon Sep 17 00:00:00 2001 From: ShyamsundarR Date: Mon, 11 Mar 2019 08:36:33 -0400 Subject: [PATCH 68/89] Fixed scope of confStore Signed-off-by: ShyamsundarR --- pkg/rbd/rbd.go | 6 +++--- pkg/rbd/rbd_util.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index aa8dd3930..295752941 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -47,8 +47,8 @@ type Driver struct { var ( version = "1.0.0" - // ConfStore is the global config store - ConfStore *util.ConfigStore + // confStore is the global config store + confStore *util.ConfigStore ) // NewDriver returns new rbd driver @@ -94,7 +94,7 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, co klog.Infof("Driver: %v version: %v", driverName, version) // Initialize config store - ConfStore, err = 
util.NewConfigStore(configroot)
+	confStore, err = util.NewConfigStore(configroot)
 	if err != nil {
 		klog.Fatalln("Failed to initialize config store.")
 	}
diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go
index f19535e03..1ed7455be 100644
--- a/pkg/rbd/rbd_util.go
+++ b/pkg/rbd/rbd_util.go
@@ -94,7 +94,7 @@ func getRBDKey(fsid string, id string, credentials map[string]string) (string, e

 	if key, ok = credentials[id]; !ok {
 		if fsid != "" {
-			key, err = ConfStore.CredentialForUser(fsid, id)
+			key, err = confStore.CredentialForUser(fsid, id)
 			if err != nil {
 				klog.Errorf("failed getting credentials (%s)", err)
 				return "", fmt.Errorf("RBD key for ID: %s not found in config store", id)
@@ -254,7 +254,7 @@ func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret stri
 		return
 	}

-	if monitors, err = ConfStore.Mons(fsID); err != nil {
+	if monitors, err = confStore.Mons(fsID); err != nil {
 		klog.Errorf("failed getting mons (%s)", err)
 		err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID)
 		return
@@ -272,7 +272,7 @@ func getIDs(options map[string]string, fsID string) (adminID, userID string, err
 	switch {
 	case ok:
 	case fsID != "":
-		if adminID, err = ConfStore.AdminID(fsID); err != nil {
+		if adminID, err = confStore.AdminID(fsID); err != nil {
 			klog.Errorf("failed getting subject (%s)", err)
 			return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID)
 		}
@@ -284,7 +284,7 @@ func getIDs(options map[string]string, fsID string) (adminID, userID string, err
 	switch {
 	case ok:
 	case fsID != "":
-		if userID, err = ConfStore.UserID(fsID); err != nil {
+		if userID, err = confStore.UserID(fsID); err != nil {
 			klog.Errorf("failed getting subject (%s)", err)
 			return "", "", fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID)
 		}

From fc0cf957bec769911da5d0108c57ce841c575f0c Mon Sep 17 00:00:00 2001
From: ShyamsundarR
Date: Tue, 12 Mar 2019 11:57:36 -0400
Subject: [PATCH 69/89] Updated code and docs to reflect correct terminology

- Replaced instances of fsid with clusterid
- Replaced instances of credentials/subject with user/key

Signed-off-by: ShyamsundarR
---
 docs/deploy-rbd.md                            | 13 ++--
 examples/README.md                            | 14 +++--
 examples/rbd/snapshotclass.yaml               | 10 ++-
 examples/rbd/storageclass.yaml                | 17 +++---
 .../rbd/template-ceph-cluster-ID-secret.yaml  | 13 ++--
 .../rbd/template-csi-rbdplugin-patch.yaml     | 21 +++++--
 ...plate-csi-rbdplugin-provisioner-patch.yaml | 21 +++++--
 pkg/rbd/rbd_attach.go                         |  2 +-
 pkg/rbd/rbd_util.go                           | 61 +++++++++----------
 pkg/util/configstore.go                       | 50 +++++++--------
 pkg/util/configstore_test.go                  | 27 ++++----
 pkg/util/fileconfig.go                        |  9 +--
 pkg/util/k8sconfig.go                         | 15 ++---
 13 files changed, 151 insertions(+), 122 deletions(-)

diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md
index a0f9b2d34..6d4ead827 100644
--- a/docs/deploy-rbd.md
+++ b/docs/deploy-rbd.md
@@ -53,7 +53,7 @@ Parameter | Required | Description
 --------- | -------- | -----------
 `monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
 `monValueFromSecret` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | a string pointing to the key in the credential secret, whose value is the mon. This is used for the case when the monitors' IP or hostnames are changed, the secret can be updated to pick up the new monitors.
-`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Value of `ceph fsid`, into which RBD images shall be created (e.g. `4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9`)
+`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use
 `pool` | yes | Ceph pool into which the RBD image shall be created
 `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
 `imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
@@ -64,7 +64,8 @@ Parameter | Required | Description
 NOTE: If `clusterID` parameter is used, then an accompanying Ceph cluster
 configuration secret or config files need to be provided to the running pods.
 Refer to `examples/README.md` section titled "Cluster ID based configuration"
-for more information.
+for more information. A suggested way to populate the clusterID is to use the
+output of `ceph fsid` of the Ceph cluster to be used for provisioning.

 **Required secrets:**

 Admin credentials are required for provisioning new RBD images
 `ADMIN_NAME`: `ADMIN_PASSWORD`
 - note that the key of the key-value pair is the name of the client with admin
 privileges, and the value is its password

-If clusterID is specified, then a pair of secrets are required, with keys named
-`subjectid` and `credentials`. Where, `subjectid` is the name of the client
-with admin privileges and `credentials` contain its password. The pair required
-are provisioner and publish secrets, and should contain the same value.
+If clusterID is specified, then a secret with various keys and values as
+specified in `examples/rbd/template-ceph-cluster-ID-secret.yaml` needs to be
+created, with the secret name matching the string value provided as the
+`clusterID`.

 ## Deployment with Kubernetes
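As a reading aid for the parameter table above, the sketch below shows the monitor resolution order the driver applies: an explicit "monitors" parameter wins, then "monValueFromSecret", and only then a "clusterID" lookup through the config store. This is a simplified, illustrative rendering of getMonsAndClusterID from pkg/rbd/rbd_util.go; the helper name resolveMons and its standalone shape are assumptions for illustration, not code from this patch:

package main

import (
	"errors"
	"fmt"

	"github.com/ceph/ceph-csi/pkg/util"
)

// resolveMons sketches the documented precedence. When monValueFromSecret is
// set, the monitor list itself is read later from the credentials secret
// under the returned key, mirroring the in-tree behavior.
func resolveMons(options map[string]string, cs *util.ConfigStore) (monitors, monInSecret string, err error) {
	var ok bool
	if monitors, ok = options["monitors"]; ok {
		return monitors, "", nil
	}
	if monInSecret, ok = options["monValueFromSecret"]; ok {
		return "", monInSecret, nil
	}
	clusterID, ok := options["clusterID"]
	if !ok {
		return "", "", errors.New("either monitors or monValueFromSecret or clusterID must be set")
	}
	monitors, err = cs.Mons(clusterID)
	return monitors, "", err
}

func main() {
	cs, _ := util.NewConfigStore("/etc/csi-config")
	mons, _, err := resolveMons(map[string]string{"clusterID": "4ae5ae3d-ebfb-4150-bfc8-798970f4e3d9"}, cs)
	fmt.Println(mons, err)
}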
diff --git a/examples/README.md b/examples/README.md
index aa7f710d9..55e2d376d 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -226,9 +226,6 @@ Ceph cluster, the following actions need to be completed.
 Get the following information from the Ceph cluster,

-* Ceph Cluster fsid
-  * Output of `ceph fsid`
-  * Used to substitute `<cluster-fsid>` references in the files below
 * Admin ID and key, that has privileges to perform CRUD operations on the Ceph
   cluster and pools of choice
   * Key is typically the output of, `ceph auth get-key client.admin` where
     `client.admin` is the admin user name
   * Used to substitute admin/user id and key values in the files below
 * Ceph monitor list
   * Typically in the output of `ceph mon dump`
   * Used to prepare comma separated MON list where required in the files below
+* Ceph Cluster fsid
+  * If choosing to use the Ceph cluster fsid as the unique value of clusterID,
+    * Output of `ceph fsid`
+    * Used to substitute `<cluster-id>` references in the files below

 Update the template `rbd/template-ceph-cluster-ID-secret.yaml` with values from
-a Ceph cluster and create the following secret,
+a Ceph cluster and replace `<cluster-id>` with the chosen clusterID to create
+the following secret,

 * `kubectl create -f rbd/template-ceph-cluster-ID-secret.yaml`

-Storage class and snapshot class, using `<cluster-fsid>` as the value for the
- option `clusterID`, can now be created on the cluster.
+Storage class and snapshot class, using `<cluster-id>` as the value for the
+option `clusterID`, can now be created on the cluster.

 Remaining steps to test functionality remain the same as mentioned in the
 sections above.
diff --git a/examples/rbd/snapshotclass.yaml b/examples/rbd/snapshotclass.yaml
index a85e707b3..a07b41ea2 100644
--- a/examples/rbd/snapshotclass.yaml
+++ b/examples/rbd/snapshotclass.yaml
@@ -10,8 +10,14 @@ parameters:
   # if using FQDN, make sure csi plugin's dns policy is appropriate.
   monitors: mon1:port,mon2:port,...
   # OR,
-  # Ceph cluster fsid, of the cluster to provision storage from
-  # clusterID: <cluster-fsid>
+  # String representing a Ceph cluster to provision storage from.
+  # Should be unique across all Ceph clusters in use for provisioning,
+  # cannot be greater than 36 bytes in length, and should remain immutable for
+  # the lifetime of the StorageClass in use.
+  # If using clusterID, ensure to create a secret, as in
+  # template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to
+  # represent the Ceph cluster in clusterID
+  # clusterID: <cluster-id>
   csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
   csi.storage.k8s.io/snapshotter-secret-namespace: default
diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml
index 3fa497919..cc4041ad2 100644
--- a/examples/rbd/storageclass.yaml
+++ b/examples/rbd/storageclass.yaml
@@ -9,11 +9,14 @@ parameters:
   # if using FQDN, make sure csi plugin's dns policy is appropriate.
   monitors: mon1:port,mon2:port,...
   # OR,
-  # Ceph cluster fsid, of the cluster to provision storage from
-  # clusterID: <cluster-fsid>
-  # If using clusterID based configuration, CSI pods need to be passed in a
-  # secret named ceph-cluster-<cluster-fsid> that contains the cluster
-  # information. (as in the provided template-ceph-cluster-ID-secret.yaml)
+  # String representing a Ceph cluster to provision storage from.
+  # Should be unique across all Ceph clusters in use for provisioning,
+  # cannot be greater than 36 bytes in length, and should remain immutable for
+  # the lifetime of the StorageClass in use.
+  # If using clusterID, ensure to create a secret, as in
+  # template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to
+  # represent the Ceph cluster in clusterID
+  # clusterID: <cluster-id>
   # OR,
   # if "monitors" parameter is not set, driver to get monitors from same
   # secret as admin/user credentials.
"monValueFromSecret" provides the @@ -32,7 +35,7 @@ parameters: # The secrets have to contain Ceph admin credentials. # NOTE: If using "clusterID" instead of "monitors" above, the following - # secrets MAY be added to the ceph-cluster- secret and skipped + # secrets MAY be added to the ceph-cluster- secret and skipped # here csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret csi.storage.k8s.io/provisioner-secret-namespace: default @@ -41,7 +44,7 @@ parameters: # Ceph users for operating RBD # NOTE: If using "clusterID" instead of "monitors" above, the following - # IDs MAY be added to the ceph-cluster- secret and skipped + # IDs MAY be added to the ceph-cluster- secret and skipped # here adminid: admin userid: kubernetes diff --git a/examples/rbd/template-ceph-cluster-ID-secret.yaml b/examples/rbd/template-ceph-cluster-ID-secret.yaml index d4c70c0fb..ef778544f 100644 --- a/examples/rbd/template-ceph-cluster-ID-secret.yaml +++ b/examples/rbd/template-ceph-cluster-ID-secret.yaml @@ -6,11 +6,10 @@ apiVersion: v1 kind: Secret metadata: - # The is used by the CSI plugin to uniquely identify and use a - # Ceph cluster, hence the value MUST match the output of the following - # command. - # - Output of: `ceph fsid` - name: ceph-cluster- + # The is used by the CSI plugin to uniquely identify and use a + # Ceph cluster, the value MUST match the value provided as `clusterID` in the + # StorageClass + name: ceph-cluster- namespace: default data: # Base64 encoded and comma separated Ceph cluster monitor list @@ -24,7 +23,7 @@ data: # Substitute the entire string including angle braces, with the base64 value adminid: # Base64 encoded key of the provisioner admin ID - # - Output of: `ceph auth get-key client.admin | base64` + # - Output of: `ceph auth get-key client. | base64` # Substitute the entire string including angle braces, with the base64 value adminkey: # Base64 encoded user ID to use for publishing @@ -32,6 +31,6 @@ data: # Substitute the entire string including angle braces, with the base64 value userid: # Base64 encoded key of the publisher user ID - # - Output of: `ceph auth get-key client.admin | base64` + # - Output of: `ceph auth get-key client. | base64` # Substitute the entire string including angle braces, with the base64 value userkey: diff --git a/examples/rbd/template-csi-rbdplugin-patch.yaml b/examples/rbd/template-csi-rbdplugin-patch.yaml index 4a507b2f2..d89afbe8e 100644 --- a/examples/rbd/template-csi-rbdplugin-patch.yaml +++ b/examples/rbd/template-csi-rbdplugin-patch.yaml @@ -1,8 +1,17 @@ --- # This is a patch to the existing daemonset deployment of CSI rbdplugin. -# This is to be used when adding a new Ceph cluster to the CSI plugin. +# +# This is to be used when using `clusterID` instead of monitors or +# monValueFromSecret in the StorageClass to specify the Ceph cluster to +# provision storage from, AND when the value of `--configroot` option to the +# CSI pods is NOT "k8s_objects". +# +# This patch file, patches in the specified secret for the 'clusterID' as a +# volume, instead of the Ceph CSI plugin actively fetching and using kubernetes +# secrets. +# # NOTE: Update csi-rbdplugin-provisioner StatefulSet as well with similar patch -# Post substituting the in all places execute, +# Post substituting the in all places execute, # `kubectl patch daemonset csi-rbdplugin --patch\ # "$(cat template-csi-rbdplugin-patch.yaml)"` # to patch the statefulset deployment. 
@@ -12,10 +21,10 @@ spec: containers: - name: csi-rbdplugin volumeMounts: - - name: ceph-cluster- - mountPath: "/etc/csi-config/ceph-cluster-" + - name: ceph-cluster- + mountPath: "/etc/csi-config/ceph-cluster-" readOnly: true volumes: - - name: ceph-cluster- + - name: ceph-cluster- secret: - secretName: ceph-cluster- + secretName: ceph-cluster- diff --git a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml index 1d12e634b..68a36fcdf 100644 --- a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml +++ b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml @@ -1,8 +1,17 @@ --- # This is a patch to the existing statefulset deployment of CSI rbdplugin. -# This is to be used when adding a new Ceph cluster to the CSI plugin. +# +# This is to be used when using `clusterID` instead of monitors or +# monValueFromSecret in the StorageClass to specify the Ceph cluster to +# provision storage from, AND when the value of `--configroot` option to the +# CSI pods is NOT "k8s_objects". +# +# This patch file, patches in the specified secret for the 'clusterID' as a +# volume, instead of the Ceph CSI plugin actively fetching and using kubernetes +# secrets. +# # NOTE: Update csi-rbdplugin DaemonSet as well with similar patch -# Post substituting the in all places execute, +# Post substituting the in all places execute, # `kubectl patch statefulset csi-rbdplugin-provisioner --patch\ # "$(cat template-csi-rbdplugin-provisioner-patch.yaml)"` # to patch the statefulset deployment. @@ -12,10 +21,10 @@ spec: containers: - name: csi-rbdplugin volumeMounts: - - name: ceph-cluster- - mountPath: "/etc/csi-config/ceph-cluster-" + - name: ceph-cluster- + mountPath: "/etc/csi-config/ceph-cluster-" readOnly: true volumes: - - name: ceph-cluster- + - name: ceph-cluster- secret: - secretName: ceph-cluster- + secretName: ceph-cluster- diff --git a/pkg/rbd/rbd_attach.go b/pkg/rbd/rbd_attach.go index c187af562..d56ac74f3 100644 --- a/pkg/rbd/rbd_attach.go +++ b/pkg/rbd/rbd_attach.go @@ -280,7 +280,7 @@ func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (stri } klog.V(5).Infof("rbd: map mon %s", mon) - key, err := getRBDKey(volOpt.FsID, userID, creds) + key, err := getRBDKey(volOpt.ClusterID, userID, creds) if err != nil { return "", err } diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 1ed7455be..25dbdb737 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -52,7 +52,7 @@ type rbdVolume struct { UserID string `json:"userId"` Mounter string `json:"mounter"` DisableInUseChecks bool `json:"disableInUseChecks"` - FsID string `json:"fsid"` + ClusterID string `json:"clusterId"` } type rbdSnapshot struct { @@ -67,7 +67,7 @@ type rbdSnapshot struct { SizeBytes int64 `json:"sizeBytes"` AdminID string `json:"adminId"` UserID string `json:"userId"` - FsID string `json:"fsid"` + ClusterID string `json:"clusterId"` } var ( @@ -87,17 +87,16 @@ var ( supportedFeatures = sets.NewString("layering") ) -func getRBDKey(fsid string, id string, credentials map[string]string) (string, error) { +func getRBDKey(clusterid string, id string, credentials map[string]string) (string, error) { var ok bool var err error var key string if key, ok = credentials[id]; !ok { - if fsid != "" { - key, err = confStore.CredentialForUser(fsid, id) + if clusterid != "" { + key, err = confStore.KeyForUser(clusterid, id) if err != nil { - klog.Errorf("failed getting credentials (%s)", err) - return "", fmt.Errorf("RBD key for ID: %s not 
found in config store", id) + return "", fmt.Errorf("RBD key for ID: %s not found in config store of clusterID (%s)", id, clusterid) } } else { return "", fmt.Errorf("RBD key for ID: %s not found", id) @@ -137,7 +136,7 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map image := pOpts.VolName volSzMiB := fmt.Sprintf("%dM", volSz) - key, err := getRBDKey(pOpts.FsID, adminID, credentials) + key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) if err != nil { return err } @@ -168,7 +167,7 @@ func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) ( image := pOpts.VolName // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. - key, err := getRBDKey(pOpts.FsID, userID, credentials) + key, err := getRBDKey(pOpts.ClusterID, userID, credentials) if err != nil { return false, "", err } @@ -216,7 +215,7 @@ func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]str klog.Info("rbd is still being used ", image) return fmt.Errorf("rbd %s is still being used", image) } - key, err := getRBDKey(pOpts.FsID, adminID, credentials) + key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) if err != nil { return err } @@ -241,22 +240,22 @@ func execCommand(command string, args []string) ([]byte, error) { return cmd.CombinedOutput() } -func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret string, err error) { +func getMonsAndClusterID(options map[string]string) (monitors, clusterID, monInSecret string, err error) { var ok bool monitors, ok = options["monitors"] if !ok { // if mons are not set in options, check if they are set in secret if monInSecret, ok = options["monValueFromSecret"]; !ok { - // if mons are not in secret, check if we have a cluster-fsid - if fsID, ok = options["clusterID"]; !ok { + // if mons are not in secret, check if we have a cluster-id + if clusterID, ok = options["clusterID"]; !ok { err = errors.New("either monitors or monValueFromSecret or clusterID must be set") return } - if monitors, err = confStore.Mons(fsID); err != nil { + if monitors, err = confStore.Mons(clusterID); err != nil { klog.Errorf("failed getting mons (%s)", err) - err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", fsID) + err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", clusterID) return } } @@ -265,16 +264,16 @@ func getMonsAndFsID(options map[string]string) (monitors, fsID, monInSecret stri return } -func getIDs(options map[string]string, fsID string) (adminID, userID string, err error) { +func getIDs(options map[string]string, clusterID string) (adminID, userID string, err error) { var ok bool adminID, ok = options["adminid"] switch { case ok: - case fsID != "": - if adminID, err = confStore.AdminID(fsID); err != nil { + case clusterID != "": + if adminID, err = confStore.AdminID(clusterID); err != nil { klog.Errorf("failed getting subject (%s)", err) - return "", "", fmt.Errorf("failed to fetch provisioner ID using clusterID (%s)", fsID) + return "", "", fmt.Errorf("failed to fetch admin ID for clusterID (%s)", clusterID) } default: adminID = rbdDefaultAdminID @@ -283,10 +282,10 @@ func getIDs(options map[string]string, fsID string) (adminID, userID string, err userID, ok = options["userid"] switch { case ok: - case fsID != "": - if userID, err = confStore.UserID(fsID); err != nil { + case clusterID != "": + if userID, err = confStore.UserID(clusterID); err != nil { klog.Errorf("failed getting subject (%s)", err) - return "", "", 
fmt.Errorf("failed to fetch publisher ID using clusterID (%s)", fsID) + return "", "", fmt.Errorf("failed to fetch user ID using clusterID (%s)", clusterID) } default: userID = rbdDefaultUserID @@ -305,7 +304,7 @@ func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) return nil, errors.New("missing required parameter pool") } - rbdVol.Monitors, rbdVol.FsID, rbdVol.MonValueFromSecret, err = getMonsAndFsID(volOptions) + rbdVol.Monitors, rbdVol.ClusterID, rbdVol.MonValueFromSecret, err = getMonsAndClusterID(volOptions) if err != nil { return nil, err } @@ -346,7 +345,7 @@ func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error { var ok bool var err error - rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.FsID) + rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.ClusterID) if err != nil { return err } @@ -369,12 +368,12 @@ func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) return nil, errors.New("missing required parameter pool") } - rbdSnap.Monitors, rbdSnap.FsID, rbdSnap.MonValueFromSecret, err = getMonsAndFsID(snapOptions) + rbdSnap.Monitors, rbdSnap.ClusterID, rbdSnap.MonValueFromSecret, err = getMonsAndClusterID(snapOptions) if err != nil { return nil, err } - rbdSnap.AdminID, rbdSnap.UserID, err = getIDs(snapOptions, rbdSnap.FsID) + rbdSnap.AdminID, rbdSnap.UserID, err = getIDs(snapOptions, rbdSnap.ClusterID) if err != nil { return nil, err } @@ -439,7 +438,7 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string] image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(pOpts.FsID, adminID, credentials) + key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) if err != nil { return err } @@ -502,7 +501,7 @@ func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(pOpts.FsID, adminID, credentials) + key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) if err != nil { return err } @@ -529,7 +528,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[strin image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(pOpts.FsID, adminID, credentials) + key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) if err != nil { return err } @@ -556,7 +555,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s image := pOpts.VolName snapID := pOpts.SnapID - key, err := getRBDKey(pOpts.FsID, adminID, credentials) + key, err := getRBDKey(pOpts.ClusterID, adminID, credentials) if err != nil { return err } @@ -583,7 +582,7 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string image := pVolOpts.VolName snapID := pSnapOpts.SnapID - key, err := getRBDKey(pVolOpts.FsID, adminID, credentials) + key, err := getRBDKey(pVolOpts.ClusterID, adminID, credentials) if err != nil { return err } diff --git a/pkg/util/configstore.go b/pkg/util/configstore.go index 5064699ef..8e6c02ab4 100644 --- a/pkg/util/configstore.go +++ b/pkg/util/configstore.go @@ -27,7 +27,7 @@ import ( // StoreReader interface enables plugging different stores, that contain the // keys and data. 
(e.g k8s secrets or local files) type StoreReader interface { - DataForKey(fsid string, key string) (string, error) + DataForKey(clusterID string, key string) (string, error) } /* ConfigKeys contents and format, @@ -55,23 +55,23 @@ type ConfigStore struct { } // dataForKey returns data from the config store for the provided key -func (dc *ConfigStore) dataForKey(fsid string, key string) (string, error) { +func (dc *ConfigStore) dataForKey(clusterID string, key string) (string, error) { if dc.StoreReader != nil { - return dc.StoreReader.DataForKey(fsid, key) + return dc.StoreReader.DataForKey(clusterID, key) } err := errors.New("config store location uninitialized") return "", err } -// Mons returns a comma separated MON list from the cluster config represented by fsid -func (dc *ConfigStore) Mons(fsid string) (string, error) { - return dc.dataForKey(fsid, csMonitors) +// Mons returns a comma separated MON list from the cluster config represented by clusterID +func (dc *ConfigStore) Mons(clusterID string) (string, error) { + return dc.dataForKey(clusterID, csMonitors) } -// Pools returns a list of pool names from the cluster config represented by fsid -func (dc *ConfigStore) Pools(fsid string) ([]string, error) { - content, err := dc.dataForKey(fsid, csPools) +// Pools returns a list of pool names from the cluster config represented by clusterID +func (dc *ConfigStore) Pools(clusterID string) ([]string, error) { + content, err := dc.dataForKey(clusterID, csPools) if err != nil { return nil, err } @@ -79,42 +79,42 @@ func (dc *ConfigStore) Pools(fsid string) ([]string, error) { return strings.Split(content, ","), nil } -// AdminID returns the admin ID from the cluster config represented by fsid -func (dc *ConfigStore) AdminID(fsid string) (string, error) { - return dc.dataForKey(fsid, csAdminID) +// AdminID returns the admin ID from the cluster config represented by clusterID +func (dc *ConfigStore) AdminID(clusterID string) (string, error) { + return dc.dataForKey(clusterID, csAdminID) } -// UserID returns the user ID from the cluster config represented by fsid -func (dc *ConfigStore) UserID(fsid string) (string, error) { - return dc.dataForKey(fsid, csUserID) +// UserID returns the user ID from the cluster config represented by clusterID +func (dc *ConfigStore) UserID(clusterID string) (string, error) { + return dc.dataForKey(clusterID, csUserID) } -// CredentialForUser returns the credentials for the requested user ID -// from the cluster config represented by fsid -func (dc *ConfigStore) CredentialForUser(fsid, userID string) (data string, err error) { - var credkey string - user, err := dc.AdminID(fsid) +// KeyForUser returns the key for the requested user ID from the cluster config +// represented by clusterID +func (dc *ConfigStore) KeyForUser(clusterID, userID string) (data string, err error) { + var fetchKey string + user, err := dc.AdminID(clusterID) if err != nil { return } if user == userID { - credkey = csAdminKey + fetchKey = csAdminKey } else { - user, err = dc.UserID(fsid) + user, err = dc.UserID(clusterID) if err != nil { return } if user != userID { - err = fmt.Errorf("requested user (%s) not found in cluster configuration of (%s)", userID, fsid) + err = fmt.Errorf("requested user (%s) not found in cluster configuration of (%s)", userID, clusterID) return } - credkey = csUserKey + fetchKey = csUserKey } - return dc.dataForKey(fsid, credkey) + return dc.dataForKey(clusterID, fetchKey) } // NewConfigStore returns a config store based on value of configRoot. 
If diff --git a/pkg/util/configstore_test.go b/pkg/util/configstore_test.go index d85cefffa..92bd8b4a9 100644 --- a/pkg/util/configstore_test.go +++ b/pkg/util/configstore_test.go @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// nolint: gocyclo - package util import ( @@ -26,6 +24,7 @@ import ( ) var basePath = "./test_artifacts" +var clusterID = "testclusterid" var cs *ConfigStore func cleanupTestData() { @@ -51,20 +50,20 @@ func TestConfigStore(t *testing.T) { t.Errorf("Test setup error %s", err) } - // TEST: Should fail as fsid directory is missing - _, err = cs.Mons("testfsid") + // TEST: Should fail as clusterid directory is missing + _, err = cs.Mons(clusterID) if err == nil { t.Errorf("Failed: expected error due to missing parent directory") } - testDir = basePath + "/" + "ceph-cluster-testfsid" + testDir = basePath + "/" + "ceph-cluster-" + clusterID err = os.MkdirAll(testDir, 0700) if err != nil { t.Errorf("Test setup error %s", err) } // TEST: Should fail as mons file is missing - _, err = cs.Mons("testfsid") + _, err = cs.Mons(clusterID) if err == nil { t.Errorf("Failed: expected error due to missing mons file") } @@ -76,7 +75,7 @@ func TestConfigStore(t *testing.T) { } // TEST: Should fail as MONs is an empty string - content, err = cs.Mons("testfsid") + content, err = cs.Mons(clusterID) if err == nil { t.Errorf("Failed: want (%s), got (%s)", data, content) } @@ -88,7 +87,7 @@ func TestConfigStore(t *testing.T) { } // TEST: Fetching MONs should succeed - content, err = cs.Mons("testfsid") + content, err = cs.Mons(clusterID) if err != nil || content != data { t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err) } @@ -100,7 +99,7 @@ func TestConfigStore(t *testing.T) { } // TEST: Fetching MONs should succeed - listContent, err := cs.Pools("testfsid") + listContent, err := cs.Pools(clusterID) if err != nil || strings.Join(listContent, ",") != data { t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err) } @@ -112,7 +111,7 @@ func TestConfigStore(t *testing.T) { } // TEST: Fetching provuser should succeed - content, err = cs.AdminID("testfsid") + content, err = cs.AdminID(clusterID) if err != nil || content != data { t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err) } @@ -124,7 +123,7 @@ func TestConfigStore(t *testing.T) { } // TEST: Fetching pubuser should succeed - content, err = cs.UserID("testfsid") + content, err = cs.UserID(clusterID) if err != nil || content != data { t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err) } @@ -136,7 +135,7 @@ func TestConfigStore(t *testing.T) { } // TEST: Fetching provkey should succeed - content, err = cs.CredentialForUser("testfsid", "provuser") + content, err = cs.KeyForUser(clusterID, "provuser") if err != nil || content != data { t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err) } @@ -148,13 +147,13 @@ func TestConfigStore(t *testing.T) { } // TEST: Fetching pubkey should succeed - content, err = cs.CredentialForUser("testfsid", "pubuser") + content, err = cs.KeyForUser(clusterID, "pubuser") if err != nil || content != data { t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err) } // TEST: Fetching random user key should fail - _, err = cs.CredentialForUser("testfsid", "random") + _, err = cs.KeyForUser(clusterID, "random") if err == nil { t.Errorf("Failed: Expected to fail fetching random user key") } diff --git a/pkg/util/fileconfig.go 
b/pkg/util/fileconfig.go
index abf388e37..8e00ff236 100644
--- a/pkg/util/fileconfig.go
+++ b/pkg/util/fileconfig.go
@@ -30,7 +30,8 @@ BasePath defines the directory under which FileConfig will attempt to open and
 read contents of various Ceph cluster configurations.
 
 Each Ceph cluster configuration is stored under a directory named,
-BasePath/ceph-cluster-<fsid>, where <fsid> is the Ceph cluster fsid.
+BasePath/ceph-cluster-<clusterid>, where <clusterid> uniquely identifies and
+separates each Ceph cluster configuration.
 
 Under each Ceph cluster configuration directory, individual files named as per
 the ConfigKeys constants in the ConfigStore interface, store the required
@@ -42,12 +43,12 @@ type FileConfig struct {
 
 // DataForKey reads the appropriate config file, named using key, and returns
 // the contents of the file to the caller
-func (fc *FileConfig) DataForKey(fsid string, key string) (data string, err error) {
-	pathToKey := path.Join(fc.BasePath, "ceph-cluster-"+fsid, key)
+func (fc *FileConfig) DataForKey(clusterid string, key string) (data string, err error) {
+	pathToKey := path.Join(fc.BasePath, "ceph-cluster-"+clusterid, key)
 	// #nosec
 	content, err := ioutil.ReadFile(pathToKey)
 	if err != nil || string(content) == "" {
-		err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", fsid, err)
+		err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterid, err)
 		return
 	}
 
diff --git a/pkg/util/k8sconfig.go b/pkg/util/k8sconfig.go
index 0d1271169..310e2613e 100644
--- a/pkg/util/k8sconfig.go
+++ b/pkg/util/k8sconfig.go
@@ -27,7 +27,8 @@ K8sConfig is a ConfigStore interface implementation that reads configuration
 information from k8s secrets.
 
 Each Ceph cluster configuration secret is expected to be named,
-ceph-cluster-<fsid>, where <fsid> is the Ceph cluster fsid.
+ceph-cluster-<clusterid>, where <clusterid> uniquely identifies and
+separates each Ceph cluster configuration.
 
 The secret is expected to contain keys, as defined by the ConfigKeys constants
 in the ConfigStore interface.
@@ -37,18 +38,18 @@ type K8sConfig struct {
 	Namespace string
 }
 
-// DataForKey reads the appropriate k8s secret, named using fsid, and returns
-// the contents of key within the secret
-func (kc *K8sConfig) DataForKey(fsid string, key string) (data string, err error) {
-	secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get("ceph-cluster-"+fsid, metav1.GetOptions{})
+// DataForKey reads the appropriate k8s secret, named using clusterid, and
+// returns the contents of key within the secret
+func (kc *K8sConfig) DataForKey(clusterid string, key string) (data string, err error) {
+	secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get("ceph-cluster-"+clusterid, metav1.GetOptions{})
 	if err != nil {
-		err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", fsid, err)
+		err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterid, err)
 		return
 	}
 
 	content, ok := secret.Data[key]
 	if !ok {
-		err = fmt.Errorf("missing data for key (%s) in cluster configuration of (%s)", key, fsid)
+		err = fmt.Errorf("missing data for key (%s) in cluster configuration of (%s)", key, clusterid)
 		return
 	}
 
From ba2e5cff51bdb5fe65b1370723afa8da27f38b6a Mon Sep 17 00:00:00 2001
From: ShyamsundarR
Date: Wed, 13 Mar 2019 09:46:56 -0400
Subject: [PATCH 70/89] Address remnant subject reference and code style
 reviews

Signed-off-by: ShyamsundarR
---
 cmd/rbd/main.go                 |  2 +-
 docs/deploy-rbd.md              |  2 +-
 examples/README.md              |  4 +++-
 examples/rbd/snapshotclass.yaml |  2 +-
 examples/rbd/storageclass.yaml  |  2 +-
 pkg/rbd/rbd.go                  |  4 ++--
 pkg/rbd/rbd_util.go             | 38 ++++++++++++++++++++------------
 pkg/util/configstore.go         |  9 ++++----
 pkg/util/fileconfig.go          |  2 +-
 pkg/util/k8sconfig.go           |  2 +-
 10 files changed, 38 insertions(+), 29 deletions(-)

diff --git a/cmd/rbd/main.go b/cmd/rbd/main.go
index 421da93e8..bc4ddf53e 100644
--- a/cmd/rbd/main.go
+++ b/cmd/rbd/main.go
@@ -58,7 +58,7 @@ func main() {
 	}
 
 	driver := rbd.NewDriver()
-	driver.Run(*driverName, *nodeID, *endpoint, *containerized, *configRoot, cp)
+	driver.Run(*driverName, *nodeID, *endpoint, *configRoot, *containerized, cp)
 
 	os.Exit(0)
 }
diff --git a/docs/deploy-rbd.md b/docs/deploy-rbd.md
index 6d4ead827..50bdc5325 100644
--- a/docs/deploy-rbd.md
+++ b/docs/deploy-rbd.md
@@ -63,7 +63,7 @@ Parameter | Required | Description
 
 NOTE: If `clusterID` parameter is used, then an accompanying Ceph cluster
 configuration secret or config files needs to be provided to the running pods.
-Refer to `examples/README.md` section titled "Cluster ID based configuration"
+Refer to [Cluster ID based configuration](../examples/README.md#cluster-id-based-configuration)
 for more information. A suggested way to populate the clusterID is to use the
 output of `ceph fsid` of the Ceph cluster to be used for provisioning.
diff --git a/examples/README.md b/examples/README.md
index 55e2d376d..40b6fef26 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -239,7 +239,9 @@ Get the following information from the Ceph cluster,
 * Output of `ceph fsid`
   * Used to substitute `<cluster-fsid>` references in the files below
 
-Update the template `rbd/template-ceph-cluster-ID-secret.yaml` with values from
+Update the template
+[template-ceph-cluster-ID-secret.yaml](./rbd/template-ceph-cluster-ID-secret.yaml)
+with values from
 a Ceph cluster and replace `<cluster-fsid>` with the chosen clusterID to create
 the following secret,
 
diff --git a/examples/rbd/snapshotclass.yaml b/examples/rbd/snapshotclass.yaml
index a07b41ea2..2b1c01dca 100644
--- a/examples/rbd/snapshotclass.yaml
+++ b/examples/rbd/snapshotclass.yaml
@@ -11,7 +11,7 @@ parameters:
   monitors: mon1:port,mon2:port,...
   # OR,
   # String representing a Ceph cluster to provision storage from.
-  # Should be unique unique across all Ceph clusters in use for provisioning,
+  # Should be unique across all Ceph clusters in use for provisioning,
   # cannot be greater than 36 bytes in length, and should remain immutable for
   # the lifetime of the StorageClass in use.
   # If using clusterID, ensure to create a secret, as in
diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml
index cc4041ad2..0ec689477 100644
--- a/examples/rbd/storageclass.yaml
+++ b/examples/rbd/storageclass.yaml
@@ -10,7 +10,7 @@ parameters:
   monitors: mon1:port,mon2:port,...
   # OR,
   # String representing a Ceph cluster to provision storage from.
- # Should be unique unique across all Ceph clusters in use for provisioning, + # Should be unique across all Ceph clusters in use for provisioning, # cannot be greater than 36 bytes in length, and should remain immutable for # the lifetime of the StorageClass in use. # If using clusterID, ensure to create a secret, as in diff --git a/pkg/rbd/rbd.go b/pkg/rbd/rbd.go index 295752941..bcd5c6c90 100644 --- a/pkg/rbd/rbd.go +++ b/pkg/rbd/rbd.go @@ -89,12 +89,12 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err // Run start a non-blocking grpc controller,node and identityserver for // rbd CSI driver which can serve multiple parallel requests -func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, configroot string, cachePersister util.CachePersister) { +func (r *Driver) Run(driverName, nodeID, endpoint, configRoot string, containerized bool, cachePersister util.CachePersister) { var err error klog.Infof("Driver: %v version: %v", driverName, version) // Initialize config store - confStore, err = util.NewConfigStore(configroot) + confStore, err = util.NewConfigStore(configRoot) if err != nil { klog.Fatalln("Failed to initialize config store.") } diff --git a/pkg/rbd/rbd_util.go b/pkg/rbd/rbd_util.go index 25dbdb737..14fcc0ebf 100644 --- a/pkg/rbd/rbd_util.go +++ b/pkg/rbd/rbd_util.go @@ -87,10 +87,12 @@ var ( supportedFeatures = sets.NewString("layering") ) -func getRBDKey(clusterid string, id string, credentials map[string]string) (string, error) { - var ok bool - var err error - var key string +func getRBDKey(clusterid, id string, credentials map[string]string) (string, error) { + var ( + ok bool + err error + key string + ) if key, ok = credentials[id]; !ok { if clusterid != "" { @@ -272,8 +274,8 @@ func getIDs(options map[string]string, clusterID string) (adminID, userID string case ok: case clusterID != "": if adminID, err = confStore.AdminID(clusterID); err != nil { - klog.Errorf("failed getting subject (%s)", err) - return "", "", fmt.Errorf("failed to fetch admin ID for clusterID (%s)", clusterID) + klog.Errorf("failed getting adminID (%s)", err) + return "", "", fmt.Errorf("failed to fetch adminID for clusterID (%s)", clusterID) } default: adminID = rbdDefaultAdminID @@ -284,8 +286,8 @@ func getIDs(options map[string]string, clusterID string) (adminID, userID string case ok: case clusterID != "": if userID, err = confStore.UserID(clusterID); err != nil { - klog.Errorf("failed getting subject (%s)", err) - return "", "", fmt.Errorf("failed to fetch user ID using clusterID (%s)", clusterID) + klog.Errorf("failed getting userID (%s)", err) + return "", "", fmt.Errorf("failed to fetch userID using clusterID (%s)", clusterID) } default: userID = rbdDefaultUserID @@ -295,8 +297,10 @@ func getIDs(options map[string]string, clusterID string) (adminID, userID string } func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) { - var ok bool - var err error + var ( + ok bool + err error + ) rbdVol := &rbdVolume{} rbdVol.Pool, ok = volOptions["pool"] @@ -342,8 +346,10 @@ func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) } func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error { - var ok bool - var err error + var ( + ok bool + err error + ) rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.ClusterID) if err != nil { @@ -355,12 +361,14 @@ func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error { rbdVol.Mounter = 
rbdDefaultMounter } - return nil + return err } func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) { - var ok bool - var err error + var ( + ok bool + err error + ) rbdSnap := &rbdSnapshot{} rbdSnap.Pool, ok = snapOptions["pool"] diff --git a/pkg/util/configstore.go b/pkg/util/configstore.go index 8e6c02ab4..a1194efb6 100644 --- a/pkg/util/configstore.go +++ b/pkg/util/configstore.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Ceph-CSI Authors. +Copyright 2019 The Ceph-CSI Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34,7 +34,7 @@ type StoreReader interface { - csMonitors: MON list, comma separated - csAdminID: adminID, used for provisioning - csUserID: userID, used for publishing -- csAdminKey: key, for userID in csProvisionerUser +- csAdminKey: key, for adminID in csProvisionerUser - csUserKey: key, for userID in csPublisherUser - csPools: Pool list, comma separated */ @@ -55,13 +55,12 @@ type ConfigStore struct { } // dataForKey returns data from the config store for the provided key -func (dc *ConfigStore) dataForKey(clusterID string, key string) (string, error) { +func (dc *ConfigStore) dataForKey(clusterID, key string) (string, error) { if dc.StoreReader != nil { return dc.StoreReader.DataForKey(clusterID, key) } - err := errors.New("config store location uninitialized") - return "", err + return "", errors.New("config store location uninitialized") } // Mons returns a comma separated MON list from the cluster config represented by clusterID diff --git a/pkg/util/fileconfig.go b/pkg/util/fileconfig.go index 8e00ff236..4d156a603 100644 --- a/pkg/util/fileconfig.go +++ b/pkg/util/fileconfig.go @@ -43,7 +43,7 @@ type FileConfig struct { // DataForKey reads the appropriate config file, named using key, and returns // the contents of the file to the caller -func (fc *FileConfig) DataForKey(clusterid string, key string) (data string, err error) { +func (fc *FileConfig) DataForKey(clusterid, key string) (data string, err error) { pathToKey := path.Join(fc.BasePath, "ceph-cluster-"+clusterid, key) // #nosec content, err := ioutil.ReadFile(pathToKey) diff --git a/pkg/util/k8sconfig.go b/pkg/util/k8sconfig.go index 310e2613e..d00ad71b0 100644 --- a/pkg/util/k8sconfig.go +++ b/pkg/util/k8sconfig.go @@ -40,7 +40,7 @@ type K8sConfig struct { // DataForKey reads the appropriate k8s secret, named using clusterid, and // returns the contents of key within the secret -func (kc *K8sConfig) DataForKey(clusterid string, key string) (data string, err error) { +func (kc *K8sConfig) DataForKey(clusterid, key string) (data string, err error) { secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get("ceph-cluster-"+clusterid, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("error fetching configuration for cluster ID (%s). 
(%s)", clusterid, err) From b0d6737db81e2ba658514ea2ab19d043caa0b43f Mon Sep 17 00:00:00 2001 From: ShyamsundarR Date: Wed, 13 Mar 2019 09:58:34 -0400 Subject: [PATCH 71/89] Removed duplicate patch template Signed-off-by: ShyamsundarR --- .../rbd/template-csi-rbdplugin-patch.yaml | 5 +++- ...plate-csi-rbdplugin-provisioner-patch.yaml | 30 ------------------- 2 files changed, 4 insertions(+), 31 deletions(-) delete mode 100644 examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml diff --git a/examples/rbd/template-csi-rbdplugin-patch.yaml b/examples/rbd/template-csi-rbdplugin-patch.yaml index d89afbe8e..77570ae96 100644 --- a/examples/rbd/template-csi-rbdplugin-patch.yaml +++ b/examples/rbd/template-csi-rbdplugin-patch.yaml @@ -10,10 +10,13 @@ # volume, instead of the Ceph CSI plugin actively fetching and using kubernetes # secrets. # -# NOTE: Update csi-rbdplugin-provisioner StatefulSet as well with similar patch # Post substituting the in all places execute, # `kubectl patch daemonset csi-rbdplugin --patch\ # "$(cat template-csi-rbdplugin-patch.yaml)"` +# to patch the daemonset deployment. +# +# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\ +# "$(cat template-csi-rbdplugin-patch.yaml)"` # to patch the statefulset deployment. spec: template: diff --git a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml b/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml deleted file mode 100644 index 68a36fcdf..000000000 --- a/examples/rbd/template-csi-rbdplugin-provisioner-patch.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# This is a patch to the existing statefulset deployment of CSI rbdplugin. -# -# This is to be used when using `clusterID` instead of monitors or -# monValueFromSecret in the StorageClass to specify the Ceph cluster to -# provision storage from, AND when the value of `--configroot` option to the -# CSI pods is NOT "k8s_objects". -# -# This patch file, patches in the specified secret for the 'clusterID' as a -# volume, instead of the Ceph CSI plugin actively fetching and using kubernetes -# secrets. -# -# NOTE: Update csi-rbdplugin DaemonSet as well with similar patch -# Post substituting the in all places execute, -# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\ -# "$(cat template-csi-rbdplugin-provisioner-patch.yaml)"` -# to patch the statefulset deployment. -spec: - template: - spec: - containers: - - name: csi-rbdplugin - volumeMounts: - - name: ceph-cluster- - mountPath: "/etc/csi-config/ceph-cluster-" - readOnly: true - volumes: - - name: ceph-cluster- - secret: - secretName: ceph-cluster- From af330fe68ea4d1e9c295e0dc224839bb6e94f134 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Wed, 27 Mar 2019 16:04:58 +0800 Subject: [PATCH 72/89] 1. fix mountcache race conflict 2. support user-defined cache dir 3. 
if not define mountcachedir disable mountcache --- cmd/cephfs/main.go | 2 ++ pkg/cephfs/mountcache.go | 59 ++++++++++++++++++++++++++-------------- pkg/util/nodecache.go | 14 ---------- 3 files changed, 41 insertions(+), 34 deletions(-) diff --git a/cmd/cephfs/main.go b/cmd/cephfs/main.go index fc5c0dbcc..e0972f17f 100644 --- a/cmd/cephfs/main.go +++ b/cmd/cephfs/main.go @@ -31,6 +31,7 @@ var ( nodeID = flag.String("nodeid", "", "node id") volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')") metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]") + mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir") ) func init() { @@ -49,6 +50,7 @@ func main() { } //update plugin name cephfs.PluginFolder = cephfs.PluginFolder + *driverName + cephfs.MountCacheDir = *mountCacheDir cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName) if err != nil { diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go index 2f7a0a5c6..dd7e89cb0 100644 --- a/pkg/cephfs/mountcache.go +++ b/pkg/cephfs/mountcache.go @@ -40,10 +40,10 @@ type volumeMountCacheMap struct { } var ( - csiPersistentVolumeRoot = "/var/lib/kubelet/plugins/kubernetes.io/csi" - volumeMountCachePrefix = "cephfs-mount-cache-" - volumeMountCache volumeMountCacheMap - volumeMountCacheMtx sync.Mutex + MountCacheDir = "" + volumeMountCachePrefix = "cephfs-mount-cache-" + volumeMountCache volumeMountCacheMap + volumeMountCacheMtx sync.Mutex ) func remountHisMountedPath(name string, v string, nodeID string, cachePersister util.CachePersister) error { @@ -56,16 +56,18 @@ func remountHisMountedPath(name string, v string, nodeID string, cachePersister volumeMountCache.MetadataStore = cachePersister - volumeMountCache.NodeCacheStore.BasePath = PluginFolder - volumeMountCache.NodeCacheStore.CacheDir = "volumes-mount-cache" + volumeMountCache.NodeCacheStore.BasePath = MountCacheDir + volumeMountCache.NodeCacheStore.CacheDir = "" - if _, err := os.Stat(csiPersistentVolumeRoot); err != nil { - klog.Infof("mount-cache: csi pv root path %s stat fail %v, may not in daemonset csi plugin, exit", csiPersistentVolumeRoot, err) - return err + if len(MountCacheDir) == 0 { + //if mount cache dir unset, disable remount + klog.Infof("mount-cache: mountcachedir no define disalbe mount cache.") + return nil } + klog.Infof("mount-cache: MountCacheDir: %s", MountCacheDir) if err := os.MkdirAll(volumeMountCache.NodeCacheStore.BasePath, 0755); err != nil { - klog.Fatalf("mount-cache: failed to create %s: %v", volumeMountCache.NodeCacheStore.BasePath, err) + klog.Errorf("mount-cache: failed to create %s: %v", volumeMountCache.NodeCacheStore.BasePath, err) return err } me := &volumeMountEntry{} @@ -203,6 +205,10 @@ func genVolumeMountCacheFileName(volID string) string { } func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath string, secrets map[string]string) error { + if len(MountCacheDir) == 0 { + //if mount cache dir unset, disable remount + return nil + } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() @@ -210,8 +216,7 @@ func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath s me, ok := volumeMountCache.Volumes[volID] if ok { if me.StagingPath == stagingTargetPath { - klog.Infof("mount-cache: node stage volume last cache entry for volume %s stagingTargetPath %s no equal %s", - volID, me.StagingPath, stagingTargetPath) + 
klog.Warningf("mount-cache: node unexpected restage volume for volume %s", volID) return nil } lastTargetPaths = me.TargetPaths @@ -239,6 +244,10 @@ func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath s } func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string, stagingTargetPath string) error { + if len(MountCacheDir) == 0 { + //if mount cache dir unset, disable remount + return nil + } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() delete(volumeMountCache.Volumes, volID) @@ -250,6 +259,10 @@ func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string, stagingTargetPath } func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error { + if len(MountCacheDir) == 0 { + //if mount cache dir unset, disable remount + return nil + } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() @@ -259,15 +272,14 @@ func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string return errors.New("mount-cache: node publish volume failed to find cache entry for volume") } volumeMountCache.Volumes[volID].TargetPaths[targetPath] = readOnly - me := volumeMountCache.Volumes[volID] - if err := mc.NodeCacheStore.Update(genVolumeMountCacheFileName(volID), me); err != nil { - klog.Errorf("mount-cache: node publish volume failed to store a cache entry for volume %s: %v", volID, err) - return err - } - return nil + return mc.updateNodeCache(volID) } func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath string) error { + if len(MountCacheDir) == 0 { + //if mount cache dir unset, disable remount + return nil + } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() @@ -277,9 +289,16 @@ func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath stri return errors.New("mount-cache: node unpublish volume failed to find cache entry for volume") } delete(volumeMountCache.Volumes[volID].TargetPaths, targetPath) + return mc.updateNodeCache(volID) +} + +func (mc *volumeMountCacheMap) updateNodeCache(volID string) error { me := volumeMountCache.Volumes[volID] - if err := mc.NodeCacheStore.Update(genVolumeMountCacheFileName(volID), me); err != nil { - klog.Errorf("mount-cache: node unpublish volume failed to store a cache entry for volume %s: %v", volID, err) + if err := volumeMountCache.NodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { + klog.Infof("mount-cache: metadata nofound, delete mount cache failed for volume %s", volID) + } + if err := mc.NodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { + klog.Errorf("mount-cache: mount cache failed to update for volume %s: %v", volID, err) return err } return nil diff --git a/pkg/util/nodecache.go b/pkg/util/nodecache.go index 86278f4d2..bcb94debc 100644 --- a/pkg/util/nodecache.go +++ b/pkg/util/nodecache.go @@ -101,20 +101,6 @@ func decodeObj(filepath, pattern string, file os.FileInfo, destObj interface{}) } -func (nc *NodeCache) Update(identifier string, data interface{}) error { - file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json") - identifierTmp := identifier + ".creating" - fileTmp := path.Join(nc.BasePath, nc.CacheDir, identifierTmp+".json") - os.Remove(fileTmp) - if err := nc.Create(identifierTmp, data); err != nil { - return errors.Wrapf(err, "node-cache: failed to create metadata storage file %s\n", file) - } - if err := os.Rename(fileTmp, file); err != nil { - return errors.Wrapf(err, "node-cache: couldn't rename %s as %s", fileTmp, file) - } - return 
nil -} - // Create creates the metadata file in cache directory with identifier name func (nc *NodeCache) Create(identifier string, data interface{}) error { file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json") From 5b53e90ee442f9b68e3c8e1ed014fa2398fdc82c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Fri, 29 Mar 2019 10:18:59 +0800 Subject: [PATCH 73/89] fix code style --- cmd/cephfs/main.go | 3 +- pkg/cephfs/driver.go | 11 ++- pkg/cephfs/mountcache.go | 168 ++++++++++++++++++--------------------- pkg/cephfs/nodeserver.go | 2 +- 4 files changed, 85 insertions(+), 99 deletions(-) diff --git a/cmd/cephfs/main.go b/cmd/cephfs/main.go index e0972f17f..a4d01a5bc 100644 --- a/cmd/cephfs/main.go +++ b/cmd/cephfs/main.go @@ -50,7 +50,6 @@ func main() { } //update plugin name cephfs.PluginFolder = cephfs.PluginFolder + *driverName - cephfs.MountCacheDir = *mountCacheDir cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName) if err != nil { @@ -58,7 +57,7 @@ func main() { } driver := cephfs.NewDriver() - driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, cp) + driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, *mountCacheDir, cp) os.Exit(0) } diff --git a/pkg/cephfs/driver.go b/pkg/cephfs/driver.go index ee7b446b8..7c5dfc61e 100644 --- a/pkg/cephfs/driver.go +++ b/pkg/cephfs/driver.go @@ -77,7 +77,7 @@ func NewNodeServer(d *csicommon.CSIDriver) *NodeServer { // Run start a non-blocking grpc controller,node and identityserver for // ceph CSI driver which can serve multiple parallel requests -func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cachePersister util.CachePersister) { +func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir string, cachePersister util.CachePersister) { klog.Infof("Driver: %v version: %v", driverName, version) // Configuration @@ -105,9 +105,12 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cacheP klog.Fatalf("failed to write ceph configuration file: %v", err) } - if err := remountHisMountedPath(driverName, version, nodeID, cachePersister); err != nil { - klog.Warningf("failed to remounted history mounted path: %v", err) - //ignore remount fail + initVolumeMountCache(driverName, mountCacheDir, cachePersister) + if mountCacheDir != "" { + if err := remountCachedVolumes(); err != nil { + klog.Warningf("failed to remount cached volumes: %v", err) + //ignore remount fail + } } // Initialize default library driver diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go index dd7e89cb0..64daf9a01 100644 --- a/pkg/cephfs/mountcache.go +++ b/pkg/cephfs/mountcache.go @@ -12,82 +12,66 @@ import ( "k8s.io/klog" ) -type volumeMountEntry struct { - NodeID string `json:"nodeID"` +type volumeMountCacheEntry struct { DriverName string `json:"driverName"` DriverVersion string `json:"driverVersion"` - Namespace string `json:"namespace"` - VolumeID string `json:"volumeID"` Secrets map[string]string `json:"secrets"` StagingPath string `json:"stagingPath"` TargetPaths map[string]bool `json:"targetPaths"` CreateTime time.Time `json:"createTime"` LastMountTime time.Time `json:"lastMountTime"` - LoadCount uint64 `json:"loadCount"` } type volumeMountCacheMap struct { - DriverName string - DriverVersion string - NodeID string - MountFailNum int64 - MountSuccNum int64 - Volumes map[string]volumeMountEntry - NodeCacheStore util.NodeCache - MetadataStore util.CachePersister + driverName string + volumes 
map[string]volumeMountCacheEntry + nodeCacheStore util.NodeCache + metadataStore util.CachePersister } var ( - MountCacheDir = "" volumeMountCachePrefix = "cephfs-mount-cache-" volumeMountCache volumeMountCacheMap volumeMountCacheMtx sync.Mutex ) -func remountHisMountedPath(name string, v string, nodeID string, cachePersister util.CachePersister) error { - volumeMountCache.Volumes = make(map[string]volumeMountEntry) - volumeMountCache.NodeID = nodeID - volumeMountCache.DriverName = name - volumeMountCache.DriverVersion = v - volumeMountCache.MountSuccNum = 0 - volumeMountCache.MountFailNum = 0 +func initVolumeMountCache(driverName string, mountCacheDir string, cachePersister util.CachePersister) { + volumeMountCache.volumes = make(map[string]volumeMountCacheEntry) - volumeMountCache.MetadataStore = cachePersister + volumeMountCache.driverName = driverName + volumeMountCache.metadataStore = cachePersister + volumeMountCache.nodeCacheStore.BasePath = mountCacheDir + volumeMountCache.nodeCacheStore.CacheDir = "" + klog.Infof("mount-cache: name: %s, version: %s, mountCacheDir: %s", driverName, version, mountCacheDir) +} - volumeMountCache.NodeCacheStore.BasePath = MountCacheDir - volumeMountCache.NodeCacheStore.CacheDir = "" - - if len(MountCacheDir) == 0 { - //if mount cache dir unset, disable remount - klog.Infof("mount-cache: mountcachedir no define disalbe mount cache.") - return nil - } - - klog.Infof("mount-cache: MountCacheDir: %s", MountCacheDir) - if err := os.MkdirAll(volumeMountCache.NodeCacheStore.BasePath, 0755); err != nil { - klog.Errorf("mount-cache: failed to create %s: %v", volumeMountCache.NodeCacheStore.BasePath, err) +func remountCachedVolumes() error { + if err := os.MkdirAll(volumeMountCache.nodeCacheStore.BasePath, 0755); err != nil { + klog.Errorf("mount-cache: failed to create %s: %v", volumeMountCache.nodeCacheStore.BasePath, err) return err } - me := &volumeMountEntry{} + var remountFailCount, remountSuccCount int64 + me := &volumeMountCacheEntry{} ce := &controllerCacheEntry{} - err := volumeMountCache.NodeCacheStore.ForAll(volumeMountCachePrefix, me, func(identifier string) error { + err := volumeMountCache.nodeCacheStore.ForAll(volumeMountCachePrefix, me, func(identifier string) error { volID := me.VolumeID - klog.Infof("mount-cache: load %v", me) - if err := volumeMountCache.MetadataStore.Get(volID, ce); err != nil { + if err := volumeMountCache.metadataStore.Get(volID, ce); err != nil { if err, ok := err.(*util.CacheEntryNotFound); ok { - klog.Infof("cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err) - if err := volumeMountCache.NodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { + klog.Infof("mount-cache: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err) + if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { klog.Infof("mount-cache: metadata nofound, delete volume cache entry for volume %s", volID) } } } else { if err := mountOneCacheEntry(ce, me); err == nil { - volumeMountCache.MountSuccNum++ - volumeMountCache.Volumes[me.VolumeID] = *me + remountSuccCount++ + volumeMountCache.volumes[me.VolumeID] = *me + klog.Infof("mount-cache: remount volume %s succ", volID) } else { - volumeMountCache.MountFailNum++ + remountFailCount++ + klog.Infof("mount-cache: remount volume cache %s fail", volID) } } return nil @@ -96,33 +80,42 @@ func remountHisMountedPath(name string, v string, nodeID string, 
cachePersister klog.Infof("mount-cache: metastore list cache fail %v", err) return err } - if volumeMountCache.MountFailNum > volumeMountCache.MountSuccNum { - return errors.New("mount-cache: too many volumes mount fail") + if remountFailCount > 0 { + klog.Infof("mount-cache: succ remount %d volumes, fail remount %d volumes", remountSuccCount, remountFailCount) + } else { + klog.Infof("mount-cache: volume cache num %d, all succ remount", remountSuccCount) } - klog.Infof("mount-cache: succ remount %d volumes, fail remount %d volumes", volumeMountCache.MountSuccNum, volumeMountCache.MountFailNum) return nil } -func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountEntry) error { +func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountCacheEntry) error { volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() - var err error + var ( + err error + cr *credentials + ) volID := ce.VolumeID volOptions := ce.VolOptions - adminCr, err := getAdminCredentials(decodeCredentials(me.Secrets)) - if err != nil { - return err - } - entity, err := getCephUser(&volOptions, adminCr, volID) - if err != nil { - klog.Infof("mount-cache: failed to get ceph user: %s %v", volID, me.StagingPath) - } - cr := entity.toCredentials() - if volOptions.ProvisionVolume { volOptions.RootPath = getVolumeRootPathCeph(volID) + cr, err = getAdminCredentials(decodeCredentials(me.Secrets)) + if err != nil { + return err + } + var entity *cephEntity + entity, err = getCephUser(&volOptions, cr, volID) + if err != nil { + return err + } + cr = entity.toCredentials() + } else { + cr, err = getUserCredentials(decodeCredentials(me.Secrets)) + if err != nil { + return err + } } err = cleanupMountPoint(me.StagingPath) @@ -164,7 +157,7 @@ func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountEntry) error { func cleanupMountPoint(mountPoint string) error { if _, err := os.Stat(mountPoint); err != nil { - if IsCorruptedMnt(err) { + if isCorruptedMnt(err) { klog.Infof("mount-cache: corrupted mount point %s, need unmount", mountPoint) err := execCommandErr("umount", mountPoint) if err != nil { @@ -180,7 +173,7 @@ func cleanupMountPoint(mountPoint string) error { return nil } -func IsCorruptedMnt(err error) bool { +func isCorruptedMnt(err error) bool { if err == nil { return false } @@ -203,17 +196,20 @@ func genVolumeMountCacheFileName(volID string) string { cachePath := volumeMountCachePrefix + volID return cachePath } +func (mc *volumeMountCacheMap) isEnable() bool { + //if mount cache dir unset, disable state + return mc.nodeCacheStore.BasePath != "" +} func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath string, secrets map[string]string) error { - if len(MountCacheDir) == 0 { - //if mount cache dir unset, disable remount + if !mc.isEnable() { return nil } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() lastTargetPaths := make(map[string]bool) - me, ok := volumeMountCache.Volumes[volID] + me, ok := volumeMountCache.volumes[volID] if ok { if me.StagingPath == stagingTargetPath { klog.Warningf("mount-cache: node unexpected restage volume for volume %s", volID) @@ -223,82 +219,70 @@ func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath s klog.Warningf("mount-cache: node stage volume ignore last cache entry for volume %s", volID) } - me = volumeMountEntry{NodeID: mc.NodeID, DriverName: mc.DriverName, DriverVersion: mc.DriverVersion} + me = volumeMountCacheEntry{DriverName: mc.driverName, DriverVersion: version} me.VolumeID = volID me.Secrets = 
encodeCredentials(secrets) me.StagingPath = stagingTargetPath me.TargetPaths = lastTargetPaths - curTime := time.Now() - me.CreateTime = curTime - me.CreateTime = curTime - me.LoadCount = 0 - volumeMountCache.Volumes[volID] = me - if err := mc.NodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { - klog.Errorf("mount-cache: node stage volume failed to store a cache entry for volume %s: %v", volID, err) + me.CreateTime = time.Now() + volumeMountCache.volumes[volID] = me + if err := mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { return err } - klog.Infof("mount-cache: node stage volume succ to store a cache entry for volume %s: %v", volID, me) return nil } -func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string, stagingTargetPath string) error { - if len(MountCacheDir) == 0 { - //if mount cache dir unset, disable remount +func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error { + if !mc.isEnable() { return nil } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() - delete(volumeMountCache.Volumes, volID) - if err := mc.NodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err != nil { - klog.Infof("mount-cache: node unstage volume failed to delete cache entry for volume %s: %s %v", volID, stagingTargetPath, err) + delete(volumeMountCache.volumes, volID) + if err := mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err != nil { return err } return nil } func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error { - if len(MountCacheDir) == 0 { - //if mount cache dir unset, disable remount + if !mc.isEnable() { return nil } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() - _, ok := volumeMountCache.Volumes[volID] + _, ok := volumeMountCache.volumes[volID] if !ok { - klog.Errorf("mount-cache: node publish volume failed to find cache entry for volume %s", volID) return errors.New("mount-cache: node publish volume failed to find cache entry for volume") } - volumeMountCache.Volumes[volID].TargetPaths[targetPath] = readOnly + volumeMountCache.volumes[volID].TargetPaths[targetPath] = readOnly return mc.updateNodeCache(volID) } func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath string) error { - if len(MountCacheDir) == 0 { - //if mount cache dir unset, disable remount + if !mc.isEnable() { return nil } volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() - _, ok := volumeMountCache.Volumes[volID] + _, ok := volumeMountCache.volumes[volID] if !ok { - klog.Errorf("mount-cache: node unpublish volume failed to find cache entry for volume %s", volID) return errors.New("mount-cache: node unpublish volume failed to find cache entry for volume") } - delete(volumeMountCache.Volumes[volID].TargetPaths, targetPath) + delete(volumeMountCache.volumes[volID].TargetPaths, targetPath) return mc.updateNodeCache(volID) } func (mc *volumeMountCacheMap) updateNodeCache(volID string) error { - me := volumeMountCache.Volumes[volID] - if err := volumeMountCache.NodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { + me := volumeMountCache.volumes[volID] + if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { klog.Infof("mount-cache: metadata nofound, delete mount cache failed for volume %s", volID) } - if err := mc.NodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { - klog.Errorf("mount-cache: mount cache failed to update for volume %s: %v", 
volID, err) + if err := mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { return err } return nil diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go index 345e4904d..56d909ba9 100644 --- a/pkg/cephfs/nodeserver.go +++ b/pkg/cephfs/nodeserver.go @@ -245,7 +245,7 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag stagingTargetPath := req.GetStagingTargetPath() volID := req.GetVolumeId() - if err = volumeMountCache.nodeUnStageVolume(volID, stagingTargetPath); err != nil { + if err = volumeMountCache.nodeUnStageVolume(volID); err != nil { klog.Warningf("mount-cache: failed unstage volume %s %s: %v", volID, stagingTargetPath, err) } From 043d3603ff2464e28fbc0cc91e126a329525005f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Fri, 29 Mar 2019 10:49:22 +0800 Subject: [PATCH 74/89] remove unuse var --- pkg/cephfs/mountcache.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go index 64daf9a01..a7b73d85e 100644 --- a/pkg/cephfs/mountcache.go +++ b/pkg/cephfs/mountcache.go @@ -16,12 +16,11 @@ type volumeMountCacheEntry struct { DriverName string `json:"driverName"` DriverVersion string `json:"driverVersion"` - VolumeID string `json:"volumeID"` - Secrets map[string]string `json:"secrets"` - StagingPath string `json:"stagingPath"` - TargetPaths map[string]bool `json:"targetPaths"` - CreateTime time.Time `json:"createTime"` - LastMountTime time.Time `json:"lastMountTime"` + VolumeID string `json:"volumeID"` + Secrets map[string]string `json:"secrets"` + StagingPath string `json:"stagingPath"` + TargetPaths map[string]bool `json:"targetPaths"` + CreateTime time.Time `json:"createTime"` } type volumeMountCacheMap struct { From 295202b98ea3ae096e1522ee7dcc28cb09842904 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Wed, 27 Mar 2019 13:08:42 +0800 Subject: [PATCH 75/89] issue #285 fix unexpect getCephRootPathLocal value PluginFolder update at main.go, so cephRootPrefix may get unexpected value --- pkg/cephfs/volume.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index c8a77f335..1380d778f 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -31,11 +31,11 @@ const ( ) var ( - cephRootPrefix = PluginFolder + "/controller/volumes/root-" + cephRootPrefix = "/controller/volumes/root-" ) func getCephRootPathLocal(volID volumeID) string { - return cephRootPrefix + string(volID) + return PluginFolder + cephRootPrefix + string(volID) } func getCephRootVolumePathLocal(volID volumeID) string { From 209774160752ac5665051b2599e9a411496cc388 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Wed, 27 Mar 2019 19:42:06 +0800 Subject: [PATCH 76/89] remove cephRootPrefix as global var --- pkg/cephfs/volume.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go index 1380d778f..a4d6f197d 100644 --- a/pkg/cephfs/volume.go +++ b/pkg/cephfs/volume.go @@ -30,12 +30,8 @@ const ( namespacePrefix = "ns-" ) -var ( - cephRootPrefix = "/controller/volumes/root-" -) - func getCephRootPathLocal(volID volumeID) string { - return PluginFolder + cephRootPrefix + string(volID) + return fmt.Sprintf("%s/controller/volumes/root-%s", PluginFolder, string(volID)) } func getCephRootVolumePathLocal(volID volumeID) string { From 4ec3a5777a4a20eaaa0b8e99a90529670365d4d4 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Fri, 29 Mar 2019 16:09:05 +0800 Subject: [PATCH 77/89] code style --- pkg/cephfs/mountcache.go | 49 ++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 27 deletions(-) diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go index a7b73d85e..fbc24df54 100644 --- a/pkg/cephfs/mountcache.go +++ b/pkg/cephfs/mountcache.go @@ -13,7 +13,6 @@ import ( ) type volumeMountCacheEntry struct { - DriverName string `json:"driverName"` DriverVersion string `json:"driverVersion"` VolumeID string `json:"volumeID"` @@ -24,7 +23,6 @@ type volumeMountCacheEntry struct { } type volumeMountCacheMap struct { - driverName string volumes map[string]volumeMountCacheEntry nodeCacheStore util.NodeCache metadataStore util.CachePersister @@ -39,7 +37,6 @@ var ( func initVolumeMountCache(driverName string, mountCacheDir string, cachePersister util.CachePersister) { volumeMountCache.volumes = make(map[string]volumeMountCacheEntry) - volumeMountCache.driverName = driverName volumeMountCache.metadataStore = cachePersister volumeMountCache.nodeCacheStore.BasePath = mountCacheDir volumeMountCache.nodeCacheStore.CacheDir = "" @@ -58,19 +55,19 @@ func remountCachedVolumes() error { volID := me.VolumeID if err := volumeMountCache.metadataStore.Get(volID, ce); err != nil { if err, ok := err.(*util.CacheEntryNotFound); ok { - klog.Infof("mount-cache: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err) + klog.Infof("mount-cache: metadata not found, assuming the volume %s to be already deleted (%v)", volID, err) if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { - klog.Infof("mount-cache: metadata nofound, delete volume cache entry for volume %s", volID) + klog.Infof("mount-cache: metadata not found, delete volume cache entry for volume %s", volID) } } } else { if err := mountOneCacheEntry(ce, me); err == nil { remountSuccCount++ volumeMountCache.volumes[me.VolumeID] = *me - klog.Infof("mount-cache: remount volume %s succ", volID) + klog.Infof("mount-cache: remount volume %s success", volID) } else { remountFailCount++ - klog.Infof("mount-cache: remount volume cache %s fail", volID) + klog.Errorf("mount-cache: remount volume cache %s fail", volID) } } return nil @@ -80,7 +77,7 @@ func remountCachedVolumes() error { return err } if remountFailCount > 0 { - klog.Infof("mount-cache: succ remount %d volumes, fail remount %d volumes", remountSuccCount, remountFailCount) + klog.Infof("mount-cache: success remount %d volumes, fail remount %d volumes", remountSuccCount, remountFailCount) } else { klog.Infof("mount-cache: volume cache num %d, all succ remount", remountSuccCount) } @@ -146,7 +143,7 @@ func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountCacheEntry) err klog.Errorf("mount-cache: failed to bind-mount volume %s: %s %s %v %v", volID, me.StagingPath, targetPath, readOnly, err) } else { - klog.Infof("mount-cache: succ bind-mount volume %s: %s %s %v", + klog.Infof("mount-cache: successfully bind-mount volume %s: %s %s %v", volID, me.StagingPath, targetPath, readOnly) } } @@ -173,9 +170,6 @@ func cleanupMountPoint(mountPoint string) error { } func isCorruptedMnt(err error) bool { - if err == nil { - return false - } var underlyingError error switch pe := err.(type) { case nil: @@ -186,9 +180,19 @@ func isCorruptedMnt(err error) bool { underlyingError = pe.Err case *os.SyscallError: underlyingError = pe.Err + default: + return 
false } - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES + CorruptedErrors := []error{ + syscall.ENOTCONN, syscall.ESTALE, syscall.EIO, syscall.EACCES} + + for _, v := range CorruptedErrors { + if underlyingError == v { + return true + } + } + return false } func genVolumeMountCacheFileName(volID string) string { @@ -218,7 +222,7 @@ func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath s klog.Warningf("mount-cache: node stage volume ignore last cache entry for volume %s", volID) } - me = volumeMountCacheEntry{DriverName: mc.driverName, DriverVersion: version} + me = volumeMountCacheEntry{DriverVersion: version} me.VolumeID = volID me.Secrets = encodeCredentials(secrets) @@ -227,10 +231,7 @@ func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath s me.CreateTime = time.Now() volumeMountCache.volumes[volID] = me - if err := mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { - return err - } - return nil + return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me) } func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error { @@ -240,10 +241,7 @@ func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error { volumeMountCacheMtx.Lock() defer volumeMountCacheMtx.Unlock() delete(volumeMountCache.volumes, volID) - if err := mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err != nil { - return err - } - return nil + return mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)) } func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error { @@ -279,12 +277,9 @@ func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath stri func (mc *volumeMountCacheMap) updateNodeCache(volID string) error { me := volumeMountCache.volumes[volID] if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil { - klog.Infof("mount-cache: metadata nofound, delete mount cache failed for volume %s", volID) + klog.Infof("mount-cache: metadata notfound, delete mount cache failed for volume %s", volID) } - if err := mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me); err != nil { - return err - } - return nil + return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me) } func encodeCredentials(input map[string]string) (output map[string]string) { From dfdefe40c9221f2adb7ac77b838cc8d8ed1e163c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Fri, 29 Mar 2019 16:11:02 +0800 Subject: [PATCH 78/89] add cephfs driver **--mountcachedir** parameter document --- docs/deploy-cephfs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/deploy-cephfs.md b/docs/deploy-cephfs.md index a88293a14..6266107e7 100644 --- a/docs/deploy-cephfs.md +++ b/docs/deploy-cephfs.md @@ -34,6 +34,7 @@ Option | Default value | Description `--nodeid` | _empty_ | This node's ID `--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in system's path and will choose Ceph kernel client if probing failed. `--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`) +`--mountcachedir` | _empty_ | volume mount cache info save dir. 
If left unspecified, the driver will not record mount info; when set, the driver saves mount info under this directory and uses it to remount the cached volumes when the driver restarts.

**Available environmental variables:**

From 1ccbb5b6a53b00794612e1d1067a507e71454f70 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?=
Date: Fri, 29 Mar 2019 16:12:09 +0800
Subject: [PATCH 79/89] cephfs driver deploy support remount volume

---
 deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml | 3 +++
 deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml   | 5 +++++
 deploy/cephfs/kubernetes/csi-cephfsplugin.yaml           | 5 +++++
 deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml        | 3 +++
 4 files changed, 16 insertions(+)

diff --git a/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml b/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml
index 290dd3f33..de4aaeaaa 100644
--- a/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml
+++ b/deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml
@@ -10,6 +10,9 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
   - apiGroups: [""]
    resources: ["nodes"]
     verbs: ["get", "list", "update"]
diff --git a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml
index 9181d6102..c56e70bb4 100644
--- a/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml
+++ b/deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml
@@ -70,6 +70,7 @@ spec:
             - "--v=5"
             - "--drivername=$(DRIVER_NAME)"
             - "--metadatastorage=k8s_configmap"
+            - "--mountcachedir=/mount-cache-dir"
           env:
             - name: HOST_ROOTFS
               value: "/rootfs"
@@ -83,6 +84,8 @@
               value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
           imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
           volumeMounts:
+            - name: mount-cache-dir
+              mountPath: /mount-cache-dir
             - name: plugin-dir
               mountPath: {{ .Values.socketDir }}
            - name: pods-mount-dir
@@ -103,6 +106,8 @@
           resources:
 {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
       volumes:
+        - name: mount-cache-dir
+          emptyDir: {}
        - name: plugin-dir
           hostPath:
             path: {{ .Values.socketDir }}
diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
index af4322fee..849cf57b3 100644
--- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
+++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
@@ -55,6 +55,7 @@ spec:
             - "--v=5"
             - "--drivername=cephfs.csi.ceph.com"
             - "--metadatastorage=k8s_configmap"
+            - "--mountcachedir=/mount-cache-dir"
           env:
             - name: NODE_ID
               valueFrom:
@@ -68,6 +69,8 @@
               value: unix:///csi/csi.sock
           imagePullPolicy: "IfNotPresent"
           volumeMounts:
+            - name: mount-cache-dir
+              mountPath: /mount-cache-dir
             - name: plugin-dir
               mountPath: /csi
             - name: csi-plugins-dir
@@ -84,6 +87,8 @@
             - name: host-dev
               mountPath: /dev
       volumes:
+        - name: mount-cache-dir
+          emptyDir: {}
         - name: plugin-dir
           hostPath:
             path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
diff --git a/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml b/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
index cc2919b0e..918bdc983 100644
--- a/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
+++ b/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
@@ -10,6 +10,9 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: cephfs-csi-nodeplugin
 rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
   - apiGroups: [""]
     resources: ["nodes"]
     verbs: ["get", "list", "update"]

From 
From f3e5f83ee06a96d304b3ae15ed6250e9f44fdd18 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?=
Date: Fri, 29 Mar 2019 16:13:48 +0800
Subject: [PATCH 80/89] mount info cache dir: support multiple cephfs drivers

---
 pkg/cephfs/mountcache.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go
index fbc24df54..5a5ade560 100644
--- a/pkg/cephfs/mountcache.go
+++ b/pkg/cephfs/mountcache.go
@@ -39,7 +39,7 @@ func initVolumeMountCache(driverName string, mountCacheDir string, cachePersiste
 	volumeMountCache.metadataStore = cachePersister

 	volumeMountCache.nodeCacheStore.BasePath = mountCacheDir
-	volumeMountCache.nodeCacheStore.CacheDir = ""
+	volumeMountCache.nodeCacheStore.CacheDir = driverName
 	klog.Infof("mount-cache: name: %s, version: %s, mountCacheDir: %s", driverName, version, mountCacheDir)
 }

From 205be90d742530b31124f42b38aed8f100e2357e Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Tue, 26 Mar 2019 11:35:47 +0530
Subject: [PATCH 81/89] update travis.yml and deploy.sh

update .travis.yml and deploy.sh to build v0.3.0 images
from the csi-v0.3 branch instead of master.

Signed-off-by: Madhu Rajanna

---
 .travis.yml | 3 ++-
 deploy.sh   | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index bd1ccc0c2..d8fe27f78 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,8 +11,9 @@ addons:
 language: go
 branches:
   only:
+    - csi-v0.3
     - master
-    - csi-v1.0
+    - csi-v1.0  # remove this once csi-v1.0 becomes master

 go: 1.11.x

diff --git a/deploy.sh b/deploy.sh
index dff25413b..53dd40798 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -24,7 +24,7 @@ push_helm_chats() {
     fi
 }

-if [ "${TRAVIS_BRANCH}" == 'master' ]; then
+if [ "${TRAVIS_BRANCH}" == 'csi-v0.3' ]; then
     export RBD_IMAGE_VERSION='v0.3.0'
     export CEPHFS_IMAGE_VERSION='v0.3.0'
 elif [ "${TRAVIS_BRANCH}" == 'csi-v1.0' ]; then

From 6de862d6cb2ba67a8e47619a2943ae3e169ea953 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?=
Date: Mon, 1 Apr 2019 21:20:53 +0800
Subject: [PATCH 82/89] code style

---
 pkg/cephfs/mountcache.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go
index 5a5ade560..60e5fad81 100644
--- a/pkg/cephfs/mountcache.go
+++ b/pkg/cephfs/mountcache.go
@@ -64,10 +64,10 @@ func remountCachedVolumes() error {
 			if err := mountOneCacheEntry(ce, me); err == nil {
 				remountSuccCount++
 				volumeMountCache.volumes[me.VolumeID] = *me
-				klog.Infof("mount-cache: remount volume %s success", volID)
+				klog.Infof("mount-cache: successfully remounted volume %s", volID)
 			} else {
 				remountFailCount++
-				klog.Errorf("mount-cache: remount volume cache %s fail", volID)
+				klog.Errorf("mount-cache: failed to remount volume %s", volID)
 			}
 		}
 		return nil
@@ -77,9 +77,9 @@
 		return err
 	}
 	if remountFailCount > 0 {
-		klog.Infof("mount-cache: success remount %d volumes, fail remount %d volumes", remountSuccCount, remountFailCount)
+		klog.Infof("mount-cache: successfully remounted %d volumes, failed to remount %d volumes", remountSuccCount, remountFailCount)
 	} else {
-		klog.Infof("mount-cache: volume cache num %d, all succ remount", remountSuccCount)
+		klog.Infof("mount-cache: successfully remounted %d volumes", remountSuccCount)
 	}
 	return nil
 }
@@ -157,13 +157,13 @@ func cleanupMountPoint(mountPoint string) error {
 			klog.Infof("mount-cache: corrupted mount point %s, need unmount", mountPoint)
 			err := execCommandErr("umount", mountPoint)
 			if err != nil {
-				klog.Infof("mount-cache: unmount %s fail %v", mountPoint, err)
+				klog.Infof("mount-cache: failed to umount %s %v", mountPoint, err)
 				//ignore error return err
 			}
 		}
 	}
 	if _, err := os.Stat(mountPoint); err != nil {
-		klog.Errorf("mount-cache: mount point %s stat fail %v", mountPoint, err)
+		klog.Errorf("mount-cache: failed to stat mount point %s %v", mountPoint, err)
 		return err
 	}
 	return nil
 }
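Patch 80 above keys the node cache store by driver name. Assuming the store joins `BasePath` and `CacheDir` the way `filepath.Join` does, and using a hypothetical file-name format, two cephfs drivers sharing one `--mountcachedir` end up with disjoint cache trees, as this small sketch shows:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// cacheFilePath illustrates the effect of setting CacheDir to the driver
// name: entries for different drivers land in different subdirectories.
// The join semantics and the file-name format are assumptions made for
// this illustration, not the store's exact behavior.
func cacheFilePath(basePath, driverName, volID string) string {
	return filepath.Join(basePath, driverName, "cephfs-mount-cache-"+volID+".json")
}

func main() {
	for _, driver := range []string{"cephfs.csi.ceph.com", "cephfs2.csi.ceph.com"} {
		fmt.Println(cacheFilePath("/mount-cache-dir", driver, "csi-cephfs-example"))
	}
}
```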
From 1f1d5f47c3953f7c69b524ba3b2d8ff4023b6761 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?=
Date: Mon, 1 Apr 2019 23:02:19 +0800
Subject: [PATCH 83/89] code style

---
 pkg/cephfs/mountcache.go      | 4 ++--
 pkg/cephfs/mountcache_test.go | 2 +-
 pkg/cephfs/nodeserver.go      | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go
index 60e5fad81..c8bc2a03b 100644
--- a/pkg/cephfs/mountcache.go
+++ b/pkg/cephfs/mountcache.go
@@ -143,7 +143,7 @@ func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountCacheEntry) err
 			klog.Errorf("mount-cache: failed to bind-mount volume %s: %s %s %v %v",
 				volID, me.StagingPath, targetPath, readOnly, err)
 		} else {
-			klog.Infof("mount-cache: successfully bind-mount volume %s: %s %s %v",
+			klog.Infof("mount-cache: successfully bind-mounted volume %s: %s %s %v",
 				volID, me.StagingPath, targetPath, readOnly)
 		}
 	}
@@ -277,7 +277,7 @@ func (mc *volumeMountCacheMap) updateNodeCache(volID string) error {
 	me := volumeMountCache.volumes[volID]
 	if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
-		klog.Infof("mount-cache: metadata notfound, delete mount cache failed for volume %s", volID)
+		klog.Infof("mount-cache: metadata not found, delete mount cache failed for volume %s", volID)
 	}
 	return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
 }

diff --git a/pkg/cephfs/mountcache_test.go b/pkg/cephfs/mountcache_test.go
index 6bba59c55..e27053cd4 100644
--- a/pkg/cephfs/mountcache_test.go
+++ b/pkg/cephfs/mountcache_test.go
@@ -32,7 +32,7 @@ func TestEncodeDecodeCredentials(t *testing.T) {
 	deSecrets := decodeCredentials(enSecrets)
 	for key, value := range secrets {
 		if deSecrets[key] != value {
-			t.Errorf("key %s value %s not equal %s after encode decode", key, value, deSecrets[key])
+			t.Errorf("key %s of credentials's value %s change after decode %s ", key, value, deSecrets[key])
 		}
 	}
 }

diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go
index 56d909ba9..273471e05 100644
--- a/pkg/cephfs/nodeserver.go
+++ b/pkg/cephfs/nodeserver.go
@@ -155,7 +155,7 @@ func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequ
 		return status.Error(codes.Internal, err.Error())
 	}
 	if err := volumeMountCache.nodeStageVolume(req.GetVolumeId(), stagingTargetPath, req.GetSecrets()); err != nil {
-		klog.Warningf("mount-cache: failed stage volume %s %s: %v", volID, stagingTargetPath, err)
+		klog.Warningf("mount-cache: failed to stage volume %s %s: %v", volID, stagingTargetPath, err)
 	}
 	return nil
 }
@@ -199,7 +199,7 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	}

 	if err := volumeMountCache.nodePublishVolume(volID, targetPath, req.GetReadonly()); err != nil {
-		klog.Warningf("mount-cache: failed publish volume %s %s: %v", volID, targetPath, err)
+		klog.Warningf("mount-cache: failed to publish volume %s %s: %v", volID, targetPath, err)
 	}

 	klog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
@@ -218,7 +218,7 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 	volID := req.GetVolumeId()

 	if err = volumeMountCache.nodeUnPublishVolume(volID, targetPath); err != nil {
-		klog.Warningf("mount-cache: failed unpublish volume %s %s: %v", volID, targetPath, err)
+		klog.Warningf("mount-cache: failed to unpublish volume %s %s: %v", volID, targetPath, err)
 	}

 	// Unmount the bind-mount
@@ -246,7 +246,7 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
 	volID := req.GetVolumeId()

 	if err = volumeMountCache.nodeUnStageVolume(volID); err != nil {
-		klog.Warningf("mount-cache: failed unstage volume %s %s: %v", volID, stagingTargetPath, err)
+		klog.Warningf("mount-cache: failed to unstage volume %s %s: %v", volID, stagingTargetPath, err)
 	}

 	// Unmount the volume
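The `TestEncodeDecodeCredentials` hunk above checks one invariant: decoding an encoded secrets map must return the original map, since the cached secrets are replayed when a volume is remounted. Below is a self-contained round-trip in the same spirit; base64 is an assumption for illustration, the actual encoding lives in `pkg/cephfs/mountcache.go`.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// encodeCredentials encodes every value of the secrets map.
func encodeCredentials(in map[string]string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		out[k] = base64.StdEncoding.EncodeToString([]byte(v))
	}
	return out
}

// decodeCredentials reverses encodeCredentials; a real implementation
// would log and handle values that fail to decode.
func decodeCredentials(in map[string]string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		decoded, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			continue // skip undecodable values in this sketch
		}
		out[k] = string(decoded)
	}
	return out
}

func main() {
	secrets := map[string]string{"userID": "admin", "userKey": "example-key"}
	roundTripped := decodeCredentials(encodeCredentials(secrets))
	fmt.Println(roundTripped["userID"] == secrets["userID"] && roundTripped["userKey"] == secrets["userKey"]) // true
}
```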
From bb6754fb37ca0d8b011f0806e213afe9e0d37eab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?=
Date: Fri, 29 Mar 2019 21:31:43 +0800
Subject: [PATCH 84/89] csi-provisioner rbac: add get, list, watch on `nodes`
 (#293)

---
 deploy/cephfs/helm/templates/provisioner-clusterrole.yaml | 3 +++
 deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml        | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml b/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml
index 6e6721cbf..07e35a98e 100644
--- a/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml
+++ b/deploy/cephfs/helm/templates/provisioner-clusterrole.yaml
@@ -10,6 +10,9 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]

diff --git a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
index 2e76defb3..80ef301a9 100644
--- a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
+++ b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
@@ -10,6 +10,9 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: cephfs-external-provisioner-runner
 rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]

From acdc75902963e95413c79e729d1013d8aabfd581 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?=
Date: Mon, 1 Apr 2019 16:28:29 +0800
Subject: [PATCH 85/89] rbd deploy: csi-provisioner rbac: add get, list, watch
 on `nodes` (#293)

---
 deploy/rbd/helm/templates/provisioner-clusterrole.yaml | 3 +++
 deploy/rbd/kubernetes/csi-provisioner-rbac.yaml        | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/deploy/rbd/helm/templates/provisioner-clusterrole.yaml b/deploy/rbd/helm/templates/provisioner-clusterrole.yaml
index 1c473e6e9..d324e455b 100644
--- a/deploy/rbd/helm/templates/provisioner-clusterrole.yaml
+++ b/deploy/rbd/helm/templates/provisioner-clusterrole.yaml
@@ -10,6 +10,9 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]

diff --git a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
index bf2aaa1af..75615b054 100644
--- a/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
+++ b/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
@@ -10,6 +10,9 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name:
rbd-external-provisioner-runner rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list"] From acdc75902963e95413c79e729d1013d8aabfd581 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=80=80=E5=AE=97?= Date: Mon, 1 Apr 2019 22:14:36 +0800 Subject: [PATCH 86/89] bump up the chart version --- deploy/cephfs/helm/Chart.yaml | 2 +- deploy/rbd/helm/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/cephfs/helm/Chart.yaml b/deploy/cephfs/helm/Chart.yaml index 40b9f3cc5..f324e7f57 100644 --- a/deploy/cephfs/helm/Chart.yaml +++ b/deploy/cephfs/helm/Chart.yaml @@ -4,7 +4,7 @@ appVersion: "1.0.0" description: "Container Storage Interface (CSI) driver, provisioner, and attacher for Ceph cephfs" name: ceph-csi-cephfs -version: 0.5.0 +version: 0.5.1 keywords: - ceph - cephfs diff --git a/deploy/rbd/helm/Chart.yaml b/deploy/rbd/helm/Chart.yaml index 26185a91e..30585b9d8 100644 --- a/deploy/rbd/helm/Chart.yaml +++ b/deploy/rbd/helm/Chart.yaml @@ -4,7 +4,7 @@ appVersion: "1.0.0" description: "Container Storage Interface (CSI) driver, provisioner, snapshotter, and attacher for Ceph RBD" name: ceph-csi-rbd -version: 0.5.0 +version: 0.5.1 keywords: - ceph - rbd From ffe2816cb5751b713729fd3e0d0aadc58458e1a2 Mon Sep 17 00:00:00 2001 From: John Mulligan Date: Wed, 20 Mar 2019 15:14:20 -0400 Subject: [PATCH 87/89] cmd: create a unified cephcsi binary Create a single binary that can start ceph-csi in either rbd or cephfs mode. Signed-off-by: John Mulligan --- cmd/cephcsi.go | 133 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 cmd/cephcsi.go diff --git a/cmd/cephcsi.go b/cmd/cephcsi.go new file mode 100644 index 000000000..315b4fa9c --- /dev/null +++ b/cmd/cephcsi.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package main
+
+import (
+	"flag"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/ceph/ceph-csi/pkg/cephfs"
+	"github.com/ceph/ceph-csi/pkg/rbd"
+	"github.com/ceph/ceph-csi/pkg/util"
+	"k8s.io/klog"
+)
+
+const (
+	rbdType    = "rbd"
+	cephfsType = "cephfs"
+
+	rbdDefaultName    = "rbd.csi.ceph.com"
+	cephfsDefaultName = "cephfs.csi.ceph.com"
+)
+
+var (
+	// common flags
+	vtype           = flag.String("type", "", "driver type [rbd|cephfs]")
+	endpoint        = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
+	driverName      = flag.String("drivername", "", "name of the driver")
+	nodeID          = flag.String("nodeid", "", "node id")
+	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
+
+	// rbd related flags
+	containerized = flag.Bool("containerized", true, "whether run as containerized")
+	configRoot    = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph"+
+		" cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets")
+
+	// cephfs related flags
+	volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
+	mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir")
+)
+
+func init() {
+	klog.InitFlags(nil)
+	if err := flag.Set("logtostderr", "true"); err != nil {
+		klog.Exitf("failed to set logtostderr flag: %v", err)
+	}
+	flag.Parse()
+}
+
+func getType() string {
+	if vtype == nil || len(*vtype) == 0 {
+		a0 := path.Base(os.Args[0])
+		if strings.Contains(a0, rbdType) {
+			return rbdType
+		}
+		if strings.Contains(a0, cephfsType) {
+			return cephfsType
+		}
+		return ""
+	}
+	return *vtype
+}
+
+func getDriverName() string {
+	// was explicitly passed a driver name
+	if driverName != nil && len(*driverName) != 0 {
+		return *driverName
+	}
+	// select driver name based on volume type
+	switch getType() {
+	case rbdType:
+		return rbdDefaultName
+	case cephfsType:
+		return cephfsDefaultName
+	default:
+		return ""
+	}
+}
+
+func main() {
+	driverType := getType()
+	if len(driverType) == 0 {
+		klog.Fatalln("driver type not specified")
+	}
+
+	dname := getDriverName()
+	err := util.ValidateDriverName(dname)
+	if err != nil {
+		klog.Fatalln(err) // calls exit
+	}
+
+	switch driverType {
+	case rbdType:
+		rbd.PluginFolder = rbd.PluginFolder + dname
+		cp, err := util.CreatePersistanceStorage(
+			rbd.PluginFolder, *metadataStorage, dname)
+		if err != nil {
+			os.Exit(1)
+		}
+		driver := rbd.NewDriver()
+		driver.Run(dname, *nodeID, *endpoint, *configRoot, *containerized, cp)
+
+	case cephfsType:
+		cephfs.PluginFolder = cephfs.PluginFolder + dname
+		cp, err := util.CreatePersistanceStorage(
+			cephfs.PluginFolder, *metadataStorage, dname)
+		if err != nil {
+			os.Exit(1)
+		}
+		driver := cephfs.NewDriver()
+		driver.Run(dname, *nodeID, *endpoint, *volumeMounter, *mountCacheDir, cp)
+
+	default:
+		klog.Fatalln("invalid volume type", vtype) // calls exit
+	}
+
+	os.Exit(0)
+}
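One point worth spelling out: when `--type` is not passed, `getType` above falls back to the executable name, so a single binary installed under several names selects the right driver automatically. The Dockerfile in the next patch relies on this by symlinking `cephcsi-rbd` and `cephcsi-cephfs` to the one `cephcsi` executable. A trimmed-down illustration of that fallback (simplified from the code above, not the shipped implementation):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// typeFromArgv0 mirrors the fallback in getType: derive the driver type
// from the program name when no --type flag is given.
func typeFromArgv0(argv0 string) string {
	base := path.Base(argv0)
	switch {
	case strings.Contains(base, "rbd"):
		return "rbd"
	case strings.Contains(base, "cephfs"):
		return "cephfs"
	default:
		return ""
	}
}

func main() {
	for _, name := range []string{"/usr/local/bin/cephcsi-rbd", "/usr/local/bin/cephcsi-cephfs", "cephcsi"} {
		fmt.Printf("%s -> %q\n", name, typeFromArgv0(name))
	}
}
```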
From d969dada3ef9f563b4a5f7079c60fca5c94d548a Mon Sep 17 00:00:00 2001
From: John Mulligan
Date: Wed, 20 Mar 2019 15:15:51 -0400
Subject: [PATCH 88/89] deploy: create a new Dockerfile for unified cephcsi
 image

Signed-off-by: John Mulligan

---
 deploy/cephcsi/image/Dockerfile | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 deploy/cephcsi/image/Dockerfile

diff --git a/deploy/cephcsi/image/Dockerfile b/deploy/cephcsi/image/Dockerfile
new file mode 100644
index 000000000..3a11c225d
--- /dev/null
+++ b/deploy/cephcsi/image/Dockerfile
@@ -0,0 +1,14 @@
+
+FROM ceph/ceph:v14.2
+LABEL maintainers="Ceph-CSI Authors"
+LABEL description="Ceph-CSI Plugin"
+
+ENV CSIBIN=/usr/local/bin/cephcsi
+
+COPY cephcsi $CSIBIN
+
+RUN chmod +x $CSIBIN && \
+    ln -sf $CSIBIN /usr/local/bin/cephcsi-rbd && \
+    ln -sf $CSIBIN /usr/local/bin/cephcsi-cephfs
+
+ENTRYPOINT ["/usr/local/bin/cephcsi"]

From de94cb62a99b899dc95bf9db79510a1c6ae9a867 Mon Sep 17 00:00:00 2001
From: John Mulligan
Date: Wed, 20 Mar 2019 15:16:15 -0400
Subject: [PATCH 89/89] Makefile: add initial build rules for combined binary
 and image

Add rules and variables to the Makefile so that the unified
binary and container image can be built.

Signed-off-by: John Mulligan

---
 Makefile | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/Makefile b/Makefile
index 11b1a67fc..8451b380a 100644
--- a/Makefile
+++ b/Makefile
@@ -22,6 +22,9 @@ RBD_IMAGE_VERSION=$(if $(ENV_RBD_IMAGE_VERSION),$(ENV_RBD_IMAGE_VERSION),v1.0.0)
 CEPHFS_IMAGE_NAME=$(if $(ENV_CEPHFS_IMAGE_NAME),$(ENV_CEPHFS_IMAGE_NAME),quay.io/cephcsi/cephfsplugin)
 CEPHFS_IMAGE_VERSION=$(if $(ENV_CEPHFS_IMAGE_VERSION),$(ENV_CEPHFS_IMAGE_VERSION),v1.0.0)

+CSI_IMAGE_NAME?=quay.io/cephcsi/cephcsi
+CSI_IMAGE_VERSION?=v1.0.0
+
 $(info rbd image settings: $(RBD_IMAGE_NAME) version $(RBD_IMAGE_VERSION))
 $(info cephfs image settings: $(CEPHFS_IMAGE_NAME) version $(CEPHFS_IMAGE_VERSION))

@@ -36,20 +39,21 @@ static-check:
 	./scripts/lint-go.sh
 	./scripts/lint-text.sh

-rbdplugin:
+.PHONY: cephcsi
+cephcsi:
 	if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
-	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./cmd/rbd
+	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephcsi ./cmd/

-image-rbdplugin: rbdplugin
-	cp _output/rbdplugin deploy/rbd/docker
+image-cephcsi: cephcsi
+	cp deploy/cephcsi/image/Dockerfile _output
+	$(CONTAINER_CMD) build -t $(CSI_IMAGE_NAME):$(CSI_IMAGE_VERSION) _output
+
+image-rbdplugin: cephcsi
+	cp _output/cephcsi deploy/rbd/docker/rbdplugin
 	$(CONTAINER_CMD) build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker

-cephfsplugin:
-	if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
-	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cmd/cephfs
-
-image-cephfsplugin: cephfsplugin
-	cp _output/cephfsplugin deploy/cephfs/docker
+image-cephfsplugin: cephcsi
+	cp _output/cephcsi deploy/cephfs/docker/cephfsplugin
 	$(CONTAINER_CMD) build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker

 push-image-rbdplugin: image-rbdplugin