Merge pull request #296 from red-hat-storage/sync_us--devel

Syncing latest changes from upstream devel for ceph-csi
This commit is contained in:
openshift-merge-bot[bot] 2024-04-23 08:30:56 +00:00 committed by GitHub
commit 9481a149db
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 116 additions and 1 deletions

View File

@ -27,6 +27,7 @@ serviceAccounts:
# - "<MONValue2>" # - "<MONValue2>"
# rbd: # rbd:
# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net" # netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net"
# mirrorDaemonCount: 1
# readAffinity: # readAffinity:
# enabled: true # enabled: true
# crushLocationLabels: # crushLocationLabels:

View File

@ -19,6 +19,8 @@ kind: ConfigMap
# NOTE: The given radosNamespace must already exist in the pool. # NOTE: The given radosNamespace must already exist in the pool.
# NOTE: Make sure you don't add radosNamespace option to a currently in use # NOTE: Make sure you don't add radosNamespace option to a currently in use
# configuration as it will cause issues. # configuration as it will cause issues.
# The "rbd.mirrorDaemonCount" is optional and represents the total number of
# RBD mirror daemons running on the ceph cluster.
# The field "cephFS.subvolumeGroup" is optional and defaults to "csi". # The field "cephFS.subvolumeGroup" is optional and defaults to "csi".
# NOTE: The given subvolumeGroup must already exist in the filesystem. # NOTE: The given subvolumeGroup must already exist in the filesystem.
# The "cephFS.netNamespaceFilePath" fields are the various network namespace # The "cephFS.netNamespaceFilePath" fields are the various network namespace
@ -64,6 +66,7 @@ data:
"rbd": { "rbd": {
"netNamespaceFilePath": "<kubeletRootPath>/plugins/rbd.csi.ceph.com/net", "netNamespaceFilePath": "<kubeletRootPath>/plugins/rbd.csi.ceph.com/net",
"radosNamespace": "<rados-namespace>", "radosNamespace": "<rados-namespace>",
"mirrorDaemonCount": 1,
}, },
"monitors": [ "monitors": [
"<MONValue1>", "<MONValue1>",

View File

@ -553,9 +553,13 @@ func (ri *rbdImage) isInUse() (bool, error) {
// because we opened the image, there is at least one watcher // because we opened the image, there is at least one watcher
defaultWatchers := 1 defaultWatchers := 1
if mirrorInfo.Primary { if mirrorInfo.Primary {
count, err := util.GetRBDMirrorDaemonCount(util.CsiConfigFile, ri.ClusterID)
if err != nil {
return false, err
}
// if rbd mirror daemon is running, a watcher will be added by the rbd // if rbd mirror daemon is running, a watcher will be added by the rbd
// mirror daemon for mirrored images. // mirror daemon for mirrored images.
defaultWatchers++ defaultWatchers += count
} }
return len(watchers) > defaultWatchers, nil return len(watchers) > defaultWatchers, nil

View File

@ -45,6 +45,7 @@ const (
"clusterID": "<cluster-id>", "clusterID": "<cluster-id>",
"rbd": { "rbd": {
"radosNamespace": "<rados-namespace>" "radosNamespace": "<rados-namespace>"
"mirrorDaemonCount": 1
}, },
"monitors": [ "monitors": [
"<monitor-value>", "<monitor-value>",
@ -105,6 +106,22 @@ func GetRadosNamespace(pathToConfig, clusterID string) (string, error) {
return cluster.RBD.RadosNamespace, nil return cluster.RBD.RadosNamespace, nil
} }
// GetRBDMirrorDaemonCount reports how many RBD mirror daemons are configured
// for the cluster identified by clusterID in the CSI configuration file at
// pathToConfig. When the configuration does not set a count (zero value), it
// falls back to 1, the most common deployment.
func GetRBDMirrorDaemonCount(pathToConfig, clusterID string) (int, error) {
	cluster, err := readClusterInfo(pathToConfig, clusterID)
	if err != nil {
		return 0, err
	}
	if count := cluster.RBD.MirrorDaemonCount; count != 0 {
		return count, nil
	}

	// No explicit count in the configuration; use the default of 1.
	return 1, nil
}
// CephFSSubvolumeGroup returns the subvolumeGroup for CephFS volumes. If not set, it returns the default value "csi". // CephFSSubvolumeGroup returns the subvolumeGroup for CephFS volumes. If not set, it returns the default value "csi".
func CephFSSubvolumeGroup(pathToConfig, clusterID string) (string, error) { func CephFSSubvolumeGroup(pathToConfig, clusterID string) (string, error) {
cluster, err := readClusterInfo(pathToConfig, clusterID) cluster, err := readClusterInfo(pathToConfig, clusterID)

View File

@ -17,11 +17,14 @@ limitations under the License.
package util package util
import ( import (
"bytes"
"encoding/json" "encoding/json"
"os" "os"
"testing" "testing"
cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes" cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes"
"github.com/stretchr/testify/require"
) )
var ( var (
@ -530,3 +533,88 @@ func TestGetCephFSMountOptions(t *testing.T) {
}) })
} }
} }
// TestGetRBDMirrorDaemonCount verifies that GetRBDMirrorDaemonCount returns
// the configured mirror daemon count per cluster, defaults to 1 when the
// count is unset, and returns an error when the JSON value has the wrong type.
func TestGetRBDMirrorDaemonCount(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name      string
		clusterID string
		want      int
	}{
		{
			name:      "get rbd mirror daemon count for cluster-1",
			clusterID: "cluster-1",
			want:      2,
		},
		{
			name:      "get rbd mirror daemon count for cluster-2",
			clusterID: "cluster-2",
			want:      4,
		},
		{
			name:      "when rbd mirror daemon count is empty",
			clusterID: "cluster-3",
			want:      1, // default mirror daemon count
		},
	}
	csiConfig := []cephcsi.ClusterInfo{
		{
			ClusterID: "cluster-1",
			Monitors:  []string{"ip-1", "ip-2"},
			RBD: cephcsi.RBD{
				MirrorDaemonCount: 2,
			},
		},
		{
			ClusterID: "cluster-2",
			Monitors:  []string{"ip-3", "ip-4"},
			RBD: cephcsi.RBD{
				MirrorDaemonCount: 4,
			},
		},
		{
			// No MirrorDaemonCount set: exercises the default path.
			ClusterID: "cluster-3",
			Monitors:  []string{"ip-5", "ip-6"},
		},
	}
	csiConfigFileContent, err := json.Marshal(csiConfig)
	if err != nil {
		// Setup failure: nothing below can succeed, so stop the test here.
		t.Fatalf("failed to marshal csi config info %v", err)
	}
	tmpConfPath := t.TempDir() + "/ceph-csi.json"
	err = os.WriteFile(tmpConfPath, csiConfigFileContent, 0o600)
	if err != nil {
		t.Fatalf("failed to write %s file content: %v", CsiConfigFile, err)
	}
	for _, tt := range tests {
		ts := tt
		t.Run(ts.name, func(t *testing.T) {
			t.Parallel()
			// Use a subtest-local err: parallel subtests must not write to
			// the enclosing function's err variable (data race under -race).
			got, err := GetRBDMirrorDaemonCount(tmpConfPath, ts.clusterID)
			if err != nil {
				t.Errorf("GetRBDMirrorDaemonCount() error = %v", err)

				return
			}
			if got != ts.want {
				t.Errorf("GetRBDMirrorDaemonCount() = %v, want %v", got, ts.want)
			}
		})
	}
	// when mirrorDaemonCount is set as string
	csiConfigFileContent = bytes.Replace(
		csiConfigFileContent,
		[]byte(`"mirrorDaemonCount":2`),
		[]byte(`"mirrorDaemonCount":"2"`),
		1)
	tmpCSIConfPath := t.TempDir() + "/ceph-csi.json"
	err = os.WriteFile(tmpCSIConfPath, csiConfigFileContent, 0o600)
	if err != nil {
		t.Fatalf("failed to write %s file content: %v", CsiConfigFile, err)
	}
	_, err = GetRBDMirrorDaemonCount(tmpCSIConfPath, "test")
	require.Error(t, err)
}

View File

@ -46,6 +46,8 @@ type RBD struct {
NetNamespaceFilePath string `json:"netNamespaceFilePath"` NetNamespaceFilePath string `json:"netNamespaceFilePath"`
// RadosNamespace is a rados namespace in the pool // RadosNamespace is a rados namespace in the pool
RadosNamespace string `json:"radosNamespace"` RadosNamespace string `json:"radosNamespace"`
// RBD mirror daemons running in the ceph cluster.
MirrorDaemonCount int `json:"mirrorDaemonCount"`
} }
type NFS struct { type NFS struct {