internal: reformat long lines in internal/util package to 120 chars
We have many declarations, invocations, etc. with long lines that are very difficult to follow while reading the code. This addresses the issue in the 'internal/util' package files by restricting the line length to 120 chars.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
parent 8f82a30c21
commit cc6d67a7d6
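For context, the wrapping style applied throughout this change breaks a long signature or call after the opening parenthesis and places one argument per line, keeping every line under 120 characters. The following is an illustrative, self-contained sketch of that style; none of these identifiers exist in ceph-csi.

// Hypothetical example only: joinArgs and its arguments are placeholders that
// illustrate the <=120 character wrapping pattern used in this commit.
package main

import (
	"fmt"
	"strings"
)

// joinArgs stands in for a helper with many parameters; the parameter list is
// wrapped one entry per line, with the closing parenthesis on the last line.
func joinArgs(
	command string,
	devicePath string,
	passphrase string,
	options ...string) string {
	parts := append([]string{command, devicePath, passphrase}, options...)
	return strings.Join(parts, " ")
}

func main() {
	// A long invocation is wrapped the same way, one argument per line.
	out := joinArgs(
		"cryptsetup",
		"/dev/example",
		"not-a-real-passphrase",
		"-q",
		"luksFormat")
	fmt.Println(out)
}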
@@ -25,7 +25,17 @@ import (
 // LuksFormat sets up volume as an encrypted LUKS partition.
 func LuksFormat(devicePath, passphrase string) (stdout, stderr []byte, err error) {
-	return execCryptsetupCommand(&passphrase, "-q", "luksFormat", "--type", "luks2", "--hash", "sha256", devicePath, "-d", "/dev/stdin")
+	return execCryptsetupCommand(
+		&passphrase,
+		"-q",
+		"luksFormat",
+		"--type",
+		"luks2",
+		"--hash",
+		"sha256",
+		devicePath,
+		"-d",
+		"/dev/stdin")
 }
 
 // LuksOpen opens LUKS encrypted partition and sets up a mapping.
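As a usage sketch only (not code from the repository), a caller of the wrapped LuksFormat above might look like the following, assuming it sits in the same package and imports fmt; the error-wrapping style is an assumption.

// Assumed usage of LuksFormat as declared in the hunk above; the surrounding
// error handling is illustrative, not taken from ceph-csi.
func formatDevice(devicePath, passphrase string) error {
	stdout, stderr, err := LuksFormat(devicePath, passphrase)
	if err != nil {
		return fmt.Errorf("cryptsetup luksFormat failed (stdout: %q, stderr: %q): %w",
			string(stdout), string(stderr), err)
	}
	return nil
}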
@@ -252,7 +252,10 @@ func RegisterKMSProvider(provider KMSProvider) bool {
 // buildKMS creates a new KMSProvider instance, based on the configuration that
 // was passed. This uses getKMSProvider() internally to identify the
 // KMSProvider to instantiate.
-func (kf *kmsProviderList) buildKMS(tenant string, config map[string]interface{}, secrets map[string]string) (EncryptionKMS, error) {
+func (kf *kmsProviderList) buildKMS(
+	tenant string,
+	config map[string]interface{},
+	secrets map[string]string) (EncryptionKMS, error) {
 	providerName, err := getKMSProvider(config)
 	if err != nil {
 		return nil, err
@@ -131,7 +131,8 @@ type TopologyConstrainedPool struct {
 
 // GetTopologyFromRequest extracts TopologyConstrainedPools and passed in accessibility constraints
 // from a CSI CreateVolume request.
-func GetTopologyFromRequest(req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
+func GetTopologyFromRequest(
+	req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
 	var (
 		topologyPools []TopologyConstrainedPool
 	)
@@ -151,7 +152,10 @@ func GetTopologyFromRequest(req *csi.CreateVolumeRequest) (*[]TopologyConstraine
 	// extract topology based pools configuration
 	err := json.Unmarshal([]byte(strings.Replace(topologyPoolsStr, "\n", " ", -1)), &topologyPools)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to parse JSON encoded topology constrained pools parameter (%s): %v", topologyPoolsStr, err)
+		return nil, nil, fmt.Errorf(
+			"failed to parse JSON encoded topology constrained pools parameter (%s): %v",
+			topologyPoolsStr,
+			err)
 	}
 
 	return &topologyPools, accessibilityRequirements, nil
@@ -76,8 +76,9 @@ type Config struct {
 	DomainLabels string // list of domain labels to read from the node
 
 	// metrics related flags
 	MetricsPath string // path of prometheus endpoint where metrics will be available
-	HistogramOption string // Histogram option for grpc metrics, should be comma separated value, ex:= "0.5,2,6" where start=0.5 factor=2, count=6
+	HistogramOption string // Histogram option for grpc metrics, should be comma separated value,
+	// ex:= "0.5,2,6" where start=0.5 factor=2, count=6
 	MetricsIP string // TCP port for liveness/ metrics requests
 	PidLimit int // PID limit to configure through cgroups")
 	MetricsPort int // TCP port for liveness/grpc metrics requests
@@ -97,10 +98,12 @@ type Config struct {
 	// cephfs related flags
 	ForceKernelCephFS bool // force to use the ceph kernel client even if the kernel is < 4.17
 
-	// RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten occurs
+	// RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten
+	// occurs
 	RbdHardMaxCloneDepth uint
 
-	// RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten occurs
+	// RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten
+	// occurs
 	RbdSoftMaxCloneDepth uint
 
 	// MaxSnapshotsOnImage represents the maximum number of snapshots allowed
@@ -233,7 +236,13 @@ func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool
 
 // GenerateVolID generates a volume ID based on passed in parameters and version, to be returned
 // to the CO system.
-func GenerateVolID(ctx context.Context, monitors string, cr *Credentials, locationID int64, pool, clusterID, objUUID string, volIDVersion uint16) (string, error) {
+func GenerateVolID(
+	ctx context.Context,
+	monitors string,
+	cr *Credentials,
+	locationID int64,
+	pool, clusterID, objUUID string,
+	volIDVersion uint16) (string, error) {
 	var err error
 
 	if locationID == InvalidPoolID {
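A hedged usage sketch of the wrapped GenerateVolID signature above; the argument values are placeholders and the wrapper function is not part of the repository.

// Assumed usage sketch only, placed next to GenerateVolID in the util package.
// All literal values below are placeholders, not real cluster configuration.
func makeVolumeID(ctx context.Context, monitors string, cr *Credentials) (string, error) {
	return GenerateVolID(
		ctx,
		monitors,
		cr,
		InvalidPoolID, // locationID; the function body above checks for InvalidPoolID
		"replicapool", // pool (placeholder)
		"cluster-1",   // clusterID (placeholder)
		"0001-0001",   // objUUID (placeholder)
		1)             // volIDVersion (placeholder)
}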
@@ -27,7 +27,10 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
 	// validate stagingpath exists
 	ok := checkDirExists(req.GetStagingTargetPath())
 	if !ok {
-		return status.Errorf(codes.InvalidArgument, "staging path %s does not exist on node", req.GetStagingTargetPath())
+		return status.Errorf(
+			codes.InvalidArgument,
+			"staging path %s does not exist on node",
+			req.GetStagingTargetPath())
 	}
 	return nil
 }
@@ -83,7 +86,8 @@ func ValidateNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) err
 // volume is from source as empty ReadOnlyMany is not supported.
 func CheckReadOnlyManyIsSupported(req *csi.CreateVolumeRequest) error {
 	for _, capability := range req.GetVolumeCapabilities() {
-		if m := capability.GetAccessMode().Mode; m == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || m == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
+		if m := capability.GetAccessMode().Mode; m == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
+			m == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
 			if req.GetVolumeContentSource() == nil {
 				return status.Error(codes.InvalidArgument, "readOnly accessMode is supported only with content source")
 			}
@@ -340,7 +340,10 @@ func (kms *VaultTokensKMS) initCertificates(config map[string]interface{}) error
 	if vaultClientCertKeyFromSecret != "" {
 		certKey, err := getCertificate(kms.Tenant, vaultClientCertKeyFromSecret, "key")
 		if err != nil && !apierrs.IsNotFound(err) {
-			return fmt.Errorf("failed to get client certificate key from secret %s: %w", vaultClientCertKeyFromSecret, err)
+			return fmt.Errorf(
+				"failed to get client certificate key from secret %s: %w",
+				vaultClientCertKeyFromSecret,
+				err)
 		}
 		// if the certificate is not present in tenant namespace get it from
 		// cephcsi pod namespace