cleanup: Avoid usage of numbers

Add separate functions to handle all levels and types of logging.

Signed-off-by: Yug <yuggupta27@gmail.com>
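The diff below shows only the converted call sites, not the new helpers themselves. For orientation, here is a minimal sketch of what the util logging functions could look like, assuming each one simply wraps klog at the verbosity level it replaces (V(1) for DefaultLog, V(3) for ExtendedLog, V(4) for DebugLog and DebugLogMsg) and reuses the existing util.Log helper for context-aware messages. The bodies and the reqIDKey type are assumptions inferred from the call sites, not the committed implementation.

// A minimal sketch of the logging helpers this commit introduces; the
// bodies are inferred from the call sites, not taken from the commit.
package util

import (
	"context"

	klog "k8s.io/klog/v2"
)

// reqIDKey is a hypothetical stand-in for however ceph-csi stores the
// request ID in the context.
type reqIDKey struct{}

// Log prefixes format with the request ID carried in ctx; modeled on the
// pre-existing util.Log calls visible in the removed lines below.
func Log(ctx context.Context, format string) string {
	if id, ok := ctx.Value(reqIDKey{}).(string); ok {
		return "ID: " + id + " " + format
	}
	return format
}

// DefaultLog replaces klog.V(1).Infof for messages without a context.
func DefaultLog(format string, args ...interface{}) {
	klog.V(1).Infof(format, args...)
}

// ExtendedLog replaces klog.V(3).Infof(util.Log(ctx, ...), ...).
func ExtendedLog(ctx context.Context, format string, args ...interface{}) {
	klog.V(3).Infof(Log(ctx, format), args...)
}

// DebugLog replaces klog.V(4).Infof(util.Log(ctx, ...), ...).
func DebugLog(ctx context.Context, format string, args ...interface{}) {
	klog.V(4).Infof(Log(ctx, format), args...)
}

// DebugLogMsg is the context-free debug variant (see newMounter below).
func DebugLogMsg(format string, args ...interface{}) {
	klog.V(4).Infof(format, args...)
}

Replacing bare verbosity numbers with named helpers keeps the level choice in one place, so a call site reads as intent (DebugLog) rather than as a magic number (V(4)).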
@@ -136,9 +136,8 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		return nil, err
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully created backing volume named %s for request name %s"),
-		vID.FsSubvolName, requestName)
+	util.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
+		vID.FsSubvolName, requestName)
 
 	volumeContext := req.GetParameters()
 	volumeContext["subvolumeName"] = vID.FsSubvolName
 	volume := &csi.Volume{
@@ -185,7 +184,7 @@ func (cs *ControllerServer) deleteVolumeDeprecated(ctx context.Context, req *csi
 	// mons may have changed since create volume,
 	// retrieve the latest mons and override old mons
 	if mon, secretsErr := util.GetMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
-		klog.V(3).Infof(util.Log(ctx, "overriding monitors [%q] with [%q] for volume %s"), ce.VolOptions.Monitors, mon, volID)
+		util.ExtendedLog(ctx, "overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
 		ce.VolOptions.Monitors = mon
 	}
 
@@ -218,7 +217,7 @@ func (cs *ControllerServer) deleteVolumeDeprecated(ctx context.Context, req *csi
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully deleted volume %s"), volID)
+	util.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
 
 	return &csi.DeleteVolumeResponse{}, nil
 }
@@ -312,7 +311,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully deleted volume %s"), volID)
+	util.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
 
 	return &csi.DeleteVolumeResponse{}, nil
 }

@@ -21,8 +21,6 @@ import (
 	"errors"
 
 	"github.com/ceph/ceph-csi/internal/util"
-
-	klog "k8s.io/klog/v2"
 )
 
 // volumeIdentifier structure contains an association between the CSI VolumeID to its subvolume
@@ -96,7 +94,7 @@ func checkVolExists(ctx context.Context, volOptions *volumeOptions, secret map[s
 		return nil, err
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)"),
-		vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
+	util.DebugLog(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)",
+		vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
 
 	return &vid, nil
@@ -177,7 +175,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
 		return nil, err
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)"),
-		vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
+	util.DebugLog(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)",
+		vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
 
 	return &vid, nil

@@ -121,7 +121,7 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	}
 
 	if isMnt {
-		klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s is already mounted to %s, skipping"), volID, stagingTargetPath)
+		util.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
 		return &csi.NodeStageVolumeResponse{}, nil
 	}
 
@@ -130,7 +130,7 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 		return nil, err
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully mounted volume %s to %s"), volID, stagingTargetPath)
+	util.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
 
 	return &csi.NodeStageVolumeResponse{}, nil
 }
@@ -152,7 +152,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
 		return status.Error(codes.Internal, err.Error())
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: mounting volume %s with %s"), volID, m.name())
+	util.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
 
 	readOnly := "ro"
 	fuseMountOptions := strings.Split(volOptions.FuseMountOptions, ",")
@@ -234,7 +234,7 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	}
 
 	if isMnt {
-		klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s is already bind-mounted to %s"), volID, targetPath)
+		util.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
 
@@ -245,7 +245,7 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully bind-mounted volume %s to %s"), volID, targetPath)
+	util.DebugLog(ctx, "cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
 
 	return &csi.NodePublishVolumeResponse{}, nil
 }
@@ -276,7 +276,7 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully unbinded volume %s from %s"), req.GetVolumeId(), targetPath)
+	util.DebugLog(ctx, "cephfs: successfully unbinded volume %s from %s", req.GetVolumeId(), targetPath)
 
 	return &csi.NodeUnpublishVolumeResponse{}, nil
 }
@@ -301,7 +301,7 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: successfully unmounted volume %s from %s"), req.GetVolumeId(), stagingTargetPath)
+	util.DebugLog(ctx, "cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
 
 	return &csi.NodeUnstageVolumeResponse{}, nil
 }

@@ -29,7 +29,6 @@ import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-	klog "k8s.io/klog/v2"
 )
 
 type volumeID string
@@ -45,7 +44,7 @@ func execCommand(ctx context.Context, program string, args ...string) (stdout, s
 	cmd.Stdout = &stdoutBuf
 	cmd.Stderr = &stderrBuf
 
-	klog.V(4).Infof(util.Log(ctx, "cephfs: EXEC %s %s"), program, sanitizedArgs)
+	util.DebugLog(ctx, "cephfs: EXEC %s %s", program, sanitizedArgs)
 
 	if err := cmd.Run(); err != nil {
 		if cmd.Process == nil {

@@ -116,7 +116,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, cr *util.Crede
 			klog.Errorf(util.Log(ctx, "failed to create subvolume group %s, for the vol %s(%s)"), volOptions.SubvolumeGroup, string(volID), err)
 			return err
 		}
-		klog.V(4).Infof(util.Log(ctx, "cephfs: created subvolume group %s"), volOptions.SubvolumeGroup)
+		util.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
 		clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated = true
 	}
 
@@ -253,7 +253,7 @@ func purgeVolumeDeprecated(ctx context.Context, volID volumeID, adminCr *util.Cr
 		}
 	} else {
 		if !pathExists(volRootDeleting) {
-			klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s not found, assuming it to be already deleted"), volID)
+			util.DebugLog(ctx, "cephfs: volume %s not found, assuming it to be already deleted", volID)
 			return nil
 		}
 	}

@@ -85,10 +85,10 @@ func loadAvailableMounters(conf *util.Config) error {
 	}
 
 	if conf.ForceKernelCephFS || util.CheckKernelSupport(release, quotaSupport) {
-		klog.V(1).Infof("loaded mounter: %s", volumeMounterKernel)
+		util.DefaultLog("loaded mounter: %s", volumeMounterKernel)
 		availableMounters = append(availableMounters, volumeMounterKernel)
 	} else {
-		klog.V(1).Infof("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
+		util.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
 	}
 }
 
@@ -96,7 +96,7 @@ func loadAvailableMounters(conf *util.Config) error {
 	if err != nil {
 		klog.Errorf("failed to run ceph-fuse %v", err)
 	} else {
-		klog.V(1).Infof("loaded mounter: %s", volumeMounterFuse)
+		util.DefaultLog("loaded mounter: %s", volumeMounterFuse)
 		availableMounters = append(availableMounters, volumeMounterFuse)
 	}
 
@@ -131,7 +131,7 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
 	if chosenMounter == "" {
 		// Otherwise pick whatever is left
 		chosenMounter = availableMounters[0]
-		klog.V(4).Infof("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
+		util.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
 	}
 
 	// Create the mounter