util: make inode metrics optional in FilesystemNodeGetVolumeStats()

CephFS does not have a concept of "free inodes"; inodes are allocated
on demand in the filesystem.

This confuses alerting managers that expect a (high) number of free
inodes: warnings are produced whenever the reported number of free
inodes is not high enough, causing alerts to always be raised for
CephFS.

To prevent these false-positive alerts, the NodeGetVolumeStats reply
for CephFS (and CephNFS) volumes no longer includes inode metrics.

See-also: https://bugzilla.redhat.com/2128263
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Author:       Niels de Vos, 2022-10-03 17:46:52 +02:00
Committed by: mergify[bot]
Parent:       386d3ddd6e
Commit:       b7703faf37

5 changed files with 34 additions and 27 deletions
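For illustration only (not part of the commit), a short Go sketch of the reply shapes this change produces, assuming the CSI spec Go bindings (github.com/container-storage-interface/spec/lib/go/csi) that ceph-csi uses: with includeInodes set to false the NodeGetVolumeStats response carries only the BYTES usage entry, while RBD keeps the additional INODES entry. The numbers below are made up.

package main

import (
    "fmt"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
    // CephFS/CephNFS reply after this change (includeInodes=false):
    // only a BYTES usage entry, so no misleading "free inodes" value.
    cephfsReply := &csi.NodeGetVolumeStatsResponse{
        Usage: []*csi.VolumeUsage{
            {Available: 10 << 30, Total: 20 << 30, Used: 10 << 30, Unit: csi.VolumeUsage_BYTES},
        },
    }

    // RBD reply (includeInodes=true): BYTES plus INODES entries.
    rbdReply := &csi.NodeGetVolumeStatsResponse{
        Usage: []*csi.VolumeUsage{
            {Available: 10 << 30, Total: 20 << 30, Used: 10 << 30, Unit: csi.VolumeUsage_BYTES},
            {Available: 900000, Total: 1000000, Used: 100000, Unit: csi.VolumeUsage_INODES},
        },
    }

    fmt.Println("CephFS usage entries:", len(cephfsReply.Usage)) // 1
    fmt.Println("RBD usage entries:", len(rbdReply.Usage))       // 2
}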


@@ -637,7 +637,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
     }
     if stat.Mode().IsDir() {
-        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
+        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
     }
     return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)


@@ -241,6 +241,7 @@ func FilesystemNodeGetVolumeStats(
     ctx context.Context,
     mounter mount.Interface,
     targetPath string,
+    includeInodes bool,
 ) (*csi.NodeGetVolumeStatsResponse, error) {
     isMnt, err := util.IsMountPoint(mounter, targetPath)
     if err != nil {
@@ -274,6 +275,19 @@ func FilesystemNodeGetVolumeStats(
     if !ok {
         log.ErrorLog(ctx, "failed to fetch used bytes")
     }
+    res := &csi.NodeGetVolumeStatsResponse{
+        Usage: []*csi.VolumeUsage{
+            {
+                Available: requirePositive(available),
+                Total: requirePositive(capacity),
+                Used: requirePositive(used),
+                Unit: csi.VolumeUsage_BYTES,
+            },
+        },
+    }
+    if includeInodes {
         inodes, ok := (*(volMetrics.Inodes)).AsInt64()
         if !ok {
             log.ErrorLog(ctx, "failed to fetch available inodes")
@@ -290,22 +304,15 @@ func FilesystemNodeGetVolumeStats(
             log.ErrorLog(ctx, "failed to fetch used inodes")
         }
-    return &csi.NodeGetVolumeStatsResponse{
-        Usage: []*csi.VolumeUsage{
-            {
-                Available: requirePositive(available),
-                Total: requirePositive(capacity),
-                Used: requirePositive(used),
-                Unit: csi.VolumeUsage_BYTES,
-            },
-            {
+        res.Usage = append(res.Usage, &csi.VolumeUsage{
             Available: requirePositive(inodesFree),
             Total: requirePositive(inodes),
             Used: requirePositive(inodesUsed),
             Unit: csi.VolumeUsage_INODES,
-            },
-        },
-    }, nil
+        })
+    }
+    return res, nil
 }
 // requirePositive returns the value for `x` when it is greater or equal to 0,
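The hunk's trailing context mentions the requirePositive helper. Judging from its doc comment, it presumably clamps negative metric values to zero; a minimal sketch, where the zero fallback is an assumption and not shown in this diff:

// requirePositive returns the value for `x` when it is greater or equal to 0,
// and 0 otherwise (sketch; the fallback is assumed from the doc comment above).
func requirePositive(x int64) int64 {
    if x >= 0 {
        return x
    }

    return 0
}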


@@ -88,7 +88,7 @@ func TestFilesystemNodeGetVolumeStats(t *testing.T) {
     // retry until a mountpoint is found
     for {
-        stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd)
+        stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd, true)
         if err != nil && cwd != "/" && strings.HasSuffix(err.Error(), "is not mounted") {
             // try again with the parent directory
             cwd = filepath.Dir(cwd)
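The existing test keeps exercising the helper with includeInodes set to true. A hypothetical companion test (not part of this commit; the test name is made up and it assumes the csi spec package is imported in the test file) could assert that the INODES entry is omitted when the flag is false:

func TestFilesystemNodeGetVolumeStatsWithoutInodes(t *testing.T) {
    // "/" is assumed to always be a mountpoint, so no retry loop is needed.
    stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), "/", false)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    for _, usage := range stats.Usage {
        if usage.Unit == csi.VolumeUsage_INODES {
            t.Error("inode metrics should be omitted when includeInodes is false")
        }
    }
}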


@@ -182,7 +182,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
     }
     if stat.Mode().IsDir() {
-        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
+        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
     }
     return nil, status.Errorf(codes.InvalidArgument,


@@ -1240,7 +1240,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
     }
     if stat.Mode().IsDir() {
-        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
+        return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, true)
     } else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
         return blockNodeGetVolumeStats(ctx, targetPath)
     }