mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-14 10:53:34 +00:00

rebase: update replaced k8s.io modules to v0.33.0

Signed-off-by: Niels de Vos <ndevos@ibm.com>

committed by mergify[bot]
parent dd77e72800, commit 107407b44b
e2e/vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored): 2 changes

@@ -24,7 +24,7 @@ import (
 	"time"
 
 	"github.com/karrick/godirwalk"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/opencontainers/cgroups"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 
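Most hunks in this commit are the same mechanical change: the cgroups packages were split out of runc into the standalone github.com/opencontainers/cgroups module, so only the import path moves. A minimal sketch of code built against the new module; the call used here exists under both the old and the new path, so this is illustration, not part of the vendored diff:

```go
package main

import (
	"fmt"

	// Previously github.com/opencontainers/runc/libcontainer/cgroups;
	// the package API is unchanged, only the module path moved.
	"github.com/opencontainers/cgroups"
)

func main() {
	// IsCgroup2UnifiedMode is available under both import paths, so
	// vendored callers only need the import rewritten.
	if cgroups.IsCgroup2UnifiedMode() {
		fmt.Println("host uses cgroup v2 (unified hierarchy)")
	} else {
		fmt.Println("host uses cgroup v1")
	}
}
```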
e2e/vendor/github.com/google/cadvisor/container/containerd/client.go (generated, vendored): 8 changes

@@ -26,7 +26,7 @@ import (
 	tasksapi "github.com/containerd/containerd/api/services/tasks/v1"
 	versionapi "github.com/containerd/containerd/api/services/version/v1"
 	tasktypes "github.com/containerd/containerd/api/types/task"
-	"github.com/containerd/errdefs"
+	"github.com/containerd/errdefs/pkg/errgrpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/credentials/insecure"
@@ -114,7 +114,7 @@ func (c *client) LoadContainer(ctx context.Context, id string) (*containers.Cont
 		ID: id,
 	})
 	if err != nil {
-		return nil, errdefs.FromGRPC(err)
+		return nil, errgrpc.ToNative(err)
 	}
 	return containerFromProto(r.Container), nil
 }
@@ -124,7 +124,7 @@ func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) {
 		ContainerID: id,
 	})
 	if err != nil {
-		return 0, errdefs.FromGRPC(err)
+		return 0, errgrpc.ToNative(err)
 	}
 	if response.Process.Status == tasktypes.Status_UNKNOWN {
 		return 0, ErrTaskIsInUnknownState
@@ -135,7 +135,7 @@ func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) {
 func (c *client) Version(ctx context.Context) (string, error) {
 	response, err := c.versionService.Version(ctx, &emptypb.Empty{})
 	if err != nil {
-		return "", errdefs.FromGRPC(err)
+		return "", errgrpc.ToNative(err)
 	}
 	return response.Version, nil
 }
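The errdefs hunks track the upstream move of the gRPC status mapping into a subpackage: errdefs.FromGRPC(err) becomes errgrpc.ToNative(err) from github.com/containerd/errdefs/pkg/errgrpc. A small sketch of the new spelling; the status error here is fabricated for illustration:

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
	"github.com/containerd/errdefs/pkg/errgrpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Stand-in for an error returned by a containerd gRPC service call.
	grpcErr := status.Error(codes.NotFound, "container abc: not found")

	// Old: errdefs.FromGRPC(grpcErr). New: errgrpc.ToNative(grpcErr).
	native := errgrpc.ToNative(grpcErr)

	// The converted error still matches the errdefs sentinel checks.
	fmt.Println(errdefs.IsNotFound(native)) // true
}
```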
e2e/vendor/github.com/google/cadvisor/container/containerd/handler.go (generated, vendored): 2 changes

@@ -23,7 +23,7 @@ import (
 	"time"
 
 	"github.com/containerd/errdefs"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/opencontainers/cgroups"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"golang.org/x/net/context"
 
e2e/vendor/github.com/google/cadvisor/container/crio/handler.go (generated, vendored): 2 changes

@@ -21,7 +21,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/opencontainers/cgroups"
 
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/common"
e2e/vendor/github.com/google/cadvisor/container/factory.go (generated, vendored): 2 changes

@@ -66,6 +66,7 @@ const (
 	ResctrlMetrics  MetricKind = "resctrl"
 	CPUSetMetrics   MetricKind = "cpuset"
 	OOMMetrics      MetricKind = "oom_event"
+	PressureMetrics MetricKind = "pressure"
 )
 
 // AllMetrics represents all kinds of metrics that cAdvisor supported.
@@ -91,6 +92,7 @@ var AllMetrics = MetricSet{
 	ResctrlMetrics:  struct{}{},
 	CPUSetMetrics:   struct{}{},
 	OOMMetrics:      struct{}{},
+	PressureMetrics: struct{}{},
 }
 
 // AllNetworkMetrics represents all network metrics that cAdvisor supports.
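The new PressureMetrics kind slots the PSI data added elsewhere in this commit into cAdvisor's metric gating. A self-contained sketch of how a metric set gates collection; the types below mirror cAdvisor's MetricKind/MetricSet shape rather than importing it, so this runs standalone:

```go
package main

import "fmt"

// Local mirrors of cAdvisor's metric-set types, for illustration only.
type MetricKind string

const PressureMetrics MetricKind = "pressure"

type MetricSet map[MetricKind]struct{}

func (ms MetricSet) Has(mk MetricKind) bool {
	_, found := ms[mk]
	return found
}

func main() {
	enabled := MetricSet{PressureMetrics: struct{}{}}
	// Consumers check the set before emitting a metric family.
	fmt.Println(enabled.Has(PressureMetrics)) // true
	fmt.Println(enabled.Has("oom_event"))     // false
}
```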
e2e/vendor/github.com/google/cadvisor/container/libcontainer/handler.go (generated, vendored): 35 changes

@@ -28,8 +28,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
-	"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
+	"github.com/opencontainers/cgroups"
+	"github.com/opencontainers/cgroups/fs2"
 	"k8s.io/klog/v2"
 
 	"github.com/google/cadvisor/container"
@@ -717,10 +717,7 @@ func scanUDPStats(r io.Reader) (info.UdpStat, error) {
 		return stats, scanner.Err()
 	}
 
-	listening := uint64(0)
-	dropped := uint64(0)
-	rxQueued := uint64(0)
-	txQueued := uint64(0)
+	var listening, dropped, rxQueued, txQueued uint64
 
 	for scanner.Scan() {
 		line := scanner.Text()
@@ -733,8 +730,11 @@ func scanUDPStats(r io.Reader) (info.UdpStat, error) {
 			continue
 		}
 
-		rx, tx := uint64(0), uint64(0)
-		fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
+		var rx, tx uint64
+		_, err := fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
+		if err != nil {
+			continue
+		}
 		rxQueued += rx
 		txQueued += tx
 
@@ -771,6 +771,7 @@ func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
 	ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
 	ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
 	ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime
+	setPSIStats(s.CpuStats.PSI, &ret.Cpu.PSI)
 
 	if !withPerCPU {
 		return
@@ -792,6 +793,7 @@ func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.DiskIo.IoWaitTime = diskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
 	ret.DiskIo.IoMerged = diskStatsCopy(s.BlkioStats.IoMergedRecursive)
 	ret.DiskIo.IoTime = diskStatsCopy(s.BlkioStats.IoTimeRecursive)
+	setPSIStats(s.BlkioStats.PSI, &ret.DiskIo.PSI)
 }
 
 func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
@@ -799,6 +801,7 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
 	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
 	ret.Memory.KernelUsage = s.MemoryStats.KernelUsage.Usage
+	setPSIStats(s.MemoryStats.PSI, &ret.Memory.PSI)
 
 	if cgroups.IsCgroup2UnifiedMode() {
 		ret.Memory.Cache = s.MemoryStats.Stats["file"]
@@ -884,6 +887,22 @@ func setHugepageStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	}
 }
 
+func setPSIData(d *cgroups.PSIData, ret *info.PSIData) {
+	if d != nil {
+		ret.Total = d.Total
+		ret.Avg10 = d.Avg10
+		ret.Avg60 = d.Avg60
+		ret.Avg300 = d.Avg300
+	}
+}
+
+func setPSIStats(s *cgroups.PSIStats, ret *info.PSIStats) {
+	if s != nil {
+		setPSIData(&s.Full, &ret.Full)
+		setPSIData(&s.Some, &ret.Some)
+	}
+}
+
 // read from pids path not cpu
 func setThreadsStats(s *cgroups.Stats, ret *info.ContainerStats) {
 	if s != nil {
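Besides the PSI plumbing, the scanUDPStats hunks fix two things: the four counters collapse into one var declaration, and the fmt.Sscanf error is no longer discarded, so a malformed queue field now skips the line instead of silently contributing zeroes. The same idiom in isolation; the field format matches the hex tx/rx queue column scanned above:

```go
package main

import "fmt"

// parseQueues mirrors the fixed logic: scan two hex values and treat a
// scan failure as "skip this line" rather than "count zero".
func parseQueues(field string) (rx, tx uint64, ok bool) {
	if _, err := fmt.Sscanf(field, "%X:%X", &rx, &tx); err != nil {
		return 0, 0, false
	}
	return rx, tx, true
}

func main() {
	fmt.Println(parseQueues("00000000:00001C20")) // 0 7200 true
	fmt.Println(parseQueues("not-a-queue-field")) // 0 0 false
}
```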
e2e/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored): 15 changes

@@ -17,15 +17,12 @@ package libcontainer
 import (
 	"fmt"
 
-	"github.com/google/cadvisor/container"
 	info "github.com/google/cadvisor/info/v1"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/google/cadvisor/container"
 
-	fs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
-	fs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
-	configs "github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/opencontainers/cgroups"
+	fs "github.com/opencontainers/cgroups/fs"
+	fs2 "github.com/opencontainers/cgroups/fs2"
 	"k8s.io/klog/v2"
 )
@@ -157,9 +154,9 @@ func diskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStat
 }
 
 func NewCgroupManager(name string, paths map[string]string) (cgroups.Manager, error) {
-	config := &configs.Cgroup{
+	config := &cgroups.Cgroup{
 		Name:      name,
-		Resources: &configs.Resources{},
+		Resources: &cgroups.Resources{},
 	}
 	if cgroups.IsCgroup2UnifiedMode() {
 		path := paths[""]
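Beyond the import rewrite, this file shows the one real type move in the migration: runc's configs.Cgroup and configs.Resources now live in the cgroups module itself, as the NewCgroupManager hunk above confirms. A sketch of the new construction; the group name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/cgroups"
)

func main() {
	// Before: configs.Cgroup / configs.Resources from
	// github.com/opencontainers/runc/libcontainer/configs.
	// After: the same types come from the standalone cgroups module.
	config := &cgroups.Cgroup{
		Name:      "demo-group", // hypothetical name, for illustration
		Resources: &cgroups.Resources{},
	}
	fmt.Println(config.Name)
}
```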
e2e/vendor/github.com/google/cadvisor/container/raw/handler.go (generated, vendored): 2 changes

@@ -24,8 +24,8 @@ import (
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/machine"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 
+	"github.com/opencontainers/cgroups"
 	"k8s.io/klog/v2"
 )
e2e/vendor/github.com/google/cadvisor/info/v1/container.go (generated, vendored): 26 changes

@@ -261,6 +261,26 @@ func (ci *ContainerInfo) StatsEndTime() time.Time {
 	return ret
 }
 
+// PSI statistics for an individual resource.
+type PSIStats struct {
+	// PSI data for all tasks of in the cgroup.
+	Full PSIData `json:"full,omitempty"`
+	// PSI data for some tasks in the cgroup.
+	Some PSIData `json:"some,omitempty"`
+}
+
+type PSIData struct {
+	// Total time duration for tasks in the cgroup have waited due to congestion.
+	// Unit: nanoseconds.
+	Total uint64 `json:"total"`
+	// The average (in %) tasks have waited due to congestion over a 10 second window.
+	Avg10 float64 `json:"avg10"`
+	// The average (in %) tasks have waited due to congestion over a 60 second window.
+	Avg60 float64 `json:"avg60"`
+	// The average (in %) tasks have waited due to congestion over a 300 second window.
+	Avg300 float64 `json:"avg300"`
+}
+
 // This mirrors kernel internal structure.
 type LoadStats struct {
 	// Number of sleeping tasks.
@@ -334,7 +354,8 @@ type CpuStats struct {
 	// from LoadStats.NrRunning.
 	LoadAverage int32 `json:"load_average"`
 	// from LoadStats.NrUninterruptible
-	LoadDAverage int32 `json:"load_d_average"`
+	LoadDAverage int32    `json:"load_d_average"`
+	PSI          PSIStats `json:"psi"`
 }
 
 type PerDiskStats struct {
@@ -353,6 +374,7 @@ type DiskIoStats struct {
 	IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"`
 	IoMerged   []PerDiskStats `json:"io_merged,omitempty"`
 	IoTime     []PerDiskStats `json:"io_time,omitempty"`
+	PSI        PSIStats       `json:"psi"`
 }
 
 type HugetlbStats struct {
@@ -411,6 +433,8 @@ type MemoryStats struct {
 
 	ContainerData    MemoryStatsMemoryData `json:"container_data,omitempty"`
 	HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
+
+	PSI PSIStats `json:"psi"`
 }
 
 type CPUSetStats struct {
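The new PSIStats/PSIData types mirror the kernel's pressure stall information: a total stall time plus 10s/60s/300s averages. A self-contained sketch of the JSON these tags produce, with the structs copied from the hunk above and made-up sample values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Copies of the types added in info/v1/container.go, for illustration.
type PSIData struct {
	Total  uint64  `json:"total"`
	Avg10  float64 `json:"avg10"`
	Avg60  float64 `json:"avg60"`
	Avg300 float64 `json:"avg300"`
}

type PSIStats struct {
	Full PSIData `json:"full,omitempty"`
	Some PSIData `json:"some,omitempty"`
}

func main() {
	// Sample numbers only. "some" counts stalls where at least one task
	// waited; "full" counts stalls where all tasks waited, so some >= full.
	s := PSIStats{
		Some: PSIData{Total: 150000000, Avg10: 1.25, Avg60: 0.40, Avg300: 0.08},
		Full: PSIData{Total: 30000000, Avg10: 0.10, Avg60: 0.02},
	}
	out, _ := json.Marshal(s)
	fmt.Println(string(out))
}
```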
e2e/vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored): 6 changes

@@ -45,7 +45,7 @@ import (
 	"github.com/google/cadvisor/version"
 	"github.com/google/cadvisor/watcher"
 
-	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/opencontainers/cgroups"
 
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
@@ -221,7 +221,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, HousekeepingConfi
 		return nil, err
 	}
 
-	newManager.resctrlManager, err = resctrl.NewManager(resctrlInterval, resctrl.Setup, machineInfo.CPUVendorID, inHostNamespace)
+	newManager.resctrlManager, err = resctrl.NewManager(resctrlInterval, machineInfo.CPUVendorID, inHostNamespace)
 	if err != nil {
 		klog.V(4).Infof("Cannot gather resctrl metrics: %v", err)
 	}
@@ -265,7 +265,7 @@ type manager struct {
 	eventsChannel           chan watcher.ContainerEvent
 	collectorHTTPClient     *http.Client
 	perfManager             stats.Manager
-	resctrlManager          resctrl.Manager
+	resctrlManager          resctrl.ResControlManager
 	// List of raw container cgroup path prefix whitelist.
 	rawContainerCgroupPathPrefixWhiteList []string
 	// List of container env prefix whitelist, the matched container envs would be collected into metrics as extra labels.
e2e/vendor/github.com/google/cadvisor/resctrl/collector.go (generated, vendored, deleted): 171 deletions

@@ -1,171 +0,0 @@
-//go:build linux
-// +build linux
-
-// Copyright 2021 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Collector of resctrl for a container.
-package resctrl
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-
-	"k8s.io/klog/v2"
-
-	info "github.com/google/cadvisor/info/v1"
-)
-
-const noInterval = 0
-
-type collector struct {
-	id                string
-	interval          time.Duration
-	getContainerPids  func() ([]string, error)
-	resctrlPath       string
-	running           bool
-	destroyed         bool
-	numberOfNUMANodes int
-	vendorID          string
-	mu                sync.Mutex
-	inHostNamespace   bool
-}
-
-func newCollector(id string, getContainerPids func() ([]string, error), interval time.Duration, numberOfNUMANodes int, vendorID string, inHostNamespace bool) *collector {
-	return &collector{id: id, interval: interval, getContainerPids: getContainerPids, numberOfNUMANodes: numberOfNUMANodes,
-		vendorID: vendorID, mu: sync.Mutex{}, inHostNamespace: inHostNamespace}
-}
-
-func (c *collector) setup() error {
-	var err error
-	c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
-
-	if c.interval != noInterval {
-		if err != nil {
-			klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
-		} else {
-			c.running = true
-		}
-		go func() {
-			for {
-				time.Sleep(c.interval)
-				c.mu.Lock()
-				if c.destroyed {
-					break
-				}
-				klog.V(5).Infof("Trying to check %q containers control group.", c.id)
-				if c.running {
-					err = c.checkMonitoringGroup()
-					if err != nil {
-						c.running = false
-						klog.Errorf("Failed to check %q resctrl collector control group: %s \n Trying again in next intervals.", c.id, err)
-					}
-				} else {
-					c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
-					if err != nil {
-						c.running = false
-						klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
-					}
-				}
-				c.mu.Unlock()
-			}
-		}()
-	} else {
-		// There is no interval set, if setup fail, stop.
-		if err != nil {
-			return fmt.Errorf("failed to setup container %q resctrl collector: %w", c.id, err)
-		}
-		c.running = true
-	}
-
-	return nil
-}
-
-func (c *collector) checkMonitoringGroup() error {
-	newPath, err := prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
-	if err != nil {
-		return fmt.Errorf("couldn't obtain mon_group path: %v", err)
-	}
-
-	// Check if container moved between control groups.
-	if newPath != c.resctrlPath {
-		err = c.clear()
-		if err != nil {
-			return fmt.Errorf("couldn't clear previous monitoring group: %w", err)
-		}
-		c.resctrlPath = newPath
-	}
-
-	return nil
-}
-
-func (c *collector) UpdateStats(stats *info.ContainerStats) error {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.running {
-		stats.Resctrl = info.ResctrlStats{}
-
-		resctrlStats, err := getIntelRDTStatsFrom(c.resctrlPath, c.vendorID)
-		if err != nil {
-			return err
-		}
-
-		stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, c.numberOfNUMANodes)
-		stats.Resctrl.Cache = make([]info.CacheStats, 0, c.numberOfNUMANodes)
-
-		for _, numaNodeStats := range *resctrlStats.MBMStats {
-			stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth,
-				info.MemoryBandwidthStats{
-					TotalBytes: numaNodeStats.MBMTotalBytes,
-					LocalBytes: numaNodeStats.MBMLocalBytes,
-				})
-		}
-
-		for _, numaNodeStats := range *resctrlStats.CMTStats {
-			stats.Resctrl.Cache = append(stats.Resctrl.Cache,
-				info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy})
-		}
-	}
-
-	return nil
-}
-
-func (c *collector) Destroy() {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.running = false
-	err := c.clear()
-	if err != nil {
-		klog.Errorf("trying to destroy %q resctrl collector but: %v", c.id, err)
-	}
-	c.destroyed = true
-}
-
-func (c *collector) clear() error {
-	// Not allowed to remove root or undefined resctrl directory.
-	if c.id != rootContainer && c.resctrlPath != "" {
-		// Remove only own prepared mon group.
-		if strings.HasPrefix(filepath.Base(c.resctrlPath), monGroupPrefix) {
-			err := os.RemoveAll(c.resctrlPath)
-			if err != nil {
-				return fmt.Errorf("couldn't clear mon_group: %v", err)
-			}
-		}
-	}
-	return nil
-}
e2e/vendor/github.com/google/cadvisor/resctrl/factory.go (generated, vendored, new file): 58 additions

@@ -0,0 +1,58 @@
+// Copyright 2025 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resctrl
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/google/cadvisor/stats"
+
+	"k8s.io/klog/v2"
+)
+
+type ResControlManager interface {
+	Destroy()
+	GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error)
+}
+
+// All registered auth provider plugins.
+var pluginsLock sync.Mutex
+var plugins = make(map[string]ResControlManagerPlugin)
+
+type ResControlManagerPlugin interface {
+	NewManager(interval time.Duration, vendorID string, inHostNamespace bool) (ResControlManager, error)
+}
+
+func RegisterPlugin(name string, plugin ResControlManagerPlugin) error {
+	pluginsLock.Lock()
+	defer pluginsLock.Unlock()
+	if _, found := plugins[name]; found {
+		return fmt.Errorf("ResControlManagerPlugin %q was registered twice", name)
+	}
+	klog.V(4).Infof("Registered ResControlManagerPlugin %q", name)
+	plugins[name] = plugin
+	return nil
+}
+
+func NewManager(interval time.Duration, vendorID string, inHostNamespace bool) (ResControlManager, error) {
+	pluginsLock.Lock()
+	defer pluginsLock.Unlock()
+	for _, plugin := range plugins {
+		return plugin.NewManager(interval, vendorID, inHostNamespace)
+	}
+	return nil, fmt.Errorf("unable to find plugins for resctrl manager")
+}
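This new file replaces the concrete resctrl manager (deleted below, along with its collector and utilities) with a plugin registry: implementations register themselves, and NewManager hands back whichever plugin is present, which is why manager.go now calls resctrl.NewManager without the resctrl.Setup argument. A self-contained sketch of the pattern; the demo types and the trimmed interfaces are ours, not cAdvisor's, and a real implementation would call resctrl.RegisterPlugin from an init() in its own package:

```go
package main

import (
	"fmt"
	"time"
)

// Trimmed stand-ins for the interfaces in factory.go (the real
// ResControlManager also exposes GetCollector).
type ResControlManager interface{ Destroy() }

type ResControlManagerPlugin interface {
	NewManager(interval time.Duration, vendorID string, inHostNamespace bool) (ResControlManager, error)
}

var plugins = map[string]ResControlManagerPlugin{}

type demoManager struct{}

func (demoManager) Destroy() {}

type demoPlugin struct{}

func (demoPlugin) NewManager(time.Duration, string, bool) (ResControlManager, error) {
	return demoManager{}, nil
}

func main() {
	plugins["demo"] = demoPlugin{}
	// NewManager-style lookup: take the first registered plugin.
	for name, p := range plugins {
		m, err := p.NewManager(10*time.Second, "GenuineIntel", true)
		fmt.Println(name, err)
		m.Destroy()
	}
}
```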
e2e/vendor/github.com/google/cadvisor/resctrl/manager.go (generated, vendored, deleted): 79 deletions

@@ -1,79 +0,0 @@
-//go:build linux
-// +build linux
-
-// Copyright 2021 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Manager of resctrl for containers.
-package resctrl
-
-import (
-	"errors"
-	"time"
-
-	"k8s.io/klog/v2"
-
-	"github.com/google/cadvisor/container/raw"
-	"github.com/google/cadvisor/stats"
-)
-
-type Manager interface {
-	Destroy()
-	GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error)
-}
-
-type manager struct {
-	stats.NoopDestroy
-	interval        time.Duration
-	vendorID        string
-	inHostNamespace bool
-}
-
-func (m *manager) GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error) {
-	collector := newCollector(containerName, getContainerPids, m.interval, numberOfNUMANodes, m.vendorID, m.inHostNamespace)
-	err := collector.setup()
-	if err != nil {
-		return &stats.NoopCollector{}, err
-	}
-
-	return collector, nil
-}
-
-func NewManager(interval time.Duration, setup func() error, vendorID string, inHostNamespace bool) (Manager, error) {
-	err := setup()
-	if err != nil {
-		return &NoopManager{}, err
-	}
-
-	if !isResctrlInitialized {
-		return &NoopManager{}, errors.New("the resctrl isn't initialized")
-	}
-	if !(enabledCMT || enabledMBM) {
-		return &NoopManager{}, errors.New("there are no monitoring features available")
-	}
-
-	if !*raw.DockerOnly {
-		klog.Warning("--docker_only should be set when collecting Resctrl metrics! See the runtime docs.")
-	}
-
-	return &manager{interval: interval, vendorID: vendorID, inHostNamespace: inHostNamespace}, nil
-}
-
-type NoopManager struct {
-	stats.NoopDestroy
-}
-
-func (np *NoopManager) GetCollector(_ string, _ func() ([]string, error), _ int) (stats.Collector, error) {
-	return &stats.NoopCollector{}, nil
-}
e2e/vendor/github.com/google/cadvisor/resctrl/utils.go (generated, vendored, deleted): 369 deletions

@@ -1,369 +0,0 @@
-//go:build linux
-// +build linux
-
-// Copyright 2021 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Utilities.
-package resctrl
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/opencontainers/runc/libcontainer/cgroups"
-	"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
-	"github.com/opencontainers/runc/libcontainer/intelrdt"
-)
-
-const (
-	cpuCgroup                = "cpu"
-	rootContainer            = "/"
-	monitoringGroupDir       = "mon_groups"
-	processTask              = "task"
-	cpusFileName             = "cpus"
-	cpusListFileName         = "cpus_list"
-	schemataFileName         = "schemata"
-	tasksFileName            = "tasks"
-	modeFileName             = "mode"
-	sizeFileName             = "size"
-	infoDirName              = "info"
-	monDataDirName           = "mon_data"
-	monGroupsDirName         = "mon_groups"
-	noPidsPassedError        = "there are no pids passed"
-	noContainerNameError     = "there are no container name passed"
-	noControlGroupFoundError = "couldn't find control group matching container"
-	llcOccupancyFileName     = "llc_occupancy"
-	mbmLocalBytesFileName    = "mbm_local_bytes"
-	mbmTotalBytesFileName    = "mbm_total_bytes"
-	containerPrefix          = '/'
-	minContainerNameLen      = 2 // "/<container_name>" e.g. "/a"
-	unavailable              = "Unavailable"
-	monGroupPrefix           = "cadvisor"
-)
-
-var (
-	rootResctrl          = ""
-	pidsPath             = ""
-	processPath          = "/proc"
-	enabledMBM           = false
-	enabledCMT           = false
-	isResctrlInitialized = false
-	groupDirectories     = map[string]struct{}{
-		cpusFileName:     {},
-		cpusListFileName: {},
-		infoDirName:      {},
-		monDataDirName:   {},
-		monGroupsDirName: {},
-		schemataFileName: {},
-		tasksFileName:    {},
-		modeFileName:     {},
-		sizeFileName:     {},
-	}
-)
-
-func Setup() error {
-	var err error
-	rootResctrl, err = intelrdt.Root()
-	if err != nil {
-		return fmt.Errorf("unable to initialize resctrl: %v", err)
-	}
-
-	if cgroups.IsCgroup2UnifiedMode() {
-		pidsPath = fs2.UnifiedMountpoint
-	} else {
-		pidsPath = filepath.Join(fs2.UnifiedMountpoint, cpuCgroup)
-	}
-
-	enabledMBM = intelrdt.IsMBMEnabled()
-	enabledCMT = intelrdt.IsCMTEnabled()
-
-	isResctrlInitialized = true
-
-	return nil
-}
-
-func prepareMonitoringGroup(containerName string, getContainerPids func() ([]string, error), inHostNamespace bool) (string, error) {
-	if containerName == rootContainer {
-		return rootResctrl, nil
-	}
-
-	pids, err := getContainerPids()
-	if err != nil {
-		return "", err
-	}
-
-	if len(pids) == 0 {
-		return "", fmt.Errorf("couldn't obtain %q container pids: there is no pids in cgroup", containerName)
-	}
-
-	// Firstly, find the control group to which the container belongs.
-	// Consider the root group.
-	controlGroupPath, err := findGroup(rootResctrl, pids, true, false)
-	if err != nil {
-		return "", fmt.Errorf("%q %q: %q", noControlGroupFoundError, containerName, err)
-	}
-	if controlGroupPath == "" {
-		return "", fmt.Errorf("%q %q", noControlGroupFoundError, containerName)
-	}
-
-	// Check if there is any monitoring group.
-	monGroupPath, err := findGroup(filepath.Join(controlGroupPath, monGroupsDirName), pids, false, true)
-	if err != nil {
-		return "", fmt.Errorf("couldn't find monitoring group matching %q container: %v", containerName, err)
-	}
-
-	// Prepare new one if not exists.
-	if monGroupPath == "" {
-		// Remove leading prefix.
-		// e.g. /my/container -> my/container
-		if len(containerName) >= minContainerNameLen && containerName[0] == containerPrefix {
-			containerName = containerName[1:]
-		}
-
-		// Add own prefix and use `-` instead `/`.
-		// e.g. my/container -> cadvisor-my-container
-		properContainerName := fmt.Sprintf("%s-%s", monGroupPrefix, strings.Replace(containerName, "/", "-", -1))
-		monGroupPath = filepath.Join(controlGroupPath, monitoringGroupDir, properContainerName)
-
-		err = os.MkdirAll(monGroupPath, os.ModePerm)
-		if err != nil {
-			return "", fmt.Errorf("couldn't create monitoring group directory for %q container: %w", containerName, err)
-		}
-
-		if !inHostNamespace {
-			processPath = "/rootfs/proc"
-		}
-
-		for _, pid := range pids {
-			processThreads, err := getAllProcessThreads(filepath.Join(processPath, pid, processTask))
-			if err != nil {
-				return "", err
-			}
-			for _, thread := range processThreads {
-				err = intelrdt.WriteIntelRdtTasks(monGroupPath, thread)
-				if err != nil {
-					secondError := os.Remove(monGroupPath)
-					if secondError != nil {
-						return "", fmt.Errorf(
-							"coudn't assign pids to %q container monitoring group: %w \n couldn't clear %q monitoring group: %v",
-							containerName, err, containerName, secondError)
-					}
-					return "", fmt.Errorf("coudn't assign pids to %q container monitoring group: %w", containerName, err)
-				}
-			}
-		}
-	}
-
-	return monGroupPath, nil
-}
-
-func getPids(containerName string) ([]int, error) {
-	if len(containerName) == 0 {
-		// No container name passed.
-		return nil, fmt.Errorf(noContainerNameError)
-	}
-	pids, err := cgroups.GetAllPids(filepath.Join(pidsPath, containerName))
-	if err != nil {
-		return nil, fmt.Errorf("couldn't obtain pids for %q container: %v", containerName, err)
-	}
-	return pids, nil
-}
-
-// getAllProcessThreads obtains all available processes from directory.
-// e.g. ls /proc/4215/task/ -> 4215, 4216, 4217, 4218
-// func will return [4215, 4216, 4217, 4218].
-func getAllProcessThreads(path string) ([]int, error) {
-	processThreads := make([]int, 0)
-
-	threadDirs, err := os.ReadDir(path)
-	if err != nil {
-		return processThreads, err
-	}
-
-	for _, dir := range threadDirs {
-		pid, err := strconv.Atoi(dir.Name())
-		if err != nil {
-			return nil, fmt.Errorf("couldn't parse %q dir: %v", dir.Name(), err)
-		}
-		processThreads = append(processThreads, pid)
-	}
-
-	return processThreads, nil
-}
-
-// findGroup returns the path of a control/monitoring group in which the pids are.
-func findGroup(group string, pids []string, includeGroup bool, exclusive bool) (string, error) {
-	if len(pids) == 0 {
-		return "", fmt.Errorf(noPidsPassedError)
-	}
-
-	availablePaths := make([]string, 0)
-	if includeGroup {
-		availablePaths = append(availablePaths, group)
-	}
-
-	files, err := os.ReadDir(group)
-	for _, file := range files {
-		if _, ok := groupDirectories[file.Name()]; !ok {
-			availablePaths = append(availablePaths, filepath.Join(group, file.Name()))
-		}
-	}
-	if err != nil {
-		return "", fmt.Errorf("couldn't obtain groups paths: %w", err)
-	}
-
-	for _, path := range availablePaths {
-		groupFound, err := arePIDsInGroup(path, pids, exclusive)
-		if err != nil {
-			return "", err
-		}
-		if groupFound {
-			return path, nil
-		}
-	}
-
-	return "", nil
-}
-
-// arePIDsInGroup returns true if all of the pids are within control group.
-func arePIDsInGroup(path string, pids []string, exclusive bool) (bool, error) {
-	if len(pids) == 0 {
-		return false, fmt.Errorf("couldn't obtain pids from %q path: %v", path, noPidsPassedError)
-	}
-
-	tasks, err := readTasksFile(filepath.Join(path, tasksFileName))
-	if err != nil {
-		return false, err
-	}
-
-	any := false
-	for _, pid := range pids {
-		_, ok := tasks[pid]
-		if !ok {
-			// There are missing pids within group.
-			if any {
-				return false, fmt.Errorf("there should be all pids in group")
-			}
-			return false, nil
-		}
-		any = true
-	}
-
-	// Check if there should be only passed pids in group.
-	if exclusive {
-		if len(tasks) != len(pids) {
-			return false, fmt.Errorf("group should have container pids only")
-		}
-	}
-
-	return true, nil
-}
-
-// readTasksFile returns pids map from given tasks path.
-func readTasksFile(tasksPath string) (map[string]struct{}, error) {
-	tasks := make(map[string]struct{})
-
-	tasksFile, err := os.Open(tasksPath)
-	if err != nil {
-		return tasks, fmt.Errorf("couldn't read tasks file from %q path: %w", tasksPath, err)
-	}
-	defer tasksFile.Close()
-
-	scanner := bufio.NewScanner(tasksFile)
-	for scanner.Scan() {
-		tasks[scanner.Text()] = struct{}{}
-	}
-
-	if err := scanner.Err(); err != nil {
-		return tasks, fmt.Errorf("couldn't obtain pids from %q path: %w", tasksPath, err)
-	}
-
-	return tasks, nil
-}
-
-func readStatFrom(path string, vendorID string) (uint64, error) {
-	context, err := os.ReadFile(path)
-	if err != nil {
-		return 0, err
-	}
-
-	contextString := string(bytes.TrimSpace(context))
-
-	if contextString == unavailable {
-		err := fmt.Errorf("\"Unavailable\" value from file %q", path)
-		if vendorID == "AuthenticAMD" {
-			kernelBugzillaLink := "https://bugzilla.kernel.org/show_bug.cgi?id=213311"
-			err = fmt.Errorf("%v, possible bug: %q", err, kernelBugzillaLink)
-		}
-		return 0, err
-	}
-
-	stat, err := strconv.ParseUint(contextString, 10, 64)
-	if err != nil {
-		return stat, fmt.Errorf("unable to parse %q as a uint from file %q", string(context), path)
-	}
-
-	return stat, nil
-}
-
-func getIntelRDTStatsFrom(path string, vendorID string) (intelrdt.Stats, error) {
-	stats := intelrdt.Stats{}
-
-	statsDirectories, err := filepath.Glob(filepath.Join(path, monDataDirName, "*"))
-	if err != nil {
-		return stats, err
-	}
-
-	if len(statsDirectories) == 0 {
-		return stats, fmt.Errorf("there is no mon_data stats directories: %q", path)
-	}
-
-	var cmtStats []intelrdt.CMTNumaNodeStats
-	var mbmStats []intelrdt.MBMNumaNodeStats
-
-	for _, dir := range statsDirectories {
-		if enabledCMT {
-			llcOccupancy, err := readStatFrom(filepath.Join(dir, llcOccupancyFileName), vendorID)
-			if err != nil {
-				return stats, err
-			}
-			cmtStats = append(cmtStats, intelrdt.CMTNumaNodeStats{LLCOccupancy: llcOccupancy})
-		}
-		if enabledMBM {
-			mbmTotalBytes, err := readStatFrom(filepath.Join(dir, mbmTotalBytesFileName), vendorID)
-			if err != nil {
-				return stats, err
-			}
-			mbmLocalBytes, err := readStatFrom(filepath.Join(dir, mbmLocalBytesFileName), vendorID)
-			if err != nil {
-				return stats, err
-			}
-			mbmStats = append(mbmStats, intelrdt.MBMNumaNodeStats{
-				MBMTotalBytes: mbmTotalBytes,
-				MBMLocalBytes: mbmLocalBytes,
-			})
-		}
-	}
-
-	stats.CMTStats = &cmtStats
-	stats.MBMStats = &mbmStats
-
-	return stats, nil
-}
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go (generated, vendored): 8 changes

@@ -47,7 +47,7 @@ type netlinkMessage struct {
 func (m netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
 	rawmsg.Header = m.Header
 	w := bytes.NewBuffer([]byte{})
-	binary.Write(w, Endian, m.GenHeader)
+	_ = binary.Write(w, Endian, m.GenHeader)
 	w.Write(m.Data)
 	rawmsg.Data = w.Bytes()
 	return rawmsg
@@ -94,13 +94,13 @@ func addAttribute(buf *bytes.Buffer, attrType uint16, data interface{}, dataSize
 		Type: attrType,
 	}
 	attr.Len += uint16(dataSize)
-	binary.Write(buf, Endian, attr)
+	_ = binary.Write(buf, Endian, attr)
 	switch data := data.(type) {
 	case string:
-		binary.Write(buf, Endian, []byte(data))
+		_ = binary.Write(buf, Endian, []byte(data))
 		buf.WriteByte(0) // terminate
 	default:
-		binary.Write(buf, Endian, data)
+		_ = binary.Write(buf, Endian, data)
 	}
 	for i := 0; i < padding(int(attr.Len), syscall.NLMSG_ALIGNTO); i++ {
 		buf.WriteByte(0)
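The blanket `_ =` assignments here are about satisfying error-check linters, not changing behavior: binary.Write into a bytes.Buffer can only fail if the value itself is unencodable, since bytes.Buffer's Write never returns an error, so the result is deliberately discarded. A quick demonstration:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	// Fixed-size data into an in-memory buffer: err is always nil here,
	// which is why the vendored code discards it with `_ =`.
	err := binary.Write(&buf, binary.LittleEndian, uint32(0xDEADBEEF))
	fmt.Println(err, buf.Len()) // <nil> 4
}
```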