mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-14 18:53:35 +00:00
build: move e2e dependencies into e2e/go.mod
Several packages are only used while running the e2e suite. These packages are less important to update, as the they can not influence the final executable that is part of the Ceph-CSI container-image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify if a reported CVE affects Ceph-CSI, or only the testing (like most of the Kubernetes CVEs). Signed-off-by: Niels de Vos <ndevos@ibm.com>
This commit is contained in:
committed by
mergify[bot]
parent
15da101b1b
commit
bec6090996
89
e2e/vendor/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go
generated
vendored
Normal file
89
e2e/vendor/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Get information about the cloud provider (if any) cAdvisor is running on.
|
||||
|
||||
package cloudinfo
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
type CloudInfo interface {
|
||||
GetCloudProvider() info.CloudProvider
|
||||
GetInstanceType() info.InstanceType
|
||||
GetInstanceID() info.InstanceID
|
||||
}
|
||||
|
||||
// CloudProvider is an abstraction for providing cloud-specific information.
|
||||
type CloudProvider interface {
|
||||
// IsActiveProvider determines whether this is the cloud provider operating
|
||||
// this instance.
|
||||
IsActiveProvider() bool
|
||||
// GetInstanceType gets the type of instance this process is running on.
|
||||
// The behavior is undefined if this is not the active provider.
|
||||
GetInstanceType() info.InstanceType
|
||||
// GetInstanceType gets the ID of the instance this process is running on.
|
||||
// The behavior is undefined if this is not the active provider.
|
||||
GetInstanceID() info.InstanceID
|
||||
}
|
||||
|
||||
var providers = map[info.CloudProvider]CloudProvider{}
|
||||
|
||||
// RegisterCloudProvider registers the given cloud provider
|
||||
func RegisterCloudProvider(name info.CloudProvider, provider CloudProvider) {
|
||||
if _, alreadyRegistered := providers[name]; alreadyRegistered {
|
||||
klog.Warningf("Duplicate registration of CloudProvider %s", name)
|
||||
}
|
||||
providers[name] = provider
|
||||
}
|
||||
|
||||
type realCloudInfo struct {
|
||||
cloudProvider info.CloudProvider
|
||||
instanceType info.InstanceType
|
||||
instanceID info.InstanceID
|
||||
}
|
||||
|
||||
func NewRealCloudInfo() CloudInfo {
|
||||
for name, provider := range providers {
|
||||
if provider.IsActiveProvider() {
|
||||
return &realCloudInfo{
|
||||
cloudProvider: name,
|
||||
instanceType: provider.GetInstanceType(),
|
||||
instanceID: provider.GetInstanceID(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No registered active provider.
|
||||
return &realCloudInfo{
|
||||
cloudProvider: info.UnknownProvider,
|
||||
instanceType: info.UnknownInstance,
|
||||
instanceID: info.UnNamedInstance,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *realCloudInfo) GetCloudProvider() info.CloudProvider {
|
||||
return i.cloudProvider
|
||||
}
|
||||
|
||||
func (i *realCloudInfo) GetInstanceType() info.InstanceType {
|
||||
return i.instanceType
|
||||
}
|
||||
|
||||
func (i *realCloudInfo) GetInstanceID() info.InstanceID {
|
||||
return i.instanceID
|
||||
}
|
47
e2e/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go
generated
vendored
Normal file
47
e2e/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cpuload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/utils/cpuload/netlink"
|
||||
)
|
||||
|
||||
type CpuLoadReader interface {
|
||||
// Start the reader.
|
||||
Start() error
|
||||
|
||||
// Stop the reader and clean up internal state.
|
||||
Stop()
|
||||
|
||||
// Retrieve Cpu load for a given group.
|
||||
// name is the full hierarchical name of the container.
|
||||
// Path is an absolute filesystem path for a container under CPU cgroup hierarchy.
|
||||
GetCpuLoad(name string, path string) (info.LoadStats, error)
|
||||
}
|
||||
|
||||
func New() (CpuLoadReader, error) {
|
||||
reader, err := netlink.New()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err)
|
||||
}
|
||||
klog.V(4).Info("Using a netlink-based load reader")
|
||||
return reader, nil
|
||||
}
|
98
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/conn.go
generated
vendored
Normal file
98
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/conn.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type Connection struct {
|
||||
// netlink socket
|
||||
fd int
|
||||
// cache pid to use in every netlink request.
|
||||
pid uint32
|
||||
// sequence number for netlink messages.
|
||||
seq uint32
|
||||
addr syscall.SockaddrNetlink
|
||||
rbuf *bufio.Reader
|
||||
}
|
||||
|
||||
// Create and bind a new netlink socket.
|
||||
func newConnection() (*Connection, error) {
|
||||
|
||||
fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_DGRAM, syscall.NETLINK_GENERIC)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conn := new(Connection)
|
||||
conn.fd = fd
|
||||
conn.seq = 0
|
||||
conn.pid = uint32(os.Getpid())
|
||||
conn.addr.Family = syscall.AF_NETLINK
|
||||
conn.rbuf = bufio.NewReader(conn)
|
||||
err = syscall.Bind(fd, &conn.addr)
|
||||
if err != nil {
|
||||
syscall.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
func (c *Connection) Read(b []byte) (n int, err error) {
|
||||
n, _, err = syscall.Recvfrom(c.fd, b, 0)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *Connection) Write(b []byte) (n int, err error) {
|
||||
err = syscall.Sendto(c.fd, b, 0, &c.addr)
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
func (c *Connection) Close() error {
|
||||
return syscall.Close(c.fd)
|
||||
}
|
||||
|
||||
func (c *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
|
||||
w := bytes.NewBuffer(nil)
|
||||
msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data))
|
||||
msg.Header.Seq = c.seq
|
||||
c.seq++
|
||||
msg.Header.Pid = c.pid
|
||||
err := binary.Write(w, binary.LittleEndian, msg.Header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(msg.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.Write(w.Bytes())
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) {
|
||||
err = binary.Read(c.rbuf, binary.LittleEndian, &msg.Header)
|
||||
if err != nil {
|
||||
return msg, err
|
||||
}
|
||||
msg.Data = make([]byte, msg.Header.Len-syscall.NLMSG_HDRLEN)
|
||||
_, err = c.rbuf.Read(msg.Data)
|
||||
return msg, err
|
||||
}
|
241
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go
generated
vendored
Normal file
241
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(rjnagal): Verify and fix for other architectures.
|
||||
|
||||
Endian = binary.LittleEndian
|
||||
)
|
||||
|
||||
type genMsghdr struct {
|
||||
Command uint8
|
||||
Version uint8
|
||||
Reserved uint16
|
||||
}
|
||||
|
||||
type netlinkMessage struct {
|
||||
Header syscall.NlMsghdr
|
||||
GenHeader genMsghdr
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func (m netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
|
||||
rawmsg.Header = m.Header
|
||||
w := bytes.NewBuffer([]byte{})
|
||||
binary.Write(w, Endian, m.GenHeader)
|
||||
w.Write(m.Data)
|
||||
rawmsg.Data = w.Bytes()
|
||||
return rawmsg
|
||||
}
|
||||
|
||||
type loadStatsResp struct {
|
||||
Header syscall.NlMsghdr
|
||||
GenHeader genMsghdr
|
||||
Stats info.LoadStats
|
||||
}
|
||||
|
||||
// Return required padding to align 'size' to 'alignment'.
|
||||
func padding(size int, alignment int) int {
|
||||
unalignedPart := size % alignment
|
||||
return (alignment - unalignedPart) % alignment
|
||||
}
|
||||
|
||||
// Get family id for taskstats subsystem.
|
||||
func getFamilyID(conn *Connection) (uint16, error) {
|
||||
msg := prepareFamilyMessage()
|
||||
err := conn.WriteMessage(msg.toRawMsg())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
resp, err := conn.ReadMessage()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
id, err := parseFamilyResp(resp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// Append an attribute to the message.
|
||||
// Adds attribute info (length and type), followed by the data and necessary padding.
|
||||
// Can be called multiple times to add attributes. Only fixed size and string type
|
||||
// attributes are handled. We don't need nested attributes for task stats.
|
||||
func addAttribute(buf *bytes.Buffer, attrType uint16, data interface{}, dataSize int) {
|
||||
attr := syscall.RtAttr{
|
||||
Len: syscall.SizeofRtAttr,
|
||||
Type: attrType,
|
||||
}
|
||||
attr.Len += uint16(dataSize)
|
||||
binary.Write(buf, Endian, attr)
|
||||
switch data := data.(type) {
|
||||
case string:
|
||||
binary.Write(buf, Endian, []byte(data))
|
||||
buf.WriteByte(0) // terminate
|
||||
default:
|
||||
binary.Write(buf, Endian, data)
|
||||
}
|
||||
for i := 0; i < padding(int(attr.Len), syscall.NLMSG_ALIGNTO); i++ {
|
||||
buf.WriteByte(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Prepares the message and generic headers and appends attributes as data.
|
||||
func prepareMessage(headerType uint16, cmd uint8, attributes []byte) (msg netlinkMessage) {
|
||||
msg.Header.Type = headerType
|
||||
msg.Header.Flags = syscall.NLM_F_REQUEST
|
||||
msg.GenHeader.Command = cmd
|
||||
msg.GenHeader.Version = 0x1
|
||||
msg.Data = attributes
|
||||
return msg
|
||||
}
|
||||
|
||||
// Prepares message to query family id for task stats.
|
||||
func prepareFamilyMessage() (msg netlinkMessage) {
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
addAttribute(buf, unix.CTRL_ATTR_FAMILY_NAME, unix.TASKSTATS_GENL_NAME, len(unix.TASKSTATS_GENL_NAME)+1)
|
||||
return prepareMessage(unix.GENL_ID_CTRL, unix.CTRL_CMD_GETFAMILY, buf.Bytes())
|
||||
}
|
||||
|
||||
// Prepares message to query task stats for a task group.
|
||||
func prepareCmdMessage(id uint16, cfd uintptr) (msg netlinkMessage) {
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
addAttribute(buf, unix.CGROUPSTATS_CMD_ATTR_FD, uint32(cfd), 4)
|
||||
return prepareMessage(id, unix.CGROUPSTATS_CMD_GET, buf.Bytes())
|
||||
}
|
||||
|
||||
// Extracts returned family id from the response.
|
||||
func parseFamilyResp(msg syscall.NetlinkMessage) (uint16, error) {
|
||||
m := new(netlinkMessage)
|
||||
m.Header = msg.Header
|
||||
err := verifyHeader(msg)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
buf := bytes.NewBuffer(msg.Data)
|
||||
// extract generic header from data.
|
||||
err = binary.Read(buf, Endian, &m.GenHeader)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
id := uint16(0)
|
||||
// Extract attributes. kernel reports family name, id, version, etc.
|
||||
// Scan till we find id.
|
||||
for buf.Len() > syscall.SizeofRtAttr {
|
||||
var attr syscall.RtAttr
|
||||
err = binary.Read(buf, Endian, &attr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if attr.Type == unix.CTRL_ATTR_FAMILY_ID {
|
||||
err = binary.Read(buf, Endian, &id)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
payload := int(attr.Len) - syscall.SizeofRtAttr
|
||||
skipLen := payload + padding(payload, syscall.SizeofRtAttr)
|
||||
name := make([]byte, skipLen)
|
||||
err = binary.Read(buf, Endian, name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("family id not found in the response")
|
||||
}
|
||||
|
||||
// Extract task stats from response returned by kernel.
|
||||
func parseLoadStatsResp(msg syscall.NetlinkMessage) (*loadStatsResp, error) {
|
||||
m := new(loadStatsResp)
|
||||
m.Header = msg.Header
|
||||
err := verifyHeader(msg)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
buf := bytes.NewBuffer(msg.Data)
|
||||
// Scan the general header.
|
||||
err = binary.Read(buf, Endian, &m.GenHeader)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
// cgroup stats response should have just one attribute.
|
||||
// Read it directly into the stats structure.
|
||||
var attr syscall.RtAttr
|
||||
err = binary.Read(buf, Endian, &attr)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
err = binary.Read(buf, Endian, &m.Stats)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
// Verify and return any error reported by kernel.
|
||||
func verifyHeader(msg syscall.NetlinkMessage) error {
|
||||
switch msg.Header.Type {
|
||||
case syscall.NLMSG_DONE:
|
||||
return fmt.Errorf("expected a response, got nil")
|
||||
case syscall.NLMSG_ERROR:
|
||||
buf := bytes.NewBuffer(msg.Data)
|
||||
var errno int32
|
||||
err := binary.Read(buf, Endian, errno)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("netlink request failed with error %s", syscall.Errno(-errno))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get load stats for a task group.
|
||||
// id: family id for taskstats.
|
||||
// cfd: open file to path to the cgroup directory under cpu hierarchy.
|
||||
// conn: open netlink connection used to communicate with kernel.
|
||||
func getLoadStats(id uint16, cfd *os.File, conn *Connection) (info.LoadStats, error) {
|
||||
msg := prepareCmdMessage(id, cfd.Fd())
|
||||
err := conn.WriteMessage(msg.toRawMsg())
|
||||
if err != nil {
|
||||
return info.LoadStats{}, err
|
||||
}
|
||||
|
||||
resp, err := conn.ReadMessage()
|
||||
if err != nil {
|
||||
return info.LoadStats{}, err
|
||||
}
|
||||
|
||||
parsedmsg, err := parseLoadStatsResp(resp)
|
||||
if err != nil {
|
||||
return info.LoadStats{}, err
|
||||
}
|
||||
return parsedmsg.Stats, nil
|
||||
}
|
80
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go
generated
vendored
Normal file
80
e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
type NetlinkReader struct {
|
||||
familyID uint16
|
||||
conn *Connection
|
||||
}
|
||||
|
||||
func New() (*NetlinkReader, error) {
|
||||
conn, err := newConnection()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create a new connection: %s", err)
|
||||
}
|
||||
|
||||
id, err := getFamilyID(conn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err)
|
||||
}
|
||||
klog.V(4).Infof("Family id for taskstats: %d", id)
|
||||
return &NetlinkReader{
|
||||
familyID: id,
|
||||
conn: conn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *NetlinkReader) Stop() {
|
||||
if r.conn != nil {
|
||||
r.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *NetlinkReader) Start() error {
|
||||
// We do the start setup for netlink in New(). Nothing to do here.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns instantaneous number of running tasks in a group.
|
||||
// Caller can use historical data to calculate cpu load.
|
||||
// path is an absolute filesystem path for a container under the CPU cgroup hierarchy.
|
||||
// NOTE: non-hierarchical load is returned. It does not include load for subcontainers.
|
||||
func (r *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
|
||||
if len(path) == 0 {
|
||||
return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty")
|
||||
}
|
||||
|
||||
cfd, err := os.Open(path)
|
||||
if err != nil {
|
||||
return info.LoadStats{}, fmt.Errorf("failed to open cgroup path %s: %q", path, err)
|
||||
}
|
||||
defer cfd.Close()
|
||||
|
||||
stats, err := getLoadStats(r.familyID, cfd, r.conn)
|
||||
if err != nil {
|
||||
return info.LoadStats{}, err
|
||||
}
|
||||
klog.V(4).Infof("Task stats for %q: %+v", path, stats)
|
||||
return stats, nil
|
||||
}
|
174
e2e/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go
generated
vendored
Normal file
174
e2e/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oomparser
|
||||
|
||||
import (
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/euank/go-kmsg-parser/kmsgparser"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
legacyContainerRegexp = regexp.MustCompile(`Task in (.*) killed as a result of limit of (.*)`)
|
||||
// Starting in 5.0 linux kernels, the OOM message changed
|
||||
containerRegexp = regexp.MustCompile(`oom-kill:constraint=(.*),nodemask=(.*),cpuset=(.*),mems_allowed=(.*),oom_memcg=(.*),task_memcg=(.*),task=(.*),pid=(.*),uid=(.*)`)
|
||||
lastLineRegexp = regexp.MustCompile(`Killed process ([0-9]+) \((.+)\)`)
|
||||
firstLineRegexp = regexp.MustCompile(`invoked oom-killer:`)
|
||||
)
|
||||
|
||||
// OomParser wraps a kmsgparser in order to extract OOM events from the
|
||||
// individual kernel ring buffer messages.
|
||||
type OomParser struct {
|
||||
parser kmsgparser.Parser
|
||||
}
|
||||
|
||||
// struct that contains information related to an OOM kill instance
|
||||
type OomInstance struct {
|
||||
// process id of the killed process
|
||||
Pid int
|
||||
// the name of the killed process
|
||||
ProcessName string
|
||||
// the time that the process was reported to be killed,
|
||||
// accurate to the minute
|
||||
TimeOfDeath time.Time
|
||||
// the absolute name of the container that OOMed
|
||||
ContainerName string
|
||||
// the absolute name of the container that was killed
|
||||
// due to the OOM.
|
||||
VictimContainerName string
|
||||
// the constraint that triggered the OOM. One of CONSTRAINT_NONE,
|
||||
// CONSTRAINT_CPUSET, CONSTRAINT_MEMORY_POLICY, CONSTRAINT_MEMCG
|
||||
Constraint string
|
||||
}
|
||||
|
||||
// gets the container name from a line and adds it to the oomInstance.
|
||||
func getLegacyContainerName(line string, currentOomInstance *OomInstance) error {
|
||||
parsedLine := legacyContainerRegexp.FindStringSubmatch(line)
|
||||
if parsedLine == nil {
|
||||
return nil
|
||||
}
|
||||
currentOomInstance.ContainerName = path.Join("/", parsedLine[1])
|
||||
currentOomInstance.VictimContainerName = path.Join("/", parsedLine[2])
|
||||
return nil
|
||||
}
|
||||
|
||||
// gets the container name from a line and adds it to the oomInstance.
|
||||
func getContainerName(line string, currentOomInstance *OomInstance) (bool, error) {
|
||||
parsedLine := containerRegexp.FindStringSubmatch(line)
|
||||
if parsedLine == nil {
|
||||
// Fall back to the legacy format if it isn't found here.
|
||||
return false, getLegacyContainerName(line, currentOomInstance)
|
||||
}
|
||||
currentOomInstance.ContainerName = parsedLine[6]
|
||||
currentOomInstance.VictimContainerName = parsedLine[5]
|
||||
currentOomInstance.Constraint = parsedLine[1]
|
||||
pid, err := strconv.Atoi(parsedLine[8])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
currentOomInstance.Pid = pid
|
||||
currentOomInstance.ProcessName = parsedLine[7]
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// gets the pid, name, and date from a line and adds it to oomInstance
|
||||
func getProcessNamePid(line string, currentOomInstance *OomInstance) (bool, error) {
|
||||
reList := lastLineRegexp.FindStringSubmatch(line)
|
||||
|
||||
if reList == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(reList[1])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
currentOomInstance.Pid = pid
|
||||
currentOomInstance.ProcessName = reList[2]
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// uses regex to see if line is the start of a kernel oom log
|
||||
func checkIfStartOfOomMessages(line string) bool {
|
||||
potentialOomStart := firstLineRegexp.MatchString(line)
|
||||
return potentialOomStart
|
||||
}
|
||||
|
||||
// StreamOoms writes to a provided a stream of OomInstance objects representing
|
||||
// OOM events that are found in the logs.
|
||||
// It will block and should be called from a goroutine.
|
||||
func (p *OomParser) StreamOoms(outStream chan<- *OomInstance) {
|
||||
kmsgEntries := p.parser.Parse()
|
||||
defer p.parser.Close()
|
||||
|
||||
for msg := range kmsgEntries {
|
||||
isOomMessage := checkIfStartOfOomMessages(msg.Message)
|
||||
if isOomMessage {
|
||||
oomCurrentInstance := &OomInstance{
|
||||
ContainerName: "/",
|
||||
VictimContainerName: "/",
|
||||
TimeOfDeath: msg.Timestamp,
|
||||
}
|
||||
for msg := range kmsgEntries {
|
||||
finished, err := getContainerName(msg.Message, oomCurrentInstance)
|
||||
if err != nil {
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
if !finished {
|
||||
finished, err = getProcessNamePid(msg.Message, oomCurrentInstance)
|
||||
if err != nil {
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
}
|
||||
if finished {
|
||||
oomCurrentInstance.TimeOfDeath = msg.Timestamp
|
||||
break
|
||||
}
|
||||
}
|
||||
outStream <- oomCurrentInstance
|
||||
}
|
||||
}
|
||||
// Should not happen
|
||||
klog.Errorf("exiting analyzeLines. OOM events will not be reported.")
|
||||
}
|
||||
|
||||
// initializes an OomParser object. Returns an OomParser object and an error.
|
||||
func New() (*OomParser, error) {
|
||||
parser, err := kmsgparser.NewParser()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parser.SetLogger(glogAdapter{})
|
||||
return &OomParser{parser: parser}, nil
|
||||
}
|
||||
|
||||
type glogAdapter struct{}
|
||||
|
||||
var _ kmsgparser.Logger = glogAdapter{}
|
||||
|
||||
func (glogAdapter) Infof(format string, args ...interface{}) {
|
||||
klog.V(4).Infof(format, args...)
|
||||
}
|
||||
func (glogAdapter) Warningf(format string, args ...interface{}) {
|
||||
klog.V(2).Infof(format, args...)
|
||||
}
|
||||
func (glogAdapter) Errorf(format string, args ...interface{}) {
|
||||
klog.Warningf(format, args...)
|
||||
}
|
24
e2e/vendor/github.com/google/cadvisor/utils/path.go
generated
vendored
Normal file
24
e2e/vendor/github.com/google/cadvisor/utils/path.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import "os"
|
||||
|
||||
func FileExists(file string) bool {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
603
e2e/vendor/github.com/google/cadvisor/utils/sysfs/sysfs.go
generated
vendored
Normal file
603
e2e/vendor/github.com/google/cadvisor/utils/sysfs/sysfs.go
generated
vendored
Normal file
@ -0,0 +1,603 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
blockDir = "/sys/block"
|
||||
cacheDir = "/sys/devices/system/cpu/cpu"
|
||||
netDir = "/sys/class/net"
|
||||
dmiDir = "/sys/class/dmi"
|
||||
ppcDevTree = "/proc/device-tree"
|
||||
s390xDevTree = "/etc" // s390/s390x changes
|
||||
|
||||
meminfoFile = "meminfo"
|
||||
|
||||
distanceFile = "distance"
|
||||
|
||||
sysFsCPUTopology = "topology"
|
||||
|
||||
// CPUPhysicalPackageID is a physical package id of cpu#. Typically corresponds to a physical socket number,
|
||||
// but the actual value is architecture and platform dependent.
|
||||
CPUPhysicalPackageID = "physical_package_id"
|
||||
// CPUCoreID is the CPU core ID of cpu#. Typically it is the hardware platform's identifier
|
||||
// (rather than the kernel's). The actual value is architecture and platform dependent.
|
||||
CPUCoreID = "core_id"
|
||||
|
||||
coreIDFilePath = "/" + sysFsCPUTopology + "/core_id"
|
||||
packageIDFilePath = "/" + sysFsCPUTopology + "/physical_package_id"
|
||||
bookIDFilePath = "/" + sysFsCPUTopology + "/book_id"
|
||||
drawerIDFilePath = "/" + sysFsCPUTopology + "/drawer_id"
|
||||
|
||||
// memory size calculations
|
||||
|
||||
cpuDirPattern = "cpu*[0-9]"
|
||||
nodeDirPattern = "node*[0-9]"
|
||||
|
||||
//HugePagesNrFile name of nr_hugepages file in sysfs
|
||||
HugePagesNrFile = "nr_hugepages"
|
||||
)
|
||||
|
||||
var (
|
||||
nodeDir = "/sys/devices/system/node/"
|
||||
)
|
||||
|
||||
type CacheInfo struct {
|
||||
// cache id
|
||||
Id int
|
||||
// size in bytes
|
||||
Size uint64
|
||||
// cache type - instruction, data, unified
|
||||
Type string
|
||||
// distance from cpus in a multi-level hierarchy
|
||||
Level int
|
||||
// number of cpus that can access this cache.
|
||||
Cpus int
|
||||
}
|
||||
|
||||
// Abstracts the lowest level calls to sysfs.
|
||||
type SysFs interface {
|
||||
// Get NUMA nodes paths
|
||||
GetNodesPaths() ([]string, error)
|
||||
// Get paths to CPUs in provided directory e.g. /sys/devices/system/node/node0 or /sys/devices/system/cpu
|
||||
GetCPUsPaths(cpusPath string) ([]string, error)
|
||||
// Get physical core id for specified CPU
|
||||
GetCoreID(coreIDFilePath string) (string, error)
|
||||
// Get physical package id for specified CPU
|
||||
GetCPUPhysicalPackageID(cpuPath string) (string, error)
|
||||
// Get book id for specified CPU
|
||||
GetBookID(cpuPath string) (string, error)
|
||||
// Get drawer id for specified CPU
|
||||
GetDrawerID(cpuPath string) (string, error)
|
||||
// Get total memory for specified NUMA node
|
||||
GetMemInfo(nodeDir string) (string, error)
|
||||
// Get hugepages from specified directory
|
||||
GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error)
|
||||
// Get hugepage_nr from specified directory
|
||||
GetHugePagesNr(hugePagesDirectory string, hugePageName string) (string, error)
|
||||
// Get directory information for available block devices.
|
||||
GetBlockDevices() ([]os.FileInfo, error)
|
||||
// Get Size of a given block device.
|
||||
GetBlockDeviceSize(string) (string, error)
|
||||
// Get scheduler type for the block device.
|
||||
GetBlockDeviceScheduler(string) (string, error)
|
||||
// Get device major:minor number string.
|
||||
GetBlockDeviceNumbers(string) (string, error)
|
||||
// Is the device "hidden" (meaning will not have a device handle)
|
||||
// This is the case with native nvme multipathing.
|
||||
IsBlockDeviceHidden(string) (bool, error)
|
||||
|
||||
GetNetworkDevices() ([]os.FileInfo, error)
|
||||
GetNetworkAddress(string) (string, error)
|
||||
GetNetworkMtu(string) (string, error)
|
||||
GetNetworkSpeed(string) (string, error)
|
||||
GetNetworkStatValue(dev string, stat string) (uint64, error)
|
||||
|
||||
// Get directory information for available caches accessible to given cpu.
|
||||
GetCaches(id int) ([]os.FileInfo, error)
|
||||
// Get information for a cache accessible from the given cpu.
|
||||
GetCacheInfo(cpu int, cache string) (CacheInfo, error)
|
||||
|
||||
GetSystemUUID() (string, error)
|
||||
|
||||
// GetDistances returns distance array
|
||||
GetDistances(string) (string, error)
|
||||
|
||||
// IsCPUOnline determines if CPU status from kernel hotplug machanism standpoint.
|
||||
// See: https://www.kernel.org/doc/html/latest/core-api/cpu_hotplug.html
|
||||
IsCPUOnline(dir string) bool
|
||||
}
|
||||
|
||||
type realSysFs struct {
|
||||
cpuPath string
|
||||
}
|
||||
|
||||
func NewRealSysFs() SysFs {
|
||||
return &realSysFs{
|
||||
cpuPath: "/sys/devices/system/cpu",
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNodesPaths() ([]string, error) {
|
||||
pathPattern := fmt.Sprintf("%s%s", nodeDir, nodeDirPattern)
|
||||
return filepath.Glob(pathPattern)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
|
||||
pathPattern := fmt.Sprintf("%s/%s", cpusPath, cpuDirPattern)
|
||||
return filepath.Glob(pathPattern)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCoreID(cpuPath string) (string, error) {
|
||||
coreIDFilePath := fmt.Sprintf("%s%s", cpuPath, coreIDFilePath)
|
||||
coreID, err := os.ReadFile(coreIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(coreID)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
|
||||
packageIDFilePath := fmt.Sprintf("%s%s", cpuPath, packageIDFilePath)
|
||||
packageID, err := os.ReadFile(packageIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(packageID)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBookID(cpuPath string) (string, error) {
|
||||
bookIDFilePath := fmt.Sprintf("%s%s", cpuPath, bookIDFilePath)
|
||||
bookID, err := os.ReadFile(bookIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(bookID)), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetDrawerID(cpuPath string) (string, error) {
|
||||
drawerIDFilePath := fmt.Sprintf("%s%s", cpuPath, drawerIDFilePath)
|
||||
drawerID, err := os.ReadFile(drawerIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(drawerID)), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetMemInfo(nodePath string) (string, error) {
|
||||
meminfoPath := fmt.Sprintf("%s/%s", nodePath, meminfoFile)
|
||||
meminfo, err := os.ReadFile(meminfoPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(meminfo)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetDistances(nodePath string) (string, error) {
|
||||
distancePath := fmt.Sprintf("%s/%s", nodePath, distanceFile)
|
||||
distance, err := os.ReadFile(distancePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(distance)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error) {
|
||||
dirs, err := os.ReadDir(hugePagesDirectory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toFileInfo(dirs)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
|
||||
hugePageFilePath := fmt.Sprintf("%s%s/%s", hugepagesDirectory, hugePageName, HugePagesNrFile)
|
||||
hugePageFile, err := os.ReadFile(hugePageFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(hugePageFile)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
|
||||
dirs, err := os.ReadDir(blockDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toFileInfo(dirs)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
|
||||
dev, err := os.ReadFile(path.Join(blockDir, name, "/dev"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(dev), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) IsBlockDeviceHidden(name string) (bool, error) {
|
||||
// See: https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-block
|
||||
// https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
|
||||
// - c8487d854ba5 ("lsblk: Ignore hidden devices")
|
||||
devHiddenPath := path.Join(blockDir, name, "/hidden")
|
||||
hidden, err := os.ReadFile(devHiddenPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
// older OS may not have /hidden sysfs entry, so for sure
|
||||
// it is not a hidden device...
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to read %s: %w", devHiddenPath, err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(hidden)) == "1", nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
|
||||
sched, err := os.ReadFile(path.Join(blockDir, name, "/queue/scheduler"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(sched), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDeviceSize(name string) (string, error) {
|
||||
size, err := os.ReadFile(path.Join(blockDir, name, "/size"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(size), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
|
||||
dirs, err := os.ReadDir(netDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files, err := toFileInfo(dirs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Filter out non-directory & non-symlink files
|
||||
filtered := []os.FileInfo{}
|
||||
for _, f := range files {
|
||||
if f.Mode()|os.ModeSymlink != 0 {
|
||||
f, err = os.Stat(path.Join(netDir, f.Name()))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f.IsDir() {
|
||||
filtered = append(filtered, f)
|
||||
}
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkAddress(name string) (string, error) {
|
||||
address, err := os.ReadFile(path.Join(netDir, name, "/address"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(address), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkMtu(name string) (string, error) {
|
||||
mtu, err := os.ReadFile(path.Join(netDir, name, "/mtu"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(mtu), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkSpeed(name string) (string, error) {
|
||||
speed, err := os.ReadFile(path.Join(netDir, name, "/speed"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(speed), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
|
||||
statPath := path.Join(netDir, dev, "/statistics", stat)
|
||||
out, err := os.ReadFile(statPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to read stat from %q for device %q", statPath, dev)
|
||||
}
|
||||
var s uint64
|
||||
n, err := fmt.Sscanf(string(out), "%d", &s)
|
||||
if err != nil || n != 1 {
|
||||
return 0, fmt.Errorf("could not parse value from %q for file %s", string(out), statPath)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCaches(id int) ([]os.FileInfo, error) {
|
||||
cpuPath := fmt.Sprintf("%s%d/cache", cacheDir, id)
|
||||
dir, err := os.ReadDir(cpuPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toFileInfo(dir)
|
||||
}
|
||||
|
||||
func toFileInfo(dirs []os.DirEntry) ([]os.FileInfo, error) {
|
||||
info := []os.FileInfo{}
|
||||
for _, dir := range dirs {
|
||||
fI, err := dir.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = append(info, fI)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func bitCount(i uint64) (count int) {
|
||||
for i != 0 {
|
||||
if i&1 == 1 {
|
||||
count++
|
||||
}
|
||||
i >>= 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCPUCount(cache string) (count int, err error) {
|
||||
out, err := os.ReadFile(path.Join(cache, "/shared_cpu_map"))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
masks := strings.Split(string(out), ",")
|
||||
for _, mask := range masks {
|
||||
// convert hex string to uint64
|
||||
m, err := strconv.ParseUint(strings.TrimSpace(mask), 16, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse cpu map %q: %v", string(out), err)
|
||||
}
|
||||
count += bitCount(m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCacheInfo(cpu int, name string) (CacheInfo, error) {
|
||||
cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, cpu, name)
|
||||
var id int
|
||||
if runtime.GOARCH != "s390x" {
|
||||
out, err := os.ReadFile(path.Join(cachePath, "/id"))
|
||||
if err != nil {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
n, err := fmt.Sscanf(string(out), "%d", &id)
|
||||
if err != nil || n != 1 {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
}
|
||||
|
||||
out, err := os.ReadFile(path.Join(cachePath, "/size"))
|
||||
if err != nil {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
var size uint64
|
||||
n, err := fmt.Sscanf(string(out), "%dK", &size)
|
||||
if err != nil || n != 1 {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
// convert to bytes
|
||||
size = size * 1024
|
||||
out, err = os.ReadFile(path.Join(cachePath, "/level"))
|
||||
if err != nil {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
var level int
|
||||
n, err = fmt.Sscanf(string(out), "%d", &level)
|
||||
if err != nil || n != 1 {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
|
||||
out, err = os.ReadFile(path.Join(cachePath, "/type"))
|
||||
if err != nil {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
cacheType := strings.TrimSpace(string(out))
|
||||
cpuCount, err := getCPUCount(cachePath)
|
||||
if err != nil {
|
||||
return CacheInfo{}, err
|
||||
}
|
||||
return CacheInfo{
|
||||
Id: id,
|
||||
Size: size,
|
||||
Level: level,
|
||||
Type: cacheType,
|
||||
Cpus: cpuCount,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetSystemUUID() (string, error) {
|
||||
if id, err := os.ReadFile(path.Join(dmiDir, "id", "product_uuid")); err == nil {
|
||||
return strings.TrimSpace(string(id)), nil
|
||||
} else if id, err = os.ReadFile(path.Join(ppcDevTree, "system-id")); err == nil {
|
||||
return strings.TrimSpace(strings.TrimRight(string(id), "\000")), nil
|
||||
} else if id, err = os.ReadFile(path.Join(ppcDevTree, "vm,uuid")); err == nil {
|
||||
return strings.TrimSpace(strings.TrimRight(string(id), "\000")), nil
|
||||
} else if id, err = os.ReadFile(path.Join(s390xDevTree, "machine-id")); err == nil {
|
||||
return strings.TrimSpace(string(id)), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *realSysFs) IsCPUOnline(cpuPath string) bool {
|
||||
cpuOnlinePath, err := filepath.Abs(fs.cpuPath + "/online")
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get absolute path for %s", cpuPath)
|
||||
return false
|
||||
}
|
||||
|
||||
// Quick check to determine if file exists: if it does not then kernel CPU hotplug is disabled and all CPUs are online.
|
||||
_, err = os.Stat(cpuOnlinePath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return true
|
||||
}
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to stat %s: %s", cpuOnlinePath, err)
|
||||
}
|
||||
|
||||
cpuID, err := getCPUID(cpuPath)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get CPU ID from path %s: %s", cpuPath, err)
|
||||
return false
|
||||
}
|
||||
|
||||
isOnline, err := isCPUOnline(cpuOnlinePath, cpuID)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get online CPUs list: %s", err)
|
||||
return false
|
||||
}
|
||||
return isOnline
|
||||
}
|
||||
|
||||
func getCPUID(dir string) (uint16, error) {
|
||||
regex := regexp.MustCompile("cpu([0-9]+)")
|
||||
matches := regex.FindStringSubmatch(dir)
|
||||
if len(matches) == 2 {
|
||||
id, err := strconv.Atoi(matches[1])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(id), nil
|
||||
}
|
||||
return 0, fmt.Errorf("can't get CPU ID from %s", dir)
|
||||
}
|
||||
|
||||
// isCPUOnline is copied from github.com/opencontainers/runc/libcontainer/cgroups/fs and modified to suite cAdvisor
|
||||
// needs as Apache 2.0 license allows.
|
||||
// It parses CPU list (such as: 0,3-5,10) into a struct that allows to determine quickly if CPU or particular ID is online.
|
||||
// see: https://github.com/opencontainers/runc/blob/ab27e12cebf148aa5d1ee3ad13d9fc7ae12bf0b6/libcontainer/cgroups/fs/cpuset.go#L45
|
||||
func isCPUOnline(path string, cpuID uint16) (bool, error) {
|
||||
fileContent, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(fileContent) == 0 {
|
||||
return false, fmt.Errorf("%s found to be empty", path)
|
||||
}
|
||||
|
||||
cpuList := strings.TrimSpace(string(fileContent))
|
||||
for _, s := range strings.Split(cpuList, ",") {
|
||||
splitted := strings.SplitN(s, "-", 3)
|
||||
switch len(splitted) {
|
||||
case 3:
|
||||
return false, fmt.Errorf("invalid values in %s", path)
|
||||
case 2:
|
||||
min, err := strconv.ParseUint(splitted[0], 10, 16)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
max, err := strconv.ParseUint(splitted[1], 10, 16)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if min > max {
|
||||
return false, fmt.Errorf("invalid values in %s", path)
|
||||
}
|
||||
// Return true, if the CPU under consideration is in the range of online CPUs.
|
||||
if cpuID >= uint16(min) && cpuID <= uint16(max) {
|
||||
return true, nil
|
||||
}
|
||||
case 1:
|
||||
value, err := strconv.ParseUint(s, 10, 16)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if uint16(value) == cpuID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Looks for sysfs cpu path containing given CPU property, e.g. core_id or physical_package_id
|
||||
// and returns number of unique values of given property, exemplary usage: getting number of CPU physical cores
|
||||
func GetUniqueCPUPropertyCount(cpuAttributesPath string, propertyName string) int {
|
||||
absCPUAttributesPath, err := filepath.Abs(cpuAttributesPath)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot make %s absolute", cpuAttributesPath)
|
||||
return 0
|
||||
}
|
||||
pathPattern := absCPUAttributesPath + "/cpu*[0-9]"
|
||||
sysCPUPaths, err := filepath.Glob(pathPattern)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot find files matching pattern (pathPattern: %s), number of unique %s set to 0", pathPattern, propertyName)
|
||||
return 0
|
||||
}
|
||||
cpuOnlinePath, err := filepath.Abs(cpuAttributesPath + "/online")
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get absolute path for %s", cpuAttributesPath+"/../online")
|
||||
return 0
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get online CPUs list: %s", err)
|
||||
return 0
|
||||
}
|
||||
uniques := make(map[string]bool)
|
||||
for _, sysCPUPath := range sysCPUPaths {
|
||||
cpuID, err := getCPUID(sysCPUPath)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get CPU ID from path %s: %s", sysCPUPath, err)
|
||||
return 0
|
||||
}
|
||||
isOnline, err := isCPUOnline(cpuOnlinePath, cpuID)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
klog.V(1).Infof("Unable to determine CPU online state: %s", err)
|
||||
continue
|
||||
}
|
||||
if !isOnline && !os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
propertyPath := filepath.Join(sysCPUPath, sysFsCPUTopology, propertyName)
|
||||
propertyVal, err := os.ReadFile(propertyPath)
|
||||
if err != nil {
|
||||
klog.Warningf("Cannot open %s, assuming 0 for %s of CPU %d", propertyPath, propertyName, cpuID)
|
||||
propertyVal = []byte("0")
|
||||
}
|
||||
packagePath := filepath.Join(sysCPUPath, sysFsCPUTopology, CPUPhysicalPackageID)
|
||||
packageVal, err := os.ReadFile(packagePath)
|
||||
if err != nil {
|
||||
klog.Warningf("Cannot open %s, assuming 0 %s of CPU %d", packagePath, CPUPhysicalPackageID, cpuID)
|
||||
packageVal = []byte("0")
|
||||
|
||||
}
|
||||
uniques[fmt.Sprintf("%s_%s", bytes.TrimSpace(propertyVal), bytes.TrimSpace(packageVal))] = true
|
||||
}
|
||||
return len(uniques)
|
||||
}
|
20
e2e/vendor/github.com/google/cadvisor/utils/sysfs/sysfs_notx86.go
generated
vendored
Normal file
20
e2e/vendor/github.com/google/cadvisor/utils/sysfs/sysfs_notx86.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
//go:build !x86
|
||||
// +build !x86
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysfs
|
||||
|
||||
var isX86 = false
|
20
e2e/vendor/github.com/google/cadvisor/utils/sysfs/sysfs_x86.go
generated
vendored
Normal file
20
e2e/vendor/github.com/google/cadvisor/utils/sysfs/sysfs_x86.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
//go:build x86
|
||||
// +build x86
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysfs
|
||||
|
||||
var isX86 = true
|
614
e2e/vendor/github.com/google/cadvisor/utils/sysinfo/sysinfo.go
generated
vendored
Normal file
614
e2e/vendor/github.com/google/cadvisor/utils/sysinfo/sysinfo.go
generated
vendored
Normal file
@ -0,0 +1,614 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysinfo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/utils/sysfs"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
schedulerRegExp = regexp.MustCompile(`.*\[(.*)\].*`)
|
||||
nodeDirRegExp = regexp.MustCompile(`node/node(\d*)`)
|
||||
cpuDirRegExp = regexp.MustCompile(`/cpu(\d+)`)
|
||||
memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
|
||||
|
||||
cpusPath = "/sys/devices/system/cpu"
|
||||
)
|
||||
|
||||
const (
|
||||
cacheLevel2 = 2
|
||||
hugepagesDir = "hugepages/"
|
||||
)
|
||||
|
||||
// Get information about block devices present on the system.
|
||||
// Uses the passed in system interface to retrieve the low level OS information.
|
||||
func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) {
|
||||
disks, err := sysfs.GetBlockDevices()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diskMap := make(map[string]info.DiskInfo)
|
||||
for _, disk := range disks {
|
||||
name := disk.Name()
|
||||
// Ignore non-disk devices.
|
||||
// TODO(rjnagal): Maybe just match hd, sd, and dm prefixes.
|
||||
if strings.HasPrefix(name, "loop") || strings.HasPrefix(name, "ram") || strings.HasPrefix(name, "sr") {
|
||||
continue
|
||||
}
|
||||
// Ignore "hidden" devices (i.e. nvme path device sysfs entries).
|
||||
// These devices are in the form of /dev/nvme$Xc$Yn$Z and will
|
||||
// not have a device handle (i.e. "hidden")
|
||||
isHidden, err := sysfs.IsBlockDeviceHidden(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isHidden {
|
||||
continue
|
||||
}
|
||||
diskInfo := info.DiskInfo{
|
||||
Name: name,
|
||||
}
|
||||
dev, err := sysfs.GetBlockDeviceNumbers(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n, err := fmt.Sscanf(dev, "%d:%d", &diskInfo.Major, &diskInfo.Minor)
|
||||
if err != nil || n != 2 {
|
||||
return nil, fmt.Errorf("could not parse device numbers from %s for device %s", dev, name)
|
||||
}
|
||||
out, err := sysfs.GetBlockDeviceSize(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Remove trailing newline before conversion.
|
||||
size, err := strconv.ParseUint(strings.TrimSpace(out), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// size is in 512 bytes blocks.
|
||||
diskInfo.Size = size * 512
|
||||
|
||||
diskInfo.Scheduler = "none"
|
||||
blkSched, err := sysfs.GetBlockDeviceScheduler(name)
|
||||
if err == nil {
|
||||
matches := schedulerRegExp.FindSubmatch([]byte(blkSched))
|
||||
if len(matches) >= 2 {
|
||||
diskInfo.Scheduler = string(matches[1])
|
||||
}
|
||||
}
|
||||
device := fmt.Sprintf("%d:%d", diskInfo.Major, diskInfo.Minor)
|
||||
diskMap[device] = diskInfo
|
||||
}
|
||||
return diskMap, nil
|
||||
}

// Get information about network devices present on the system.
func GetNetworkDevices(sysfs sysfs.SysFs) ([]info.NetInfo, error) {
	devs, err := sysfs.GetNetworkDevices()
	if err != nil {
		return nil, err
	}
	netDevices := []info.NetInfo{}
	for _, dev := range devs {
		name := dev.Name()
		// Ignore loopback, veth, docker, and nerdctl devices.
		ignoredDevices := []string{"lo", "veth", "docker", "nerdctl"}
		ignored := false
		for _, prefix := range ignoredDevices {
			if strings.HasPrefix(name, prefix) {
				ignored = true
				break
			}
		}
		if ignored {
			continue
		}
		address, err := sysfs.GetNetworkAddress(name)
		if err != nil {
			return nil, err
		}
		mtuStr, err := sysfs.GetNetworkMtu(name)
		if err != nil {
			return nil, err
		}
		var mtu int64
		n, err := fmt.Sscanf(mtuStr, "%d", &mtu)
		if err != nil || n != 1 {
			return nil, fmt.Errorf("could not parse mtu from %s for device %s", mtuStr, name)
		}
		netInfo := info.NetInfo{
			Name:       name,
			MacAddress: strings.TrimSpace(address),
			Mtu:        mtu,
		}
		speed, err := sysfs.GetNetworkSpeed(name)
		// Some devices don't set speed.
		if err == nil {
			var s int64
			n, err := fmt.Sscanf(speed, "%d", &s)
			if err != nil || n != 1 {
				return nil, fmt.Errorf("could not parse speed from %s for device %s", speed, name)
			}
			netInfo.Speed = s
		}
		netDevices = append(netDevices, netInfo)
	}
	return netDevices, nil
}

// GetHugePagesInfo returns information about pre-allocated huge pages
// hugepagesDirectory should be top directory of hugepages
// Such as: /sys/kernel/mm/hugepages/
func GetHugePagesInfo(sysFs sysfs.SysFs, hugepagesDirectory string) ([]info.HugePagesInfo, error) {
	var hugePagesInfo []info.HugePagesInfo
	files, err := sysFs.GetHugePagesInfo(hugepagesDirectory)
	if err != nil {
		// treat as non-fatal since kernels and machines can be
		// configured to disable hugepage support
		return hugePagesInfo, nil
	}

	for _, st := range files {
		nameArray := strings.Split(st.Name(), "-")
		pageSizeArray := strings.Split(nameArray[1], "kB")
		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
		if err != nil {
			return hugePagesInfo, err
		}

		val, err := sysFs.GetHugePagesNr(hugepagesDirectory, st.Name())
		if err != nil {
			return hugePagesInfo, err
		}
		var numPages uint64
		// we use sscanf as the file has a new-line that trips up ParseUint
		// it returns the number of tokens successfully parsed, so if
		// n != 1, it means we were unable to parse a number from the file
		n, err := fmt.Sscanf(string(val), "%d", &numPages)
		if err != nil || n != 1 {
			return hugePagesInfo, fmt.Errorf("could not parse file nr_hugepage for %s, contents %q", st.Name(), string(val))
		}

		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
			NumPages: numPages,
			PageSize: pageSize,
		})
	}
	return hugePagesInfo, nil
}
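
// For reference, entries under /sys/kernel/mm/hugepages/ are named like
// "hugepages-2048kB" or "hugepages-1048576kB"; the code above derives the
// page size from the directory name and reads its nr_hugepages file to get
// the number of pre-allocated pages.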

// GetNodesInfo returns information about NUMA nodes and their topology
func GetNodesInfo(sysFs sysfs.SysFs) ([]info.Node, int, error) {
	nodes := []info.Node{}
	allLogicalCoresCount := 0

	nodesDirs, err := sysFs.GetNodesPaths()
	if err != nil {
		return nil, 0, err
	}

	if len(nodesDirs) == 0 {
		klog.V(4).Info("Nodes topology is not available, providing CPU topology")
		return getCPUTopology(sysFs)
	}

	for _, nodeDir := range nodesDirs {
		id, err := getMatchedInt(nodeDirRegExp, nodeDir)
		if err != nil {
			return nil, 0, err
		}
		node := info.Node{Id: id}

		cpuDirs, err := sysFs.GetCPUsPaths(nodeDir)
		if len(cpuDirs) == 0 {
			klog.Warningf("Found node without any CPU, nodeDir: %s, number of cpuDirs %d, err: %v", nodeDir, len(cpuDirs), err)
		} else {
			cores, err := getCoresInfo(sysFs, cpuDirs)
			if err != nil {
				return nil, 0, err
			}
			node.Cores = cores
			for _, core := range cores {
				allLogicalCoresCount += len(core.Threads)
			}
		}

		// On some Linux platforms (such as Arm64 guest kernel), cache info may not exist.
		// So, we should ignore error here.
		err = addCacheInfo(sysFs, &node)
		if err != nil {
			klog.V(1).Infof("Found node without cache information, nodeDir: %s", nodeDir)
		}

		node.Memory, err = getNodeMemInfo(sysFs, nodeDir)
		if err != nil {
			return nil, 0, err
		}

		hugepagesDirectory := fmt.Sprintf("%s/%s", nodeDir, hugepagesDir)
		node.HugePages, err = GetHugePagesInfo(sysFs, hugepagesDirectory)
		if err != nil {
			return nil, 0, err
		}

		node.Distances, err = getDistances(sysFs, nodeDir)
		if err != nil {
			return nil, 0, err
		}

		nodes = append(nodes, node)
	}
	return nodes, allLogicalCoresCount, err
}

func getCPUTopology(sysFs sysfs.SysFs) ([]info.Node, int, error) {
	nodes := []info.Node{}

	cpusPaths, err := sysFs.GetCPUsPaths(cpusPath)
	if err != nil {
		return nil, 0, err
	}
	cpusCount := len(cpusPaths)

	if cpusCount == 0 {
		err = fmt.Errorf("Any CPU is not available, cpusPath: %s", cpusPath)
		return nil, 0, err
	}

	cpusByPhysicalPackageID, err := getCpusByPhysicalPackageID(sysFs, cpusPaths)
	if err != nil {
		return nil, 0, err
	}

	if len(cpusByPhysicalPackageID) == 0 {
		klog.Warningf("Cannot read any physical package id for any CPU")
		return nil, cpusCount, nil
	}

	for physicalPackageID, cpus := range cpusByPhysicalPackageID {
		node := info.Node{Id: physicalPackageID}

		cores, err := getCoresInfo(sysFs, cpus)
		if err != nil {
			return nil, 0, err
		}
		node.Cores = cores

		// On some Linux platforms (such as Arm64 guest kernel), cache info may not exist.
		// So, we should ignore error here.
		err = addCacheInfo(sysFs, &node)
		if err != nil {
			klog.V(1).Infof("Found cpu without cache information, cpuPath: %s", cpus)
		}
		nodes = append(nodes, node)
	}
	return nodes, cpusCount, nil
}

func getCpusByPhysicalPackageID(sysFs sysfs.SysFs, cpusPaths []string) (map[int][]string, error) {
	cpuPathsByPhysicalPackageID := make(map[int][]string)
	for _, cpuPath := range cpusPaths {

		rawPhysicalPackageID, err := sysFs.GetCPUPhysicalPackageID(cpuPath)
		if os.IsNotExist(err) {
			klog.Warningf("Cannot read physical package id for %s, physical_package_id file does not exist, err: %s", cpuPath, err)
			continue
		} else if err != nil {
			return nil, err
		}

		physicalPackageID, err := strconv.Atoi(rawPhysicalPackageID)
		if err != nil {
			return nil, err
		}

		if _, ok := cpuPathsByPhysicalPackageID[physicalPackageID]; !ok {
			cpuPathsByPhysicalPackageID[physicalPackageID] = make([]string, 0)
		}

		cpuPathsByPhysicalPackageID[physicalPackageID] = append(cpuPathsByPhysicalPackageID[physicalPackageID], cpuPath)
	}
	return cpuPathsByPhysicalPackageID, nil
}

// addCacheInfo adds information about cache for NUMA node
func addCacheInfo(sysFs sysfs.SysFs, node *info.Node) error {
	for coreID, core := range node.Cores {
		threadID := core.Threads[0] // get any thread for core
		caches, err := GetCacheInfo(sysFs, threadID)
		if err != nil {
			return err
		}

		numThreadsPerCore := len(core.Threads)
		numThreadsPerNode := len(node.Cores) * numThreadsPerCore

		for _, cache := range caches {
			c := info.Cache{
				Id:    cache.Id,
				Size:  cache.Size,
				Level: cache.Level,
				Type:  cache.Type,
			}
			if cache.Level > cacheLevel2 {
				if cache.Cpus == numThreadsPerNode {
					// Add a node level cache.
					cacheFound := false
					for _, nodeCache := range node.Caches {
						if nodeCache == c {
							cacheFound = true
						}
					}
					if !cacheFound {
						node.Caches = append(node.Caches, c)
					}
				} else {
					// Add uncore cache, for architectures in which the L3 cache is shared among only some cores.
					uncoreCacheFound := false
					for _, uncoreCache := range node.Cores[coreID].UncoreCaches {
						if uncoreCache == c {
							uncoreCacheFound = true
						}
					}
					if !uncoreCacheFound {
						node.Cores[coreID].UncoreCaches = append(node.Cores[coreID].UncoreCaches, c)
					}
				}
			} else if cache.Cpus == numThreadsPerCore {
				// Add core level cache
				node.Cores[coreID].Caches = append(node.Cores[coreID].Caches, c)
			}
			// Ignore unknown caches.
		}
	}
	return nil
}

// getNodeMemInfo returns information about total memory for NUMA node
func getNodeMemInfo(sysFs sysfs.SysFs, nodeDir string) (uint64, error) {
	rawMem, err := sysFs.GetMemInfo(nodeDir)
	if err != nil {
		// Ignore if per-node info is not available.
		klog.Warningf("Found node without memory information, nodeDir: %s", nodeDir)
		return 0, nil
	}
	matches := memoryCapacityRegexp.FindStringSubmatch(rawMem)
	if len(matches) != 2 {
		return 0, fmt.Errorf("failed to match regexp in output: %q", string(rawMem))
	}
	memory, err := strconv.ParseUint(matches[1], 10, 64)
	if err != nil {
		return 0, err
	}
	memory = memory * 1024 // Convert to bytes
	return uint64(memory), nil
}

// getDistances returns information about distances between NUMA nodes
func getDistances(sysFs sysfs.SysFs, nodeDir string) ([]uint64, error) {
	rawDistance, err := sysFs.GetDistances(nodeDir)
	if err != nil {
		// Ignore if per-node info is not available.
		klog.Warningf("Found node without distance information, nodeDir: %s", nodeDir)
		return nil, nil
	}

	distances := []uint64{}
	for _, distance := range strings.Split(rawDistance, " ") {
		distanceUint, err := strconv.ParseUint(distance, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("cannot convert %s to int", distance)
		}
		distances = append(distances, distanceUint)
	}

	return distances, nil
}
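
// For reference, /sys/devices/system/node/node<X>/distance contains one
// space-separated value per node; by ACPI SLIT convention the distance to
// the local node is 10 and remote nodes are typically 20 or more.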

// getCoresInfo returns information about physical cores
func getCoresInfo(sysFs sysfs.SysFs, cpuDirs []string) ([]info.Core, error) {
	cores := make([]info.Core, 0, len(cpuDirs))
	for _, cpuDir := range cpuDirs {
		cpuID, err := getMatchedInt(cpuDirRegExp, cpuDir)
		if err != nil {
			return nil, fmt.Errorf("unexpected format of CPU directory, cpuDirRegExp %s, cpuDir: %s", cpuDirRegExp, cpuDir)
		}
		if !sysFs.IsCPUOnline(cpuDir) {
			continue
		}

		rawPhysicalID, err := sysFs.GetCoreID(cpuDir)
		if os.IsNotExist(err) {
			klog.Warningf("Cannot read core id for %s, core_id file does not exist, err: %s", cpuDir, err)
			continue
		} else if err != nil {
			return nil, err
		}
		physicalID, err := strconv.Atoi(rawPhysicalID)
		if err != nil {
			return nil, err
		}

		rawPhysicalPackageID, err := sysFs.GetCPUPhysicalPackageID(cpuDir)
		if os.IsNotExist(err) {
			klog.Warningf("Cannot read physical package id for %s, physical_package_id file does not exist, err: %s", cpuDir, err)
			continue
		} else if err != nil {
			return nil, err
		}

		physicalPackageID, err := strconv.Atoi(rawPhysicalPackageID)
		if err != nil {
			return nil, err
		}

		var bookID, drawerID string
		// s390/s390x additional cpu topology levels
		if runtime.GOARCH == "s390x" {
			bookID, err = sysFs.GetBookID(cpuDir)
			if os.IsNotExist(err) {
				klog.Warningf("Cannot read book id for %s, book_id file does not exist, err: %s", cpuDir, err)
				continue
			} else if err != nil {
				return nil, err
			}
			drawerID, err = sysFs.GetDrawerID(cpuDir)
			if os.IsNotExist(err) {
				klog.Warningf("Cannot read drawer id for %s, drawer_id file does not exist, err: %s", cpuDir, err)
				continue
			} else if err != nil {
				return nil, err
			}
		}

		coreIDx := -1
		for id, core := range cores {
			if core.Id == physicalID && core.SocketID == physicalPackageID {
				// For s390x, we need to check the BookID and DrawerID match as well.
				if runtime.GOARCH != "s390x" || (core.BookID == bookID && core.DrawerID == drawerID) {
					coreIDx = id
				}
			}
		}

		if coreIDx == -1 {
			cores = append(cores, info.Core{})
			coreIDx = len(cores) - 1
		}
		desiredCore := &cores[coreIDx]

		desiredCore.Id = physicalID
		desiredCore.SocketID = physicalPackageID
		desiredCore.BookID = bookID
		desiredCore.DrawerID = drawerID

		if len(desiredCore.Threads) == 0 {
			desiredCore.Threads = []int{cpuID}
		} else {
			desiredCore.Threads = append(desiredCore.Threads, cpuID)
		}

	}
	return cores, nil
}

// GetCacheInfo returns information about a cache accessible from the given cpu thread
func GetCacheInfo(sysFs sysfs.SysFs, id int) ([]sysfs.CacheInfo, error) {
	caches, err := sysFs.GetCaches(id)
	if err != nil {
		return nil, err
	}

	info := []sysfs.CacheInfo{}
	for _, cache := range caches {
		if !strings.HasPrefix(cache.Name(), "index") {
			continue
		}
		cacheInfo, err := sysFs.GetCacheInfo(id, cache.Name())
		if err != nil {
			return nil, err
		}
		info = append(info, cacheInfo)
	}
	return info, nil
}

func getNetworkStats(name string, sysFs sysfs.SysFs) (info.InterfaceStats, error) {
	var stats info.InterfaceStats
	var err error
	stats.Name = name
	stats.RxBytes, err = sysFs.GetNetworkStatValue(name, "rx_bytes")
	if err != nil {
		return stats, err
	}
	stats.RxPackets, err = sysFs.GetNetworkStatValue(name, "rx_packets")
	if err != nil {
		return stats, err
	}
	stats.RxErrors, err = sysFs.GetNetworkStatValue(name, "rx_errors")
	if err != nil {
		return stats, err
	}
	stats.RxDropped, err = sysFs.GetNetworkStatValue(name, "rx_dropped")
	if err != nil {
		return stats, err
	}
	stats.TxBytes, err = sysFs.GetNetworkStatValue(name, "tx_bytes")
	if err != nil {
		return stats, err
	}
	stats.TxPackets, err = sysFs.GetNetworkStatValue(name, "tx_packets")
	if err != nil {
		return stats, err
	}
	stats.TxErrors, err = sysFs.GetNetworkStatValue(name, "tx_errors")
	if err != nil {
		return stats, err
	}
	stats.TxDropped, err = sysFs.GetNetworkStatValue(name, "tx_dropped")
	if err != nil {
		return stats, err
	}
	return stats, nil
}

func GetSystemUUID(sysFs sysfs.SysFs) (string, error) {
	return sysFs.GetSystemUUID()
}

func getMatchedInt(rgx *regexp.Regexp, str string) (int, error) {
	matches := rgx.FindStringSubmatch(str)
	if len(matches) != 2 {
		return 0, fmt.Errorf("failed to match regexp, str: %s", str)
	}
	valInt, err := strconv.Atoi(matches[1])
	if err != nil {
		return 0, err
	}
	return valInt, nil
}

// GetSocketFromCPU returns the Socket ID of the passed CPU. If it is not present, returns -1.
func GetSocketFromCPU(topology []info.Node, cpu int) int {
	for _, node := range topology {
		found, coreID := node.FindCoreByThread(cpu)
		if found {
			return node.Cores[coreID].SocketID
		}
	}
	return -1
}

// GetOnlineCPUs returns available cores.
func GetOnlineCPUs(topology []info.Node) []int {
	onlineCPUs := make([]int, 0)
	for _, node := range topology {
		for _, core := range node.Cores {
			onlineCPUs = append(onlineCPUs, core.Threads...)
		}
	}
	return onlineCPUs
}
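
For orientation, a minimal sketch of how these helpers might be driven. It is illustrative only and not part of this vendored file; the machine package import path and the sysfs.NewRealSysFs constructor are assumptions here.

package main

import (
	"fmt"

	"github.com/google/cadvisor/machine"
	"github.com/google/cadvisor/utils/sysfs"
)

func main() {
	fs := sysfs.NewRealSysFs() // assumed constructor returning a sysfs.SysFs

	// Enumerate block devices, keyed by "major:minor".
	disks, err := machine.GetBlockDeviceInfo(fs)
	if err != nil {
		panic(err)
	}
	for dev, d := range disks {
		fmt.Printf("disk %s (%s): %d bytes, scheduler=%s\n", dev, d.Name, d.Size, d.Scheduler)
	}

	// Discover NUMA topology (falls back to plain CPU topology when no NUMA nodes exist).
	nodes, numCores, err := machine.GetNodesInfo(fs)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d NUMA node(s), %d logical CPU(s)\n", len(nodes), numCores)
}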
164
e2e/vendor/github.com/google/cadvisor/utils/timed_store.go
generated
vendored
Normal file
@ -0,0 +1,164 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import (
	"sort"
	"time"
)

type timedStoreDataSlice []timedStoreData

func (t timedStoreDataSlice) Less(i, j int) bool {
	return t[i].timestamp.Before(t[j].timestamp)
}

func (t timedStoreDataSlice) Len() int {
	return len(t)
}

func (t timedStoreDataSlice) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}

// A time-based buffer for ContainerStats.
// Holds information for a specific time period and/or a max number of items.
type TimedStore struct {
	buffer   timedStoreDataSlice
	age      time.Duration
	maxItems int
}

type timedStoreData struct {
	timestamp time.Time
	data      interface{}
}

// Returns a new thread-compatible TimedStore.
// A maxItems value of -1 means no limit.
func NewTimedStore(age time.Duration, maxItems int) *TimedStore {
	return &TimedStore{
		buffer:   make(timedStoreDataSlice, 0),
		age:      age,
		maxItems: maxItems,
	}
}

// Adds an element to the start of the buffer (removing one from the end if necessary).
func (s *TimedStore) Add(timestamp time.Time, item interface{}) {
	data := timedStoreData{
		timestamp: timestamp,
		data:      item,
	}
	// Common case: data is added in order.
	if len(s.buffer) == 0 || !timestamp.Before(s.buffer[len(s.buffer)-1].timestamp) {
		s.buffer = append(s.buffer, data)
	} else {
		// Data is out of order; insert it in the correct position.
		index := sort.Search(len(s.buffer), func(index int) bool {
			return s.buffer[index].timestamp.After(timestamp)
		})
		s.buffer = append(s.buffer, timedStoreData{}) // Make room to shift the elements
		copy(s.buffer[index+1:], s.buffer[index:])    // Shift the elements over
		s.buffer[index] = data
	}

	// Remove any elements before eviction time.
	// TODO(rjnagal): This is assuming that the added entry has timestamp close to now.
	evictTime := timestamp.Add(-s.age)
	index := sort.Search(len(s.buffer), func(index int) bool {
		return s.buffer[index].timestamp.After(evictTime)
	})
	if index < len(s.buffer) {
		s.buffer = s.buffer[index:]
	}

	// Remove any elements if over our max size.
	if s.maxItems >= 0 && len(s.buffer) > s.maxItems {
		startIndex := len(s.buffer) - s.maxItems
		s.buffer = s.buffer[startIndex:]
	}
}

// Returns up to maxResults elements in the specified time period (inclusive).
// Results are from first to last. maxResults of -1 means no limit.
func (s *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
	// No stats, return empty.
	if len(s.buffer) == 0 {
		return []interface{}{}
	}

	var startIndex int
	if start.IsZero() {
		// None specified, start at the beginning.
		startIndex = len(s.buffer) - 1
	} else {
		// Start is the index before the elements smaller than it. We do this by
		// finding the first element smaller than start and taking the index
		// before that element
		startIndex = sort.Search(len(s.buffer), func(index int) bool {
			// buffer[index] < start
			return s.getData(index).timestamp.Before(start)
		}) - 1
		// Check if start is after all the data we have.
		if startIndex < 0 {
			return []interface{}{}
		}
	}

	var endIndex int
	if end.IsZero() {
		// None specified, end with the latest stats.
		endIndex = 0
	} else {
		// End is the first index smaller than or equal to it (so, not larger).
		endIndex = sort.Search(len(s.buffer), func(index int) bool {
			// buffer[index] <= end -> !(buffer[index] > end)
			return !s.getData(index).timestamp.After(end)
		})
		// Check if end is before all the data we have.
		if endIndex == len(s.buffer) {
			return []interface{}{}
		}
	}

	// Trim to maxResults size.
	numResults := startIndex - endIndex + 1
	if maxResults != -1 && numResults > maxResults {
		startIndex -= numResults - maxResults
		numResults = maxResults
	}

	// Return in sorted timestamp order, i.e. from the "back" to the "front".
	result := make([]interface{}, numResults)
	for i := 0; i < numResults; i++ {
		result[i] = s.Get(startIndex - i)
	}
	return result
}

// Gets the element at the specified index. Note that elements are output in LIFO order.
func (s *TimedStore) Get(index int) interface{} {
	return s.getData(index).data
}

// Gets the data at the specified index. Note that elements are output in LIFO order.
func (s *TimedStore) getData(index int) timedStoreData {
	return s.buffer[len(s.buffer)-index-1]
}

func (s *TimedStore) Size() int {
	return len(s.buffer)
}
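
For context, a minimal usage sketch of TimedStore. It is illustrative only (the samples are made up) and uses the vendored import path github.com/google/cadvisor/utils.

package main

import (
	"fmt"
	"time"

	"github.com/google/cadvisor/utils"
)

func main() {
	// Keep up to 2 minutes of samples, capped at 100 entries (-1 would mean no cap).
	store := utils.NewTimedStore(2*time.Minute, 100)

	now := time.Now()
	store.Add(now.Add(-30*time.Second), "older sample")
	store.Add(now, "newest sample")

	// InTimeRange returns results ordered oldest to newest; -1 means no result limit.
	for _, item := range store.InTimeRange(now.Add(-time.Minute), now, -1) {
		fmt.Println(item.(string))
	}
}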
29
e2e/vendor/github.com/google/cadvisor/utils/utils.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import "fmt"

// Returns a mask of all cores on the machine if the passed-in mask is empty.
func FixCpuMask(mask string, cores int) string {
	if mask == "" {
		if cores > 1 {
			mask = fmt.Sprintf("0-%d", cores-1)
		} else {
			mask = "0"
		}
	}
	return mask
}
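
A quick note on FixCpuMask's behavior for a few sample inputs (illustrative, derived from the function above):

// FixCpuMask("", 8)    == "0-7"  // empty mask expands to all cores 0..cores-1
// FixCpuMask("", 1)    == "0"
// FixCpuMask("0-3", 8) == "0-3"  // a non-empty mask is returned unchanged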