mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 02:33:34 +00:00
rebase: update K8s packages to v0.32.1
Update K8s packages in go.mod to v0.32.1 Signed-off-by: Praveen M <m.praveen@ibm.com>
This commit is contained in:
1
vendor/github.com/google/btree/.travis.yml
generated
vendored
Normal file
1
vendor/github.com/google/btree/.travis.yml
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
language: go
|
202
vendor/github.com/google/btree/LICENSE
generated
vendored
Normal file
202
vendor/github.com/google/btree/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
12
vendor/github.com/google/btree/README.md
generated
vendored
Normal file
12
vendor/github.com/google/btree/README.md
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
# BTree implementation for Go
|
||||
|
||||
![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
|
||||
|
||||
This package provides an in-memory B-Tree implementation for Go, useful as
|
||||
an ordered, mutable data structure.
|
||||
|
||||
The API is based off of the wonderful
|
||||
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
|
||||
act as a drop-in replacement for gollrb trees.
|
||||
|
||||
See http://godoc.org/github.com/google/btree for documentation.
|
890
vendor/github.com/google/btree/btree.go
generated
vendored
Normal file
890
vendor/github.com/google/btree/btree.go
generated
vendored
Normal file
@ -0,0 +1,890 @@
|
||||
// Copyright 2014 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children.  For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
//   * Due to the overhead of storing values as interfaces (each
//     value needs to be stored as the value itself, then 2 words for the
//     interface pointing to that value and its type), resulting in higher
//     memory use.
//   * Since interfaces can point to values anywhere in memory, values are
//     most likely not stored in contiguous blocks, resulting in a higher
//     number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
// support storing multiple equivalent values.
package btree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Item represents a single object in the tree.
type Item interface {
	// Less tests whether the current item is less than the given argument.
	//
	// This must provide a strict weak ordering.
	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
	// hold one of either a or b in the tree).
	Less(than Item) bool
}
|
||||
|
||||
const (
	// DefaultFreeListSize is the capacity of the FreeList that New creates
	// for each tree when the caller does not supply one.
	DefaultFreeListSize = 32
)
|
||||
|
||||
var (
	// nilItems and nilChildren are zero-filled scratch slices used by the
	// truncate methods to nil-out freed slots (via copy) in 16-element
	// chunks, so the garbage collector can reclaim the referenced values.
	nilItems    = make(items, 16)
	nilChildren = make(children, 16)
)
|
||||
|
||||
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are safe for concurrent write access.
type FreeList struct {
	mu       sync.Mutex // guards freelist
	freelist []*node    // stack of reusable nodes, bounded by cap(freelist)
}
|
||||
|
||||
// NewFreeList creates a new free list.
|
||||
// size is the maximum size of the returned free list.
|
||||
func NewFreeList(size int) *FreeList {
|
||||
return &FreeList{freelist: make([]*node, 0, size)}
|
||||
}
|
||||
|
||||
func (f *FreeList) newNode() (n *node) {
|
||||
f.mu.Lock()
|
||||
index := len(f.freelist) - 1
|
||||
if index < 0 {
|
||||
f.mu.Unlock()
|
||||
return new(node)
|
||||
}
|
||||
n = f.freelist[index]
|
||||
f.freelist[index] = nil
|
||||
f.freelist = f.freelist[:index]
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// freeNode adds the given node to the list, returning true if it was added
|
||||
// and false if it was discarded.
|
||||
func (f *FreeList) freeNode(n *node) (out bool) {
|
||||
f.mu.Lock()
|
||||
if len(f.freelist) < cap(f.freelist) {
|
||||
f.freelist = append(f.freelist, n)
|
||||
out = true
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree.  When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
|
||||
|
||||
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
//
// The tree gets its own FreeList of DefaultFreeListSize nodes.
func New(degree int) *BTree {
	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
|
||||
|
||||
// NewWithFreeList creates a new B-Tree that uses the given node free list.
|
||||
func NewWithFreeList(degree int, f *FreeList) *BTree {
|
||||
if degree <= 1 {
|
||||
panic("bad degree")
|
||||
}
|
||||
return &BTree{
|
||||
degree: degree,
|
||||
cow: ©OnWriteContext{freelist: f},
|
||||
}
|
||||
}
|
||||
|
||||
// items stores items in a node, kept sorted by Item.Less.
type items []Item
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *items) insertAt(index int, item Item) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = item
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *items) removeAt(index int) Item {
|
||||
item := (*s)[index]
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
(*s)[len(*s)-1] = nil
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *items) pop() (out Item) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// truncate truncates this instance at index so that it contains only the
// first index items.  index must be less than or equal to length.
func (s *items) truncate(index int) {
	var toClear items
	*s, toClear = (*s)[:index], (*s)[index:]
	// Nil-out the truncated tail (in 16-wide chunks copied from the
	// zero-filled nilItems) so the GC can reclaim the dropped values.
	for len(toClear) > 0 {
		toClear = toClear[copy(toClear, nilItems):]
	}
}
|
||||
|
||||
// find returns the index where the given item should be inserted into this
// list.  'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
	// Binary search for the first element strictly greater than item.
	i := sort.Search(len(s), func(i int) bool {
		return item.Less(s[i])
	})
	// If the previous element is not less than item either, the two are
	// equal under the strict weak ordering, so item is already present.
	if i > 0 && !s[i-1].Less(item) {
		return i - 1, true
	}
	return i, false
}
|
||||
|
||||
// children stores child nodes in a node, parallel to items: child i holds
// everything less than item i.
type children []*node
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *children) insertAt(index int, n *node) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = n
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *children) removeAt(index int) *node {
|
||||
n := (*s)[index]
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
(*s)[len(*s)-1] = nil
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return n
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *children) pop() (out *node) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// truncate truncates this instance at index so that it contains only the
// first index children.  index must be less than or equal to length.
func (s *children) truncate(index int) {
	var toClear children
	*s, toClear = (*s)[:index], (*s)[index:]
	// Nil-out the truncated tail (in 16-wide chunks copied from the
	// zero-filled nilChildren) so the GC can reclaim the dropped nodes.
	for len(toClear) > 0 {
		toClear = toClear[copy(toClear, nilChildren):]
	}
}
|
||||
|
||||
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
//   * len(children) == 0, len(items) unconstrained
//   * len(children) == len(items) + 1
type node struct {
	items    items
	children children
	// cow identifies which tree (copy-on-write context) owns this node;
	// nodes from another context must be cloned before mutation.
	cow *copyOnWriteContext
}
|
||||
|
||||
// mutableFor returns a version of n that is safe to mutate within the given
// copy-on-write context: n itself when it already belongs to cow, otherwise
// a fresh clone (items and children copied) allocated from cow.
func (n *node) mutableFor(cow *copyOnWriteContext) *node {
	if n.cow == cow {
		return n
	}
	out := cow.newNode()
	// Reuse the recycled node's backing arrays when they are big enough.
	if cap(out.items) >= len(n.items) {
		out.items = out.items[:len(n.items)]
	} else {
		out.items = make(items, len(n.items), cap(n.items))
	}
	copy(out.items, n.items)
	// Copy children
	if cap(out.children) >= len(n.children) {
		out.children = out.children[:len(n.children)]
	} else {
		out.children = make(children, len(n.children), cap(n.children))
	}
	copy(out.children, n.children)
	return out
}
|
||||
|
||||
func (n *node) mutableChild(i int) *node {
|
||||
c := n.children[i].mutableFor(n.cow)
|
||||
n.children[i] = c
|
||||
return c
|
||||
}
|
||||
|
||||
// split splits the given node at the given index.  The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
	item := n.items[i]
	next := n.cow.newNode()
	// Everything after index i moves to the new right-hand node.
	next.items = append(next.items, n.items[i+1:]...)
	n.items.truncate(i)
	if len(n.children) > 0 {
		next.children = append(next.children, n.children[i+1:]...)
		// Keep i+1 children on the left to preserve len(children)==len(items)+1.
		n.children.truncate(i + 1)
	}
	return item, next
}
|
||||
|
||||
// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
	if len(n.children[i].items) < maxItems {
		return false
	}
	// Clone-on-write the child before splitting it in place.
	first := n.mutableChild(i)
	item, second := first.split(maxItems / 2)
	// The median item is promoted into this node, with the new right half
	// inserted just after the (shrunken) left half.
	n.items.insertAt(i, item)
	n.children.insertAt(i+1, second)
	return true
}
|
||||
|
||||
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items.  Should an equivalent item
// be found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
	i, found := n.items.find(item)
	if found {
		// Equivalent item already in this node: replace it and hand back
		// the old value.
		out := n.items[i]
		n.items[i] = item
		return out
	}
	if len(n.children) == 0 {
		// Leaf node: insert directly.
		n.items.insertAt(i, item)
		return nil
	}
	// Pre-emptively split a full child so the recursive insert below can
	// never overflow it.
	if n.maybeSplitChild(i, maxItems) {
		// The split promoted an item into slot i; decide which side of it
		// our item belongs on.
		inTree := n.items[i]
		switch {
		case item.Less(inTree):
			// no change, we want first split node
		case inTree.Less(item):
			i++ // we want second split node
		default:
			// item equals the promoted median: replace it here.
			out := n.items[i]
			n.items[i] = item
			return out
		}
	}
	return n.mutableChild(i).insert(item, maxItems)
}
|
||||
|
||||
// get finds the given key in the subtree and returns it.
|
||||
func (n *node) get(key Item) Item {
|
||||
i, found := n.items.find(key)
|
||||
if found {
|
||||
return n.items[i]
|
||||
} else if len(n.children) > 0 {
|
||||
return n.children[i].get(key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// min returns the first item in the subtree.
|
||||
func min(n *node) Item {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
for len(n.children) > 0 {
|
||||
n = n.children[0]
|
||||
}
|
||||
if len(n.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.items[0]
|
||||
}
|
||||
|
||||
// max returns the last item in the subtree.
|
||||
func max(n *node) Item {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
for len(n.children) > 0 {
|
||||
n = n.children[len(n.children)-1]
|
||||
}
|
||||
if len(n.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.items[len(n.items)-1]
|
||||
}
|
||||
|
||||
// toRemove details what item to remove in a node.remove call.
type toRemove int

const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)
|
||||
|
||||
// remove removes an item from the subtree rooted at this node, returning
// the removed item (nil if typ == removeItem and the item was not found).
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	// Pick the child index (or leaf slot) to operate on.
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			return n.items.pop()
		}
		i = len(n.items)
	case removeMin:
		if len(n.children) == 0 {
			return n.items.removeAt(0)
		}
		i = 0
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	if len(n.children[i].items) <= minItems {
		// Child is at minimum occupancy: grow it first (steal/merge) and
		// retry, so the recursive removal below can never underflow it.
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	child := n.mutableChild(i)
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call.  Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}
|
||||
|
||||
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child: the separator item rotates down into the
		// child and the left sibling's last item rotates up to replace it.
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i - 1)
		stolenItem := stealFrom.items.pop()
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			// Keep the child-count invariant by moving the matching subtree.
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child (mirror image of the case above)
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i + 1)
		stolenItem := stealFrom.items.removeAt(0)
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		// Neither sibling can spare an item: merge child i with a neighbor.
		if i >= len(n.items) {
			// i is the last child; merge with its left neighbor instead.
			i--
		}
		child := n.mutableChild(i)
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		// The absorbed node can go back on the free list.
		n.cow.freeNode(mergeChild)
	}
	return n.remove(item, minItems, typ)
}
|
||||
|
||||
// direction selects the traversal order used by node.iterate.
type direction int

const (
	descend = direction(-1)
	ascend  = direction(+1)
)
|
||||
|
||||
// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
// "greaterThan" or "lessThan" queries.
//
// It returns (hit, ok): 'hit' records whether an item >= / <= start has been
// seen yet (threaded through the recursion), and ok==false means the iterator
// asked to stop or a bound was reached, so callers must stop too.
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
	var ok, found bool
	var index int
	switch dir {
	case ascend:
		if start != nil {
			// Skip items below start; find gives the first candidate slot.
			index, _ = n.items.find(start)
		}
		for i := index; i < len(n.items); i++ {
			// Visit the subtree to the left of item i first.
			if len(n.children) > 0 {
				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			// An exclusive 'start' bound skips the first equal item.
			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
				hit = true
				continue
			}
			hit = true
			// 'stop' is exclusive: halt once items reach it.
			if stop != nil && !n.items[i].Less(stop) {
				return hit, false
			}
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		// Finally the rightmost subtree.
		if len(n.children) > 0 {
			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	case descend:
		if start != nil {
			index, found = n.items.find(start)
			if !found {
				// find points past items < start; step back to the last
				// item that is < start.
				index = index - 1
			}
		} else {
			index = len(n.items) - 1
		}
		for i := index; i >= 0; i-- {
			if start != nil && !n.items[i].Less(start) {
				// Item is >= start: only emit it when it equals start, the
				// bound is inclusive, and it hasn't been emitted yet.
				if !includeStart || hit || start.Less(n.items[i]) {
					continue
				}
			}
			// Visit the subtree to the right of item i first (descending).
			if len(n.children) > 0 {
				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			// 'stop' is exclusive on the low side when descending.
			if stop != nil && !stop.Less(n.items[i]) {
				return hit, false // continue
			}
			hit = true
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		// Finally the leftmost subtree.
		if len(n.children) > 0 {
			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	}
	return hit, true
}
|
||||
|
||||
// Used for testing/debugging purposes.
|
||||
func (n *node) print(w io.Writer, level int) {
|
||||
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
|
||||
for _, c := range n.children {
|
||||
c.print(w, level+1)
|
||||
}
|
||||
}
|
||||
|
||||
// BTree is an implementation of a B-Tree.
|
||||
//
|
||||
// BTree stores Item instances in an ordered structure, allowing easy insertion,
|
||||
// removal, and iteration.
|
||||
//
|
||||
// Write operations are not safe for concurrent mutation by multiple
|
||||
// goroutines, but Read operations are.
|
||||
type BTree struct {
|
||||
degree int
|
||||
length int
|
||||
root *node
|
||||
cow *copyOnWriteContext
|
||||
}
|
||||
|
||||
// copyOnWriteContext pointers determine node ownership... a tree with a write
|
||||
// context equivalent to a node's write context is allowed to modify that node.
|
||||
// A tree whose write context does not match a node's is not allowed to modify
|
||||
// it, and must create a new, writable copy (IE: it's a Clone).
|
||||
//
|
||||
// When doing any write operation, we maintain the invariant that the current
|
||||
// node's context is equal to the context of the tree that requested the write.
|
||||
// We do this by, before we descend into any node, creating a copy with the
|
||||
// correct context if the contexts don't match.
|
||||
//
|
||||
// Since the node we're currently visiting on any write has the requesting
|
||||
// tree's context, that node is modifiable in place. Children of that node may
|
||||
// not share context, but before we descend into them, we'll make a mutable
|
||||
// copy.
|
||||
type copyOnWriteContext struct {
|
||||
freelist *FreeList
|
||||
}
|
||||
|
||||
// Clone clones the btree, lazily. Clone should not be called concurrently,
|
||||
// but the original tree (t) and the new tree (t2) can be used concurrently
|
||||
// once the Clone call completes.
|
||||
//
|
||||
// The internal tree structure of b is marked read-only and shared between t and
|
||||
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
|
||||
// whenever one of b's original nodes would have been modified. Read operations
|
||||
// should have no performance degredation. Write operations for both t and t2
|
||||
// will initially experience minor slow-downs caused by additional allocs and
|
||||
// copies due to the aforementioned copy-on-write logic, but should converge to
|
||||
// the original performance characteristics of the original tree.
|
||||
func (t *BTree) Clone() (t2 *BTree) {
|
||||
// Create two entirely new copy-on-write contexts.
|
||||
// This operation effectively creates three trees:
|
||||
// the original, shared nodes (old b.cow)
|
||||
// the new b.cow nodes
|
||||
// the new out.cow nodes
|
||||
cow1, cow2 := *t.cow, *t.cow
|
||||
out := *t
|
||||
t.cow = &cow1
|
||||
out.cow = &cow2
|
||||
return &out
|
||||
}
|
||||
|
||||
// maxItems returns the max number of items to allow per node.
|
||||
func (t *BTree) maxItems() int {
|
||||
return t.degree*2 - 1
|
||||
}
|
||||
|
||||
// minItems returns the min number of items to allow per node (ignored for the
|
||||
// root node).
|
||||
func (t *BTree) minItems() int {
|
||||
return t.degree - 1
|
||||
}
|
||||
|
||||
func (c *copyOnWriteContext) newNode() (n *node) {
|
||||
n = c.freelist.newNode()
|
||||
n.cow = c
|
||||
return
|
||||
}
|
||||
|
||||
type freeType int
|
||||
|
||||
const (
|
||||
ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
|
||||
ftStored // node was stored in the freelist for later use
|
||||
ftNotOwned // node was ignored by COW, since it's owned by another one
|
||||
)
|
||||
|
||||
// freeNode frees a node within a given COW context, if it's owned by that
|
||||
// context. It returns what happened to the node (see freeType const
|
||||
// documentation).
|
||||
func (c *copyOnWriteContext) freeNode(n *node) freeType {
|
||||
if n.cow == c {
|
||||
// clear to allow GC
|
||||
n.items.truncate(0)
|
||||
n.children.truncate(0)
|
||||
n.cow = nil
|
||||
if c.freelist.freeNode(n) {
|
||||
return ftStored
|
||||
} else {
|
||||
return ftFreelistFull
|
||||
}
|
||||
} else {
|
||||
return ftNotOwned
|
||||
}
|
||||
}
|
||||
|
||||
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
|
||||
// already equals the given one, it is removed from the tree and returned.
|
||||
// Otherwise, nil is returned.
|
||||
//
|
||||
// nil cannot be added to the tree (will panic).
|
||||
func (t *BTree) ReplaceOrInsert(item Item) Item {
|
||||
if item == nil {
|
||||
panic("nil item being added to BTree")
|
||||
}
|
||||
if t.root == nil {
|
||||
t.root = t.cow.newNode()
|
||||
t.root.items = append(t.root.items, item)
|
||||
t.length++
|
||||
return nil
|
||||
} else {
|
||||
t.root = t.root.mutableFor(t.cow)
|
||||
if len(t.root.items) >= t.maxItems() {
|
||||
item2, second := t.root.split(t.maxItems() / 2)
|
||||
oldroot := t.root
|
||||
t.root = t.cow.newNode()
|
||||
t.root.items = append(t.root.items, item2)
|
||||
t.root.children = append(t.root.children, oldroot, second)
|
||||
}
|
||||
}
|
||||
out := t.root.insert(item, t.maxItems())
|
||||
if out == nil {
|
||||
t.length++
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Delete removes an item equal to the passed in item from the tree, returning
|
||||
// it. If no such item exists, returns nil.
|
||||
func (t *BTree) Delete(item Item) Item {
|
||||
return t.deleteItem(item, removeItem)
|
||||
}
|
||||
|
||||
// DeleteMin removes the smallest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMin() Item {
|
||||
return t.deleteItem(nil, removeMin)
|
||||
}
|
||||
|
||||
// DeleteMax removes the largest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMax() Item {
|
||||
return t.deleteItem(nil, removeMax)
|
||||
}
|
||||
|
||||
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
|
||||
if t.root == nil || len(t.root.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
t.root = t.root.mutableFor(t.cow)
|
||||
out := t.root.remove(item, t.minItems(), typ)
|
||||
if len(t.root.items) == 0 && len(t.root.children) > 0 {
|
||||
oldroot := t.root
|
||||
t.root = t.root.children[0]
|
||||
t.cow.freeNode(oldroot)
|
||||
}
|
||||
if out != nil {
|
||||
t.length--
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AscendRange calls the iterator for every value in the tree within the range
|
||||
// [greaterOrEqual, lessThan), until iterator returns false.
|
||||
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
|
||||
}
|
||||
|
||||
// AscendLessThan calls the iterator for every value in the tree within the range
|
||||
// [first, pivot), until iterator returns false.
|
||||
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, nil, pivot, false, false, iterator)
|
||||
}
|
||||
|
||||
// AscendGreaterOrEqual calls the iterator for every value in the tree within
|
||||
// the range [pivot, last], until iterator returns false.
|
||||
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, pivot, nil, true, false, iterator)
|
||||
}
|
||||
|
||||
// Ascend calls the iterator for every value in the tree within the range
|
||||
// [first, last], until iterator returns false.
|
||||
func (t *BTree) Ascend(iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, nil, nil, false, false, iterator)
|
||||
}
|
||||
|
||||
// DescendRange calls the iterator for every value in the tree within the range
|
||||
// [lessOrEqual, greaterThan), until iterator returns false.
|
||||
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
|
||||
}
|
||||
|
||||
// DescendLessOrEqual calls the iterator for every value in the tree within the range
|
||||
// [pivot, first], until iterator returns false.
|
||||
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, pivot, nil, true, false, iterator)
|
||||
}
|
||||
|
||||
// DescendGreaterThan calls the iterator for every value in the tree within
|
||||
// the range [last, pivot), until iterator returns false.
|
||||
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, nil, pivot, false, false, iterator)
|
||||
}
|
||||
|
||||
// Descend calls the iterator for every value in the tree within the range
|
||||
// [last, first], until iterator returns false.
|
||||
func (t *BTree) Descend(iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, nil, nil, false, false, iterator)
|
||||
}
|
||||
|
||||
// Get looks for the key item in the tree, returning it. It returns nil if
|
||||
// unable to find that item.
|
||||
func (t *BTree) Get(key Item) Item {
|
||||
if t.root == nil {
|
||||
return nil
|
||||
}
|
||||
return t.root.get(key)
|
||||
}
|
||||
|
||||
// Min returns the smallest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Min() Item {
|
||||
return min(t.root)
|
||||
}
|
||||
|
||||
// Max returns the largest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Max() Item {
|
||||
return max(t.root)
|
||||
}
|
||||
|
||||
// Has returns true if the given key is in the tree.
|
||||
func (t *BTree) Has(key Item) bool {
|
||||
return t.Get(key) != nil
|
||||
}
|
||||
|
||||
// Len returns the number of items currently in the tree.
|
||||
func (t *BTree) Len() int {
|
||||
return t.length
|
||||
}
|
||||
|
||||
// Clear removes all items from the btree. If addNodesToFreelist is true,
|
||||
// t's nodes are added to its freelist as part of this call, until the freelist
|
||||
// is full. Otherwise, the root node is simply dereferenced and the subtree
|
||||
// left to Go's normal GC processes.
|
||||
//
|
||||
// This can be much faster
|
||||
// than calling Delete on all elements, because that requires finding/removing
|
||||
// each element in the tree and updating the tree accordingly. It also is
|
||||
// somewhat faster than creating a new tree to replace the old one, because
|
||||
// nodes from the old tree are reclaimed into the freelist for use by the new
|
||||
// one, instead of being lost to the garbage collector.
|
||||
//
|
||||
// This call takes:
|
||||
// O(1): when addNodesToFreelist is false, this is a single operation.
|
||||
// O(1): when the freelist is already full, it breaks out immediately
|
||||
// O(freelist size): when the freelist is empty and the nodes are all owned
|
||||
// by this tree, nodes are added to the freelist until full.
|
||||
// O(tree size): when all nodes are owned by another tree, all nodes are
|
||||
// iterated over looking for nodes to add to the freelist, and due to
|
||||
// ownership, none are.
|
||||
func (t *BTree) Clear(addNodesToFreelist bool) {
|
||||
if t.root != nil && addNodesToFreelist {
|
||||
t.root.reset(t.cow)
|
||||
}
|
||||
t.root, t.length = nil, 0
|
||||
}
|
||||
|
||||
// reset returns a subtree to the freelist. It breaks out immediately if the
|
||||
// freelist is full, since the only benefit of iterating is to fill that
|
||||
// freelist up. Returns true if parent reset call should continue.
|
||||
func (n *node) reset(c *copyOnWriteContext) bool {
|
||||
for _, child := range n.children {
|
||||
if !child.reset(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return c.freeNode(n) != ftFreelistFull
|
||||
}
|
||||
|
||||
// Int implements the Item interface for integers.
|
||||
type Int int
|
||||
|
||||
// Less returns true if int(a) < int(b).
|
||||
func (a Int) Less(b Item) bool {
|
||||
return a < b.(Int)
|
||||
}
|
9
vendor/github.com/google/cadvisor/AUTHORS
generated
vendored
Normal file
9
vendor/github.com/google/cadvisor/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# This is the official list of cAdvisor authors for copyright purposes.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Google Inc.
|
190
vendor/github.com/google/cadvisor/LICENSE
generated
vendored
Normal file
190
vendor/github.com/google/cadvisor/LICENSE
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
Copyright 2014 The cAdvisor Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
142
vendor/github.com/google/cadvisor/cache/memory/memory.go
generated
vendored
Normal file
142
vendor/github.com/google/cadvisor/cache/memory/memory.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/storage"
|
||||
"github.com/google/cadvisor/utils"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ErrDataNotFound is the error resulting if failed to find a container in memory cache.
|
||||
var ErrDataNotFound = errors.New("unable to find data in memory cache")
|
||||
|
||||
// TODO(vmarmol): See about refactoring this class, we have an unnecessary redirection of containerCache and InMemoryCache.
|
||||
// containerCache is used to store per-container information
|
||||
type containerCache struct {
|
||||
ref info.ContainerReference
|
||||
recentStats *utils.TimedStore
|
||||
maxAge time.Duration
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
func (c *containerCache) AddStats(stats *info.ContainerStats) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Add the stat to storage.
|
||||
c.recentStats.Add(stats.Timestamp, stats)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *containerCache) RecentStats(start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
result := c.recentStats.InTimeRange(start, end, maxStats)
|
||||
converted := make([]*info.ContainerStats, len(result))
|
||||
for i, el := range result {
|
||||
converted[i] = el.(*info.ContainerStats)
|
||||
}
|
||||
return converted, nil
|
||||
}
|
||||
|
||||
func newContainerStore(ref info.ContainerReference, maxAge time.Duration) *containerCache {
|
||||
return &containerCache{
|
||||
ref: ref,
|
||||
recentStats: utils.NewTimedStore(maxAge, -1),
|
||||
maxAge: maxAge,
|
||||
}
|
||||
}
|
||||
|
||||
type InMemoryCache struct {
|
||||
lock sync.RWMutex
|
||||
containerCacheMap map[string]*containerCache
|
||||
maxAge time.Duration
|
||||
backend []storage.StorageDriver
|
||||
}
|
||||
|
||||
func (c *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error {
|
||||
var cstore *containerCache
|
||||
var ok bool
|
||||
|
||||
func() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if cstore, ok = c.containerCacheMap[cInfo.ContainerReference.Name]; !ok {
|
||||
cstore = newContainerStore(cInfo.ContainerReference, c.maxAge)
|
||||
c.containerCacheMap[cInfo.ContainerReference.Name] = cstore
|
||||
}
|
||||
}()
|
||||
|
||||
for _, backend := range c.backend {
|
||||
// TODO(monnand): To deal with long delay write operations, we
|
||||
// may want to start a pool of goroutines to do write
|
||||
// operations.
|
||||
if err := backend.AddStats(cInfo, stats); err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
}
|
||||
return cstore.AddStats(stats)
|
||||
}
|
||||
|
||||
func (c *InMemoryCache) RecentStats(name string, start, end time.Time, maxStats int) ([]*info.ContainerStats, error) {
|
||||
var cstore *containerCache
|
||||
var ok bool
|
||||
err := func() error {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if cstore, ok = c.containerCacheMap[name]; !ok {
|
||||
return ErrDataNotFound
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cstore.RecentStats(start, end, maxStats)
|
||||
}
|
||||
|
||||
func (c *InMemoryCache) Close() error {
|
||||
c.lock.Lock()
|
||||
c.containerCacheMap = make(map[string]*containerCache, 32)
|
||||
c.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *InMemoryCache) RemoveContainer(containerName string) error {
|
||||
c.lock.Lock()
|
||||
delete(c.containerCacheMap, containerName)
|
||||
c.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func New(
|
||||
maxAge time.Duration,
|
||||
backend []storage.StorageDriver,
|
||||
) *InMemoryCache {
|
||||
ret := &InMemoryCache{
|
||||
containerCacheMap: make(map[string]*containerCache, 32),
|
||||
maxAge: maxAge,
|
||||
backend: backend,
|
||||
}
|
||||
return ret
|
||||
}
|
109
vendor/github.com/google/cadvisor/collector/collector_manager.go
generated
vendored
Normal file
109
vendor/github.com/google/cadvisor/collector/collector_manager.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
const metricLabelPrefix = "io.cadvisor.metric."
|
||||
|
||||
type GenericCollectorManager struct {
|
||||
Collectors []*collectorData
|
||||
NextCollectionTime time.Time
|
||||
}
|
||||
|
||||
type collectorData struct {
|
||||
collector Collector
|
||||
nextCollectionTime time.Time
|
||||
}
|
||||
|
||||
// Returns a new CollectorManager that is thread-compatible.
|
||||
func NewCollectorManager() (CollectorManager, error) {
|
||||
return &GenericCollectorManager{
|
||||
Collectors: []*collectorData{},
|
||||
NextCollectionTime: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func GetCollectorConfigs(labels map[string]string) map[string]string {
|
||||
configs := map[string]string{}
|
||||
for k, v := range labels {
|
||||
if strings.HasPrefix(k, metricLabelPrefix) {
|
||||
name := strings.TrimPrefix(k, metricLabelPrefix)
|
||||
configs[name] = v
|
||||
}
|
||||
}
|
||||
return configs
|
||||
}
|
||||
|
||||
func (cm *GenericCollectorManager) RegisterCollector(collector Collector) error {
|
||||
cm.Collectors = append(cm.Collectors, &collectorData{
|
||||
collector: collector,
|
||||
nextCollectionTime: time.Now(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *GenericCollectorManager) GetSpec() ([]v1.MetricSpec, error) {
|
||||
metricSpec := []v1.MetricSpec{}
|
||||
for _, c := range cm.Collectors {
|
||||
specs := c.collector.GetSpec()
|
||||
metricSpec = append(metricSpec, specs...)
|
||||
}
|
||||
|
||||
return metricSpec, nil
|
||||
}
|
||||
|
||||
func (cm *GenericCollectorManager) Collect() (time.Time, map[string][]v1.MetricVal, error) {
|
||||
var errors []error
|
||||
|
||||
// Collect from all collectors that are ready.
|
||||
var next time.Time
|
||||
metrics := map[string][]v1.MetricVal{}
|
||||
for _, c := range cm.Collectors {
|
||||
if c.nextCollectionTime.Before(time.Now()) {
|
||||
var err error
|
||||
c.nextCollectionTime, metrics, err = c.collector.Collect(metrics)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep track of the next collector that will be ready.
|
||||
if next.IsZero() || next.After(c.nextCollectionTime) {
|
||||
next = c.nextCollectionTime
|
||||
}
|
||||
}
|
||||
cm.NextCollectionTime = next
|
||||
return next, metrics, compileErrors(errors)
|
||||
}
|
||||
|
||||
// Make an error slice into a single error.
|
||||
func compileErrors(errors []error) error {
|
||||
if len(errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
res := make([]string, len(errors))
|
||||
for i := range errors {
|
||||
res[i] = fmt.Sprintf("Error %d: %v", i, errors[i].Error())
|
||||
}
|
||||
return fmt.Errorf("%s", strings.Join(res, ","))
|
||||
}
|
101
vendor/github.com/google/cadvisor/collector/config.go
generated
vendored
Normal file
101
vendor/github.com/google/cadvisor/collector/config.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
// the endpoint to hit to scrape metrics
|
||||
Endpoint EndpointConfig `json:"endpoint"`
|
||||
|
||||
// holds information about different metrics that can be collected
|
||||
MetricsConfig []MetricConfig `json:"metrics_config"`
|
||||
}
|
||||
|
||||
// metricConfig holds information extracted from the config file about a metric
|
||||
type MetricConfig struct {
|
||||
// the name of the metric
|
||||
Name string `json:"name"`
|
||||
|
||||
// enum type for the metric type
|
||||
MetricType v1.MetricType `json:"metric_type"`
|
||||
|
||||
// metric units to display on UI and in storage (eg: MB, cores)
|
||||
// this is only used for display.
|
||||
Units string `json:"units"`
|
||||
|
||||
// data type of the metric (eg: int, float)
|
||||
DataType v1.DataType `json:"data_type"`
|
||||
|
||||
// the frequency at which the metric should be collected
|
||||
PollingFrequency time.Duration `json:"polling_frequency"`
|
||||
|
||||
// the regular expression that can be used to extract the metric
|
||||
Regex string `json:"regex"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
// the endpoint to hit to scrape metrics
|
||||
Endpoint EndpointConfig `json:"endpoint"`
|
||||
|
||||
// the frequency at which metrics should be collected
|
||||
PollingFrequency time.Duration `json:"polling_frequency"`
|
||||
|
||||
// holds names of different metrics that can be collected
|
||||
MetricsConfig []string `json:"metrics_config"`
|
||||
}
|
||||
|
||||
type EndpointConfig struct {
|
||||
// The full URL of the endpoint to reach
|
||||
URL string
|
||||
// A configuration in which an actual URL is constructed from, using the container's ip address
|
||||
URLConfig URLConfig
|
||||
}
|
||||
|
||||
type URLConfig struct {
|
||||
// the protocol to use for connecting to the endpoint. Eg 'http' or 'https'
|
||||
Protocol string `json:"protocol"`
|
||||
|
||||
// the port to use for connecting to the endpoint. Eg '8778'
|
||||
Port json.Number `json:"port"`
|
||||
|
||||
// the path to use for the endpoint. Eg '/metrics'
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
func (ec *EndpointConfig) UnmarshalJSON(b []byte) error {
|
||||
url := ""
|
||||
config := URLConfig{
|
||||
Protocol: "http",
|
||||
Port: "8000",
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(b, &url); err == nil {
|
||||
ec.URL = url
|
||||
return nil
|
||||
}
|
||||
err := json.Unmarshal(b, &config)
|
||||
if err == nil {
|
||||
ec.URLConfig = config
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
37
vendor/github.com/google/cadvisor/collector/fakes.go
generated
vendored
Normal file
37
vendor/github.com/google/cadvisor/collector/fakes.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
type FakeCollectorManager struct {
|
||||
}
|
||||
|
||||
func (fkm *FakeCollectorManager) RegisterCollector(collector Collector) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fkm *FakeCollectorManager) GetSpec() ([]v1.MetricSpec, error) {
|
||||
return []v1.MetricSpec{}, nil
|
||||
}
|
||||
|
||||
func (fkm *FakeCollectorManager) Collect(metric map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error) {
|
||||
var zero time.Time
|
||||
return zero, metric, nil
|
||||
}
|
183
vendor/github.com/google/cadvisor/collector/generic_collector.go
generated
vendored
Normal file
183
vendor/github.com/google/cadvisor/collector/generic_collector.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
type GenericCollector struct {
|
||||
// name of the collector
|
||||
name string
|
||||
|
||||
// holds information extracted from the config file for a collector
|
||||
configFile Config
|
||||
|
||||
// holds information necessary to extract metrics
|
||||
info *collectorInfo
|
||||
|
||||
// The Http client to use when connecting to metric endpoints
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
type collectorInfo struct {
|
||||
// minimum polling frequency among all metrics
|
||||
minPollingFrequency time.Duration
|
||||
|
||||
// regular expresssions for all metrics
|
||||
regexps []*regexp.Regexp
|
||||
|
||||
// Limit for the number of srcaped metrics. If the count is higher,
|
||||
// no metrics will be returned.
|
||||
metricCountLimit int
|
||||
}
|
||||
|
||||
// Returns a new collector using the information extracted from the configfile
|
||||
func NewCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler, httpClient *http.Client) (*GenericCollector, error) {
|
||||
var configInJSON Config
|
||||
err := json.Unmarshal(configFile, &configInJSON)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configInJSON.Endpoint.configure(containerHandler)
|
||||
|
||||
// TODO : Add checks for validity of config file (eg : Accurate JSON fields)
|
||||
|
||||
if len(configInJSON.MetricsConfig) == 0 {
|
||||
return nil, fmt.Errorf("No metrics provided in config")
|
||||
}
|
||||
|
||||
minPollFrequency := time.Duration(0)
|
||||
regexprs := make([]*regexp.Regexp, len(configInJSON.MetricsConfig))
|
||||
|
||||
for ind, metricConfig := range configInJSON.MetricsConfig {
|
||||
// Find the minimum specified polling frequency in metric config.
|
||||
if metricConfig.PollingFrequency != 0 {
|
||||
if minPollFrequency == 0 || metricConfig.PollingFrequency < minPollFrequency {
|
||||
minPollFrequency = metricConfig.PollingFrequency
|
||||
}
|
||||
}
|
||||
|
||||
regexprs[ind], err = regexp.Compile(metricConfig.Regex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Invalid regexp %v for metric %v", metricConfig.Regex, metricConfig.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Minimum supported polling frequency is 1s.
|
||||
minSupportedFrequency := 1 * time.Second
|
||||
if minPollFrequency < minSupportedFrequency {
|
||||
minPollFrequency = minSupportedFrequency
|
||||
}
|
||||
|
||||
if len(configInJSON.MetricsConfig) > metricCountLimit {
|
||||
return nil, fmt.Errorf("Too many metrics defined: %d limit: %d", len(configInJSON.MetricsConfig), metricCountLimit)
|
||||
}
|
||||
|
||||
return &GenericCollector{
|
||||
name: collectorName,
|
||||
configFile: configInJSON,
|
||||
info: &collectorInfo{
|
||||
minPollingFrequency: minPollFrequency,
|
||||
regexps: regexprs,
|
||||
metricCountLimit: metricCountLimit,
|
||||
},
|
||||
httpClient: httpClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Returns name of the collector
|
||||
func (collector *GenericCollector) Name() string {
|
||||
return collector.name
|
||||
}
|
||||
|
||||
func (collector *GenericCollector) configToSpec(config MetricConfig) v1.MetricSpec {
|
||||
return v1.MetricSpec{
|
||||
Name: config.Name,
|
||||
Type: config.MetricType,
|
||||
Format: config.DataType,
|
||||
Units: config.Units,
|
||||
}
|
||||
}
|
||||
|
||||
func (collector *GenericCollector) GetSpec() []v1.MetricSpec {
|
||||
specs := []v1.MetricSpec{}
|
||||
for _, metricConfig := range collector.configFile.MetricsConfig {
|
||||
spec := collector.configToSpec(metricConfig)
|
||||
specs = append(specs, spec)
|
||||
}
|
||||
return specs
|
||||
}
|
||||
|
||||
// Returns collected metrics and the next collection time of the collector
|
||||
func (collector *GenericCollector) Collect(metrics map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error) {
|
||||
currentTime := time.Now()
|
||||
nextCollectionTime := currentTime.Add(time.Duration(collector.info.minPollingFrequency))
|
||||
|
||||
uri := collector.configFile.Endpoint.URL
|
||||
response, err := collector.httpClient.Get(uri)
|
||||
if err != nil {
|
||||
return nextCollectionTime, nil, err
|
||||
}
|
||||
|
||||
defer response.Body.Close()
|
||||
|
||||
pageContent, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nextCollectionTime, nil, err
|
||||
}
|
||||
|
||||
var errorSlice []error
|
||||
|
||||
for ind, metricConfig := range collector.configFile.MetricsConfig {
|
||||
matchString := collector.info.regexps[ind].FindStringSubmatch(string(pageContent))
|
||||
if matchString != nil {
|
||||
if metricConfig.DataType == v1.FloatType {
|
||||
regVal, err := strconv.ParseFloat(strings.TrimSpace(matchString[1]), 64)
|
||||
if err != nil {
|
||||
errorSlice = append(errorSlice, err)
|
||||
}
|
||||
metrics[metricConfig.Name] = []v1.MetricVal{
|
||||
{FloatValue: regVal, Timestamp: currentTime},
|
||||
}
|
||||
} else if metricConfig.DataType == v1.IntType {
|
||||
regVal, err := strconv.ParseInt(strings.TrimSpace(matchString[1]), 10, 64)
|
||||
if err != nil {
|
||||
errorSlice = append(errorSlice, err)
|
||||
}
|
||||
metrics[metricConfig.Name] = []v1.MetricVal{
|
||||
{IntValue: regVal, Timestamp: currentTime},
|
||||
}
|
||||
|
||||
} else {
|
||||
errorSlice = append(errorSlice, fmt.Errorf("Unexpected value of 'data_type' for metric '%v' in config ", metricConfig.Name))
|
||||
}
|
||||
} else {
|
||||
errorSlice = append(errorSlice, fmt.Errorf("No match found for regexp: %v for metric '%v' in config", metricConfig.Regex, metricConfig.Name))
|
||||
}
|
||||
}
|
||||
return nextCollectionTime, metrics, compileErrors(errorSlice)
|
||||
}
|
286
vendor/github.com/google/cadvisor/collector/prometheus_collector.go
generated
vendored
Normal file
286
vendor/github.com/google/cadvisor/collector/prometheus_collector.go
generated
vendored
Normal file
@ -0,0 +1,286 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
rawmodel "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
type PrometheusCollector struct {
|
||||
// name of the collector
|
||||
name string
|
||||
|
||||
// rate at which metrics are collected
|
||||
pollingFrequency time.Duration
|
||||
|
||||
// holds information extracted from the config file for a collector
|
||||
configFile Prometheus
|
||||
|
||||
// the metrics to gather (uses a map as a set)
|
||||
metricsSet map[string]bool
|
||||
|
||||
// Limit for the number of scaped metrics. If the count is higher,
|
||||
// no metrics will be returned.
|
||||
metricCountLimit int
|
||||
|
||||
// The Http client to use when connecting to metric endpoints
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// Returns a new collector using the information extracted from the configfile
|
||||
func NewPrometheusCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler, httpClient *http.Client) (*PrometheusCollector, error) {
|
||||
var configInJSON Prometheus
|
||||
err := json.Unmarshal(configFile, &configInJSON)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configInJSON.Endpoint.configure(containerHandler)
|
||||
|
||||
minPollingFrequency := configInJSON.PollingFrequency
|
||||
|
||||
// Minimum supported frequency is 1s
|
||||
minSupportedFrequency := 1 * time.Second
|
||||
|
||||
if minPollingFrequency < minSupportedFrequency {
|
||||
minPollingFrequency = minSupportedFrequency
|
||||
}
|
||||
|
||||
if metricCountLimit < 0 {
|
||||
return nil, fmt.Errorf("Metric count limit must be greater than or equal to 0")
|
||||
}
|
||||
|
||||
var metricsSet map[string]bool
|
||||
if len(configInJSON.MetricsConfig) > 0 {
|
||||
metricsSet = make(map[string]bool, len(configInJSON.MetricsConfig))
|
||||
for _, name := range configInJSON.MetricsConfig {
|
||||
metricsSet[name] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(configInJSON.MetricsConfig) > metricCountLimit {
|
||||
return nil, fmt.Errorf("Too many metrics defined: %d limit %d", len(configInJSON.MetricsConfig), metricCountLimit)
|
||||
}
|
||||
|
||||
// TODO : Add checks for validity of config file (eg : Accurate JSON fields)
|
||||
return &PrometheusCollector{
|
||||
name: collectorName,
|
||||
pollingFrequency: minPollingFrequency,
|
||||
configFile: configInJSON,
|
||||
metricsSet: metricsSet,
|
||||
metricCountLimit: metricCountLimit,
|
||||
httpClient: httpClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Returns name of the collector
|
||||
func (collector *PrometheusCollector) Name() string {
|
||||
return collector.name
|
||||
}
|
||||
|
||||
func (collector *PrometheusCollector) GetSpec() []v1.MetricSpec {
|
||||
|
||||
response, err := collector.httpClient.Get(collector.configFile.Endpoint.URL)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
|
||||
dec := expfmt.NewDecoder(response.Body, expfmt.ResponseFormat(response.Header))
|
||||
|
||||
var specs []v1.MetricSpec
|
||||
|
||||
for {
|
||||
d := rawmodel.MetricFamily{}
|
||||
if err = dec.Decode(&d); err != nil {
|
||||
break
|
||||
}
|
||||
name := d.GetName()
|
||||
if len(name) == 0 {
|
||||
continue
|
||||
}
|
||||
// If metrics to collect is specified, skip any metrics not in the list to collect.
|
||||
if _, ok := collector.metricsSet[name]; collector.metricsSet != nil && !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
spec := v1.MetricSpec{
|
||||
Name: name,
|
||||
Type: metricType(d.GetType()),
|
||||
Format: v1.FloatType,
|
||||
}
|
||||
specs = append(specs, spec)
|
||||
}
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return specs
|
||||
}
|
||||
|
||||
// metricType converts Prometheus metric type to cadvisor metric type.
|
||||
// If there is no mapping then just return the name of the Prometheus metric type.
|
||||
func metricType(t rawmodel.MetricType) v1.MetricType {
|
||||
switch t {
|
||||
case rawmodel.MetricType_COUNTER:
|
||||
return v1.MetricCumulative
|
||||
case rawmodel.MetricType_GAUGE:
|
||||
return v1.MetricGauge
|
||||
default:
|
||||
return v1.MetricType(t.String())
|
||||
}
|
||||
}
|
||||
|
||||
type prometheusLabels []*rawmodel.LabelPair
|
||||
|
||||
func labelSetToLabelPairs(labels model.Metric) prometheusLabels {
|
||||
var promLabels prometheusLabels
|
||||
for k, v := range labels {
|
||||
name := string(k)
|
||||
value := string(v)
|
||||
promLabels = append(promLabels, &rawmodel.LabelPair{Name: &name, Value: &value})
|
||||
}
|
||||
return promLabels
|
||||
}
|
||||
|
||||
func (s prometheusLabels) Len() int { return len(s) }
|
||||
func (s prometheusLabels) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// ByName implements sort.Interface by providing Less and using the Len and
|
||||
// Swap methods of the embedded PrometheusLabels value.
|
||||
type byName struct{ prometheusLabels }
|
||||
|
||||
func (s byName) Less(i, j int) bool {
|
||||
return s.prometheusLabels[i].GetName() < s.prometheusLabels[j].GetName()
|
||||
}
|
||||
|
||||
func prometheusLabelSetToCadvisorLabels(promLabels model.Metric) map[string]string {
|
||||
labels := make(map[string]string)
|
||||
for k, v := range promLabels {
|
||||
if string(k) == "__name__" {
|
||||
continue
|
||||
}
|
||||
labels[string(k)] = string(v)
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
func prometheusLabelSetToCadvisorLabel(promLabels model.Metric) string {
|
||||
labels := labelSetToLabelPairs(promLabels)
|
||||
sort.Sort(byName{labels})
|
||||
var b bytes.Buffer
|
||||
|
||||
for i, l := range labels {
|
||||
if i > 0 {
|
||||
b.WriteString("\xff")
|
||||
}
|
||||
b.WriteString(l.GetName())
|
||||
b.WriteString("=")
|
||||
b.WriteString(l.GetValue())
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Returns collected metrics and the next collection time of the collector
|
||||
func (collector *PrometheusCollector) Collect(metrics map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error) {
|
||||
currentTime := time.Now()
|
||||
nextCollectionTime := currentTime.Add(time.Duration(collector.pollingFrequency))
|
||||
|
||||
uri := collector.configFile.Endpoint.URL
|
||||
response, err := collector.httpClient.Get(uri)
|
||||
if err != nil {
|
||||
return nextCollectionTime, nil, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nextCollectionTime, nil, fmt.Errorf("server returned HTTP status %s", response.Status)
|
||||
}
|
||||
|
||||
sdec := expfmt.SampleDecoder{
|
||||
Dec: expfmt.NewDecoder(response.Body, expfmt.ResponseFormat(response.Header)),
|
||||
Opts: &expfmt.DecodeOptions{
|
||||
Timestamp: model.TimeFromUnixNano(currentTime.UnixNano()),
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
// 50 is chosen as a reasonable guesstimate at a number of metrics we can
|
||||
// expect from virtually any endpoint to try to save allocations.
|
||||
decSamples = make(model.Vector, 0, 50)
|
||||
newMetrics = make(map[string][]v1.MetricVal)
|
||||
)
|
||||
for {
|
||||
if err = sdec.Decode(&decSamples); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
for _, sample := range decSamples {
|
||||
metName := string(sample.Metric[model.MetricNameLabel])
|
||||
if len(metName) == 0 {
|
||||
continue
|
||||
}
|
||||
// If metrics to collect is specified, skip any metrics not in the list to collect.
|
||||
if _, ok := collector.metricsSet[metName]; collector.metricsSet != nil && !ok {
|
||||
continue
|
||||
}
|
||||
// TODO Handle multiple labels nicer. Prometheus metrics can have multiple
|
||||
// labels, cadvisor only accepts a single string for the metric label.
|
||||
label := prometheusLabelSetToCadvisorLabel(sample.Metric)
|
||||
labels := prometheusLabelSetToCadvisorLabels(sample.Metric)
|
||||
|
||||
metric := v1.MetricVal{
|
||||
FloatValue: float64(sample.Value),
|
||||
Timestamp: sample.Timestamp.Time(),
|
||||
Label: label,
|
||||
Labels: labels,
|
||||
}
|
||||
newMetrics[metName] = append(newMetrics[metName], metric)
|
||||
if len(newMetrics) > collector.metricCountLimit {
|
||||
return nextCollectionTime, nil, fmt.Errorf("too many metrics to collect")
|
||||
}
|
||||
}
|
||||
decSamples = decSamples[:0]
|
||||
}
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return nextCollectionTime, nil, err
|
||||
}
|
||||
|
||||
for key, val := range newMetrics {
|
||||
metrics[key] = append(metrics[key], val...)
|
||||
}
|
||||
|
||||
return nextCollectionTime, metrics, nil
|
||||
}
|
53
vendor/github.com/google/cadvisor/collector/types.go
generated
vendored
Normal file
53
vendor/github.com/google/cadvisor/collector/types.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// TODO(vmarmol): Export to a custom metrics type when that is available.
|
||||
|
||||
// Metric collector.
|
||||
type Collector interface {
|
||||
// Collect metrics from this collector.
|
||||
// Returns the next time this collector should be collected from.
|
||||
// Next collection time is always returned, even when an error occurs.
|
||||
// A collection time of zero means no more collection.
|
||||
Collect(map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error)
|
||||
|
||||
// Return spec for all metrics associated with this collector
|
||||
GetSpec() []v1.MetricSpec
|
||||
|
||||
// Name of this collector.
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Manages and runs collectors.
|
||||
type CollectorManager interface {
|
||||
// Register a collector.
|
||||
RegisterCollector(collector Collector) error
|
||||
|
||||
// Collect from collectors that are ready and return the next time
|
||||
// at which a collector will be ready to collect from.
|
||||
// Next collection time is always returned, even when an error occurs.
|
||||
// A collection time of zero means no more collection.
|
||||
Collect() (time.Time, map[string][]v1.MetricVal, error)
|
||||
|
||||
// Get metric spec from all registered collectors.
|
||||
GetSpec() ([]v1.MetricSpec, error)
|
||||
}
|
26
vendor/github.com/google/cadvisor/collector/util.go
generated
vendored
Normal file
26
vendor/github.com/google/cadvisor/collector/util.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package collector
|
||||
|
||||
import "github.com/google/cadvisor/container"
|
||||
|
||||
func (endpointConfig *EndpointConfig) configure(containerHandler container.ContainerHandler) {
|
||||
//If the exact URL was not specified, generate it based on the ip address of the container.
|
||||
endpoint := endpointConfig
|
||||
if endpoint.URL == "" {
|
||||
ipAddress := containerHandler.GetContainerIPAddress()
|
||||
endpointConfig.URL = endpoint.URLConfig.Protocol + "://" + ipAddress + ":" + endpoint.URLConfig.Port.String() + endpoint.URLConfig.Path
|
||||
}
|
||||
}
|
60
vendor/github.com/google/cadvisor/container/common/container_hints.go
generated
vendored
Normal file
60
vendor/github.com/google/cadvisor/container/common/container_hints.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Unmarshal's a Containers description json file. The json file contains
|
||||
// an array of ContainerHint structs, each with a container's id and networkInterface
|
||||
// This allows collecting stats about network interfaces configured outside docker
|
||||
// and lxc
|
||||
package common
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"os"
|
||||
)
|
||||
|
||||
var ArgContainerHints = flag.String("container_hints", "/etc/cadvisor/container_hints.json", "location of the container hints file")
|
||||
|
||||
type ContainerHints struct {
|
||||
AllHosts []containerHint `json:"all_hosts,omitempty"`
|
||||
}
|
||||
|
||||
type containerHint struct {
|
||||
FullName string `json:"full_path,omitempty"`
|
||||
NetworkInterface *networkInterface `json:"network_interface,omitempty"`
|
||||
Mounts []Mount `json:"mounts,omitempty"`
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
HostDir string `json:"host_dir,omitempty"`
|
||||
ContainerDir string `json:"container_dir,omitempty"`
|
||||
}
|
||||
|
||||
type networkInterface struct {
|
||||
VethHost string `json:"veth_host,omitempty"`
|
||||
VethChild string `json:"veth_child,omitempty"`
|
||||
}
|
||||
|
||||
func GetContainerHintsFromFile(containerHintsFile string) (ContainerHints, error) {
|
||||
dat, err := os.ReadFile(containerHintsFile)
|
||||
if os.IsNotExist(err) {
|
||||
return ContainerHints{}, nil
|
||||
}
|
||||
var cHints ContainerHints
|
||||
if err == nil {
|
||||
err = json.Unmarshal(dat, &cHints)
|
||||
}
|
||||
|
||||
return cHints, err
|
||||
}
|
155
vendor/github.com/google/cadvisor/container/common/fsHandler.go
generated
vendored
Normal file
155
vendor/github.com/google/cadvisor/container/common/fsHandler.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Handler for Docker containers.
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/fs"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
type FsHandler interface {
|
||||
Start()
|
||||
Usage() FsUsage
|
||||
Stop()
|
||||
}
|
||||
|
||||
// FsUsage holds one snapshot of a container's filesystem usage.
type FsUsage struct {
	// BaseUsageBytes is the bytes used under the container's root filesystem.
	BaseUsageBytes uint64
	// TotalUsageBytes is BaseUsageBytes plus the usage of the extra directory.
	TotalUsageBytes uint64
	// InodeUsage is the inode count used under the root filesystem.
	InodeUsage uint64
}
|
||||
|
||||
// realFsHandler is the concrete FsHandler. The embedded RWMutex guards
// lastUpdate and usage, which are written by the collection goroutine
// (update) and read concurrently via Usage().
type realFsHandler struct {
	sync.RWMutex
	// lastUpdate records when usage was last refreshed.
	lastUpdate time.Time
	// usage is the most recent snapshot returned by Usage().
	usage FsUsage
	// period is the current delay between collections; it grows under
	// error backoff and resets to minPeriod on success (see trackUsage).
	period time.Duration
	// minPeriod is the configured (minimum) collection period.
	minPeriod time.Duration
	// rootfs is the container root filesystem directory; "" disables it.
	rootfs string
	// extraDir is an additional directory to account; "" disables it.
	extraDir string
	// fsInfo performs the actual per-directory usage measurements.
	fsInfo fs.FsInfo
	// Tells the container to stop.
	stopChan chan struct{}
}
|
||||
|
||||
const (
	// maxBackoffFactor caps the error backoff at maxBackoffFactor*minPeriod.
	maxBackoffFactor = 20
)

// DefaultPeriod is the default interval between filesystem usage collections.
const DefaultPeriod = time.Minute

// Compile-time check that realFsHandler implements FsHandler.
var _ FsHandler = &realFsHandler{}
|
||||
|
||||
func NewFsHandler(period time.Duration, rootfs, extraDir string, fsInfo fs.FsInfo) FsHandler {
|
||||
return &realFsHandler{
|
||||
lastUpdate: time.Time{},
|
||||
usage: FsUsage{},
|
||||
period: period,
|
||||
minPeriod: period,
|
||||
rootfs: rootfs,
|
||||
extraDir: extraDir,
|
||||
fsInfo: fsInfo,
|
||||
stopChan: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (fh *realFsHandler) update() error {
|
||||
var (
|
||||
rootUsage, extraUsage fs.UsageInfo
|
||||
rootErr, extraErr error
|
||||
)
|
||||
// TODO(vishh): Add support for external mounts.
|
||||
if fh.rootfs != "" {
|
||||
rootUsage, rootErr = fh.fsInfo.GetDirUsage(fh.rootfs)
|
||||
}
|
||||
|
||||
if fh.extraDir != "" {
|
||||
extraUsage, extraErr = fh.fsInfo.GetDirUsage(fh.extraDir)
|
||||
}
|
||||
|
||||
// Wait to handle errors until after all operartions are run.
|
||||
// An error in one will not cause an early return, skipping others
|
||||
fh.Lock()
|
||||
defer fh.Unlock()
|
||||
fh.lastUpdate = time.Now()
|
||||
if fh.rootfs != "" && rootErr == nil {
|
||||
fh.usage.InodeUsage = rootUsage.Inodes
|
||||
fh.usage.BaseUsageBytes = rootUsage.Bytes
|
||||
fh.usage.TotalUsageBytes = rootUsage.Bytes
|
||||
}
|
||||
if fh.extraDir != "" && extraErr == nil {
|
||||
if fh.rootfs != "" {
|
||||
fh.usage.TotalUsageBytes += extraUsage.Bytes
|
||||
} else {
|
||||
// rootfs is empty, totalUsageBytes use extra usage bytes
|
||||
fh.usage.TotalUsageBytes = extraUsage.Bytes
|
||||
}
|
||||
}
|
||||
|
||||
// Combine errors into a single error to return
|
||||
if rootErr != nil || extraErr != nil {
|
||||
return fmt.Errorf("rootDiskErr: %v, extraDiskErr: %v", rootErr, extraErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// trackUsage runs the collection loop until stopChan is closed (see Stop).
// On a failed update the period doubles (exponential backoff) up to
// maxBackoffFactor*minPeriod; on success it resets to minPeriod.
// fh.period is only ever touched by this goroutine, so it is accessed
// without the lock.
func (fh *realFsHandler) trackUsage() {
	longOp := time.Second
	for {
		start := time.Now()
		if err := fh.update(); err != nil {
			klog.Errorf("failed to collect filesystem stats - %v", err)
			// Back off so a persistently failing directory does not spin.
			fh.period = fh.period * 2
			if fh.period > maxBackoffFactor*fh.minPeriod {
				fh.period = maxBackoffFactor * fh.minPeriod
			}
		} else {
			fh.period = fh.minPeriod
		}
		duration := time.Since(start)
		if duration > longOp {
			// adapt longOp time so that message doesn't continue to print
			// if the long duration is persistent either because of slow
			// disk or lots of containers.
			longOp = longOp + time.Second
			klog.V(2).Infof("fs: disk usage and inodes count on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp)
		}
		// Sleep for the current period, but wake immediately on Stop.
		select {
		case <-fh.stopChan:
			return
		case <-time.After(fh.period):
		}
	}
}
|
||||
|
||||
// Start launches the background usage-tracking goroutine; stop it with Stop.
func (fh *realFsHandler) Start() {
	go fh.trackUsage()
}
|
||||
|
||||
// Stop terminates the tracking goroutine by closing stopChan. Calling it
// more than once panics (close of a closed channel).
func (fh *realFsHandler) Stop() {
	close(fh.stopChan)
}
|
||||
|
||||
// Usage returns a copy of the latest collected usage snapshot.
// Safe for concurrent use with the collection goroutine.
func (fh *realFsHandler) Usage() FsUsage {
	fh.RLock()
	defer fh.RUnlock()
	return fh.usage
}
|
459
vendor/github.com/google/cadvisor/container/common/helpers.go
generated
vendored
Normal file
459
vendor/github.com/google/cadvisor/container/common/helpers.go
generated
vendored
Normal file
@ -0,0 +1,459 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/utils"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// DebugInfo renders the container -> cgroup-watch mapping as human-readable
// debug lines, returned under the single key "Inotify watches". Each
// container name gets one "name:" line followed by one tab-indented line
// per watched cgroup path.
func DebugInfo(watches map[string][]string) map[string][]string {
	lines := make([]string, 0, len(watches))
	for name, cgroupWatches := range watches {
		lines = append(lines, fmt.Sprintf("%s:", name))
		for _, watch := range cgroupWatches {
			lines = append(lines, fmt.Sprintf("\t%s", watch))
		}
	}
	return map[string][]string{"Inotify watches": lines}
}
|
||||
|
||||
// bootTime is the machine boot time, computed once at package init from
// the kernel's uptime counter. It is truncated to the minute —
// presumably to smooth out the second-granularity jitter of
// sysinfo.Uptime; TODO confirm. If the sysinfo call fails, "now" is used
// as a fallback.
var bootTime = func() time.Time {
	now := time.Now()
	var sysinfo unix.Sysinfo_t
	if err := unix.Sysinfo(&sysinfo); err != nil {
		// No uptime available; fall back to the current time.
		return now
	}
	sinceBoot := time.Duration(sysinfo.Uptime) * time.Second
	return now.Add(-1 * sinceBoot).Truncate(time.Minute)
}()
|
||||
|
||||
// GetSpec builds a ContainerSpec from the given cgroup paths, detecting
// cgroup v1 vs v2 automatically via the runc cgroups library. See
// getSpecInternal for the actual work.
func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoFactory, hasNetwork, hasFilesystem bool) (info.ContainerSpec, error) {
	return getSpecInternal(cgroupPaths, machineInfoFactory, hasNetwork, hasFilesystem, cgroups.IsCgroup2UnifiedMode())
}
|
||||
|
||||
// getSpecInternal assembles a ContainerSpec by probing the container's
// cgroup control files. It estimates the creation time from cgroup
// directory/file mtimes (clamped to boot time), then fills in CPU,
// cpuset, memory, hugetlb, pids and disk-IO information, reading either
// the v1 or the v2 (unified) interface files depending on
// cgroup2UnifiedMode. Unreadable or absent control files simply leave
// the corresponding spec fields at their zero values.
func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoFactory, hasNetwork, hasFilesystem, cgroup2UnifiedMode bool) (info.ContainerSpec, error) {
	var spec info.ContainerSpec

	// Assume unified hierarchy containers.
	// Get the lowest creation time from all hierarchies as the container creation time.
	now := time.Now()
	lowestTime := now
	for _, cgroupPathDir := range cgroupPaths {
		dir, err := os.Stat(cgroupPathDir)
		if err == nil && dir.ModTime().Before(lowestTime) {
			lowestTime = dir.ModTime()
		} else if os.IsNotExist(err) {
			// Directory does not exist, skip checking for files within.
			continue
		}

		// The modified time of the cgroup directory sometimes changes whenever a subcontainer is created.
		// eg. /docker will have creation time matching the creation of latest docker container.
		// Use clone_children/events as a workaround as it isn't usually modified. It is only likely changed
		// immediately after creating a container. If the directory modified time is lower, we use that.
		cgroupPathFile := path.Join(cgroupPathDir, "cgroup.clone_children")
		if cgroup2UnifiedMode {
			cgroupPathFile = path.Join(cgroupPathDir, "cgroup.events")
		}
		fi, err := os.Stat(cgroupPathFile)
		if err == nil && fi.ModTime().Before(lowestTime) {
			lowestTime = fi.ModTime()
		}
	}
	// A cgroup cannot predate the machine: clamp to boot time.
	if lowestTime.Before(bootTime) {
		lowestTime = bootTime
	}

	// Only record a creation time if some cgroup mtime actually lowered it.
	if lowestTime != now {
		spec.CreationTime = lowestTime
	}

	// Get machine info.
	mi, err := machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := GetControllerPath(cgroupPaths, "cpu", cgroup2UnifiedMode)
	if ok {
		if utils.FileExists(cpuRoot) {
			if cgroup2UnifiedMode {
				spec.HasCpu = true

				// v2: cpu.weight maps onto the v1 cpu.shares range.
				weight := readUInt64(cpuRoot, "cpu.weight")
				if weight > 0 {
					limit, err := convertCPUWeightToCPULimit(weight)
					if err != nil {
						klog.Errorf("GetSpec: Failed to read CPULimit from %q: %s", path.Join(cpuRoot, "cpu.weight"), err)
					} else {
						spec.Cpu.Limit = limit
					}
				}
				// v2: cpu.max holds "<quota|max> <period>" in one file.
				max := readString(cpuRoot, "cpu.max")
				if max != "" {
					splits := strings.SplitN(max, " ", 2)
					if len(splits) != 2 {
						klog.Errorf("GetSpec: Failed to parse CPUmax from %q", path.Join(cpuRoot, "cpu.max"))
					} else {
						if splits[0] != "max" {
							spec.Cpu.Quota = parseUint64String(splits[0])
						}
						spec.Cpu.Period = parseUint64String(splits[1])
					}
				}
			} else {
				spec.HasCpu = true
				spec.Cpu.Limit = readUInt64(cpuRoot, "cpu.shares")
				spec.Cpu.Period = readUInt64(cpuRoot, "cpu.cfs_period_us")
				quota := readString(cpuRoot, "cpu.cfs_quota_us")

				// v1 reports -1 for "no quota".
				if quota != "" && quota != "-1" {
					val, err := strconv.ParseUint(quota, 10, 64)
					if err != nil {
						klog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
					} else {
						spec.Cpu.Quota = val
					}
				}
			}
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := GetControllerPath(cgroupPaths, "cpuset", cgroup2UnifiedMode)
	if ok {
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			mask := ""
			if cgroup2UnifiedMode {
				mask = readString(cpusetRoot, "cpuset.cpus.effective")
			} else {
				mask = readString(cpusetRoot, "cpuset.cpus")
			}
			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
		}
	}

	// Memory
	memoryRoot, ok := GetControllerPath(cgroupPaths, "memory", cgroup2UnifiedMode)
	if ok {
		if cgroup2UnifiedMode {
			if utils.FileExists(path.Join(memoryRoot, "memory.max")) {
				spec.HasMemory = true
				spec.Memory.Reservation = readUInt64(memoryRoot, "memory.min")
				spec.Memory.Limit = readUInt64(memoryRoot, "memory.max")
				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.swap.max")
			}
		} else {
			if utils.FileExists(memoryRoot) {
				spec.HasMemory = true
				spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
				spec.Memory.Reservation = readUInt64(memoryRoot, "memory.soft_limit_in_bytes")
			}
		}
	}

	// Hugepage
	hugepageRoot, ok := cgroupPaths["hugetlb"]
	if ok {
		if utils.FileExists(hugepageRoot) {
			spec.HasHugetlb = true
		}
	}

	// Processes, read it's value from pids path directly
	pidsRoot, ok := GetControllerPath(cgroupPaths, "pids", cgroup2UnifiedMode)
	if ok {
		if utils.FileExists(pidsRoot) {
			spec.HasProcesses = true
			spec.Processes.Limit = readUInt64(pidsRoot, "pids.max")
		}
	}

	spec.HasNetwork = hasNetwork
	spec.HasFilesystem = hasFilesystem

	// The block-IO controller is named "blkio" in v1 and "io" in v2.
	ioControllerName := "blkio"
	if cgroup2UnifiedMode {
		ioControllerName = "io"
	}

	if blkioRoot, ok := GetControllerPath(cgroupPaths, ioControllerName, cgroup2UnifiedMode); ok && utils.FileExists(blkioRoot) {
		spec.HasDiskIo = true
	}

	return spec, nil
}
|
||||
|
||||
func GetControllerPath(cgroupPaths map[string]string, controllerName string, cgroup2UnifiedMode bool) (string, bool) {
|
||||
|
||||
ok := false
|
||||
path := ""
|
||||
|
||||
if cgroup2UnifiedMode {
|
||||
path, ok = cgroupPaths[""]
|
||||
} else {
|
||||
path, ok = cgroupPaths[controllerName]
|
||||
}
|
||||
return path, ok
|
||||
}
|
||||
|
||||
func readString(dirpath string, file string) string {
|
||||
cgroupFile := path.Join(dirpath, file)
|
||||
|
||||
// Read
|
||||
out, err := os.ReadFile(cgroupFile)
|
||||
if err != nil {
|
||||
// Ignore non-existent files
|
||||
if !os.IsNotExist(err) {
|
||||
klog.Warningf("readString: Failed to read %q: %s", cgroupFile, err)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// Convert from [1-10000] to [2-262144]
// convertCPUWeightToCPULimit maps a cgroup v2 cpu.weight value linearly
// onto the cgroup v1 cpu.shares range. Out-of-range weights are rejected
// with an error.
func convertCPUWeightToCPULimit(weight uint64) (uint64, error) {
	const (
		minWeight = 1     // lowest value possible for cpu.weight
		maxWeight = 10000 // highest value possible for cpu.weight
	)
	if weight < minWeight || weight > maxWeight {
		return 0, fmt.Errorf("convertCPUWeightToCPULimit: invalid cpu weight: %v", weight)
	}
	limit := 2 + ((weight-1)*262142)/9999
	return limit, nil
}
|
||||
|
||||
func parseUint64String(strValue string) uint64 {
|
||||
if strValue == "max" {
|
||||
return math.MaxUint64
|
||||
}
|
||||
if strValue == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
val, err := strconv.ParseUint(strValue, 10, 64)
|
||||
if err != nil {
|
||||
klog.Errorf("parseUint64String: Failed to parse int %q: %s", strValue, err)
|
||||
return 0
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func readUInt64(dirpath string, file string) uint64 {
|
||||
out := readString(dirpath, file)
|
||||
if out == "max" {
|
||||
return math.MaxUint64
|
||||
}
|
||||
if out == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
val, err := strconv.ParseUint(out, 10, 64)
|
||||
if err != nil {
|
||||
klog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err)
|
||||
return 0
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
// Lists all directories under "path" and outputs the results as children of "parent".
// Names are accumulated into output as a set; errors other than a
// missing hierarchy are returned.
func ListDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error {
	// Scratch buffer reused across the whole (possibly recursive) walk to
	// avoid a per-directory allocation inside godirwalk.
	buf := make([]byte, godirwalk.MinimumScratchBufferSize)
	return listDirectories(dirpath, parent, recursive, output, buf)
}
|
||||
|
||||
// listDirectories implements ListDirectories; buf is the reusable scratch
// buffer threaded through the recursion.
func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}, buf []byte) error {
	dirents, err := godirwalk.ReadDirents(dirpath, buf)
	if err != nil {
		// Ignore if this hierarchy does not exist.
		if os.IsNotExist(errors.Cause(err)) {
			err = nil
		}
		return err
	}
	for _, dirent := range dirents {
		// We only grab directories.
		if !dirent.IsDir() {
			continue
		}
		dirname := dirent.Name()

		// Record the child keyed by its path relative to the walk root.
		name := path.Join(parent, dirname)
		output[name] = struct{}{}

		// List subcontainers if asked to.
		if recursive {
			err := listDirectories(path.Join(dirpath, dirname), name, true, output, buf)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// MakeCgroupPaths joins the container name onto each controller's mount
// point, producing the per-controller cgroup directory for that container.
func MakeCgroupPaths(mountPoints map[string]string, name string) map[string]string {
	out := make(map[string]string, len(mountPoints))
	for controller, mount := range mountPoints {
		out[controller] = path.Join(mount, name)
	}
	return out
}
|
||||
|
||||
func CgroupExists(cgroupPaths map[string]string) bool {
|
||||
// If any cgroup exists, the container is still alive.
|
||||
for _, cgroupPath := range cgroupPaths {
|
||||
if utils.FileExists(cgroupPath) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ListContainers returns references to the subcontainers found under name
// in any of the given cgroup hierarchies. listType selects direct
// children only, or the full recursive tree.
func ListContainers(name string, cgroupPaths map[string]string, listType container.ListType) ([]info.ContainerReference, error) {
	// A set is used so the same container seen in several hierarchies is
	// reported only once.
	containers := make(map[string]struct{})
	for _, cgroupPath := range cgroupPaths {
		err := ListDirectories(cgroupPath, name, listType == container.ListRecursive, containers)
		if err != nil {
			return nil, err
		}
	}

	// Make into container references.
	ret := make([]info.ContainerReference, 0, len(containers))
	for cont := range containers {
		ret = append(ret, info.ContainerReference{
			Name: cont,
		})
	}

	return ret, nil
}
|
||||
|
||||
// AssignDeviceNamesToDiskStats assigns the Device field on the provided DiskIoStats by looking up
// the device major and minor identifiers in the provided device namer.
// All per-disk stat lists on the struct are covered in one pass so the
// name lookups are cached across them.
func AssignDeviceNamesToDiskStats(namer DeviceNamer, stats *info.DiskIoStats) {
	assignDeviceNamesToPerDiskStats(
		namer,
		stats.IoMerged,
		stats.IoQueued,
		stats.IoServiceBytes,
		stats.IoServiceTime,
		stats.IoServiced,
		stats.IoTime,
		stats.IoWaitTime,
		stats.Sectors,
	)
}
|
||||
|
||||
// assignDeviceNamesToPerDiskStats looks up device names for the provided stats, caching names
// if necessary. Entries are mutated in place; a single cache map is
// shared across all stat slices so each (major, minor) pair is resolved
// at most once.
func assignDeviceNamesToPerDiskStats(namer DeviceNamer, diskStats ...[]info.PerDiskStats) {
	devices := make(deviceIdentifierMap)
	for _, stats := range diskStats {
		for i, stat := range stats {
			stats[i].Device = devices.Find(stat.Major, stat.Minor, namer)
		}
	}
}
|
||||
|
||||
// DeviceNamer returns string names for devices by their major and minor id.
type DeviceNamer interface {
	// DeviceName returns the name of the device by its major and minor ids, or false if no
	// such device is recognized.
	DeviceName(major, minor uint64) (string, bool)
}
|
||||
|
||||
type MachineInfoNamer info.MachineInfo
|
||||
|
||||
func (n *MachineInfoNamer) DeviceName(major, minor uint64) (string, bool) {
|
||||
for _, info := range n.DiskMap {
|
||||
if info.Major == major && info.Minor == minor {
|
||||
return "/dev/" + info.Name, true
|
||||
}
|
||||
}
|
||||
for _, info := range n.Filesystems {
|
||||
if info.DeviceMajor == major && info.DeviceMinor == minor {
|
||||
return info.Device, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// deviceIdentifier is a (major, minor) device number pair, used as a
// cache key.
type deviceIdentifier struct {
	major uint64
	minor uint64
}

// deviceIdentifierMap caches resolved device names keyed by device number.
type deviceIdentifierMap map[deviceIdentifier]string
|
||||
// Find locates the device name by device identifier out of from, caching the result as necessary.
|
||||
func (m deviceIdentifierMap) Find(major, minor uint64, namer DeviceNamer) string {
|
||||
d := deviceIdentifier{major, minor}
|
||||
if s, ok := m[d]; ok {
|
||||
return s
|
||||
}
|
||||
s, _ := namer.DeviceName(major, minor)
|
||||
m[d] = s
|
||||
return s
|
||||
}
|
||||
|
||||
// RemoveNetMetrics is used to remove any network metrics from the given MetricSet.
|
||||
// It returns the original set as is if remove is false, or if there are no metrics
|
||||
// to remove.
|
||||
func RemoveNetMetrics(metrics container.MetricSet, remove bool) container.MetricSet {
|
||||
if !remove {
|
||||
return metrics
|
||||
}
|
||||
|
||||
// Check if there is anything we can remove, to avoid useless copying.
|
||||
if !metrics.HasAny(container.AllNetworkMetrics) {
|
||||
return metrics
|
||||
}
|
||||
|
||||
// A copy of all metrics except for network ones.
|
||||
return metrics.Difference(container.AllNetworkMetrics)
|
||||
}
|
135
vendor/github.com/google/cadvisor/container/common/inotify_watcher.go
generated
vendored
Normal file
135
vendor/github.com/google/cadvisor/container/common/inotify_watcher.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
inotify "k8s.io/utils/inotify"
|
||||
)
|
||||
|
||||
// Watcher for container-related inotify events in the cgroup hierarchy.
//
// Implementation is thread-safe.
type InotifyWatcher struct {
	// Underlying inotify watcher.
	watcher *inotify.Watcher

	// Map of containers being watched to cgroup paths watched for that container.
	containersWatched map[string]map[string]bool

	// Lock for all datastructure access.
	lock sync.Mutex
}
|
||||
|
||||
func NewInotifyWatcher() (*InotifyWatcher, error) {
|
||||
w, err := inotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &InotifyWatcher{
|
||||
watcher: w,
|
||||
containersWatched: make(map[string]map[string]bool),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Add a watch to the specified directory. Returns if the container was already being watched.
|
||||
func (iw *InotifyWatcher) AddWatch(containerName, dir string) (bool, error) {
|
||||
iw.lock.Lock()
|
||||
defer iw.lock.Unlock()
|
||||
|
||||
cgroupsWatched, alreadyWatched := iw.containersWatched[containerName]
|
||||
|
||||
// Register an inotify notification.
|
||||
if !cgroupsWatched[dir] {
|
||||
err := iw.watcher.AddWatch(dir, inotify.InCreate|inotify.InDelete|inotify.InMove)
|
||||
if err != nil {
|
||||
return alreadyWatched, err
|
||||
}
|
||||
|
||||
if cgroupsWatched == nil {
|
||||
cgroupsWatched = make(map[string]bool)
|
||||
}
|
||||
cgroupsWatched[dir] = true
|
||||
}
|
||||
|
||||
// Record our watching of the container.
|
||||
if !alreadyWatched {
|
||||
iw.containersWatched[containerName] = cgroupsWatched
|
||||
}
|
||||
return alreadyWatched, nil
|
||||
}
|
||||
|
||||
// Remove watch from the specified directory. Returns if this was the last watch on the specified container.
|
||||
func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) {
|
||||
iw.lock.Lock()
|
||||
defer iw.lock.Unlock()
|
||||
|
||||
// If we don't have a watch registered for this, just return.
|
||||
cgroupsWatched, ok := iw.containersWatched[containerName]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Remove the inotify watch if it exists.
|
||||
if cgroupsWatched[dir] {
|
||||
err := iw.watcher.RemoveWatch(dir)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
delete(cgroupsWatched, dir)
|
||||
}
|
||||
|
||||
// Remove the record if this is the last watch.
|
||||
if len(cgroupsWatched) == 0 {
|
||||
delete(iw.containersWatched, containerName)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Errors are returned on this channel.
// Error exposes the underlying watcher's error channel.
func (iw *InotifyWatcher) Error() chan error {
	return iw.watcher.Error
}
|
||||
|
||||
// Events are returned on this channel.
// Event exposes the underlying watcher's event channel.
func (iw *InotifyWatcher) Event() chan *inotify.Event {
	return iw.watcher.Event
}
|
||||
|
||||
// Closes the inotify watcher.
// Close shuts down the underlying inotify instance.
func (iw *InotifyWatcher) Close() error {
	return iw.watcher.Close()
}
|
||||
|
||||
// Returns a map of containers to the cgroup paths being watched.
|
||||
func (iw *InotifyWatcher) GetWatches() map[string][]string {
|
||||
out := make(map[string][]string, len(iw.containersWatched))
|
||||
for k, v := range iw.containersWatched {
|
||||
out[k] = mapToSlice(v)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func mapToSlice(m map[string]bool) []string {
|
||||
out := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
out = append(out, k)
|
||||
}
|
||||
return out
|
||||
}
|
79
vendor/github.com/google/cadvisor/container/container.go
generated
vendored
Normal file
79
vendor/github.com/google/cadvisor/container/container.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package container defines types for sub-container events and also
|
||||
// defines an interface for container operation handlers.
|
||||
package container
|
||||
|
||||
import info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
// ListType describes whether listing should be just for a
// specific container or performed recursively.
type ListType int

const (
	// ListSelf lists only the container itself.
	ListSelf ListType = iota
	// ListRecursive lists the container and all of its descendants.
	ListRecursive
)
|
||||
|
||||
// ContainerType identifies which runtime a container handler serves.
type ContainerType int

const (
	ContainerTypeRaw ContainerType = iota
	ContainerTypeDocker
	ContainerTypeCrio
	ContainerTypeContainerd
	ContainerTypeMesos
	ContainerTypePodman
)
|
||||
|
||||
// Interface for container operation handlers.
// A ContainerHandler wraps one container of a particular runtime and
// exposes its spec, stats, children and lifecycle operations.
type ContainerHandler interface {
	// Returns the ContainerReference
	ContainerReference() (info.ContainerReference, error)

	// Returns container's isolation spec.
	GetSpec() (info.ContainerSpec, error)

	// Returns the current stats values of the container.
	GetStats() (*info.ContainerStats, error)

	// Returns the subcontainers of this container.
	ListContainers(listType ListType) ([]info.ContainerReference, error)

	// Returns the processes inside this container.
	ListProcesses(listType ListType) ([]int, error)

	// Returns absolute cgroup path for the requested resource.
	GetCgroupPath(resource string) (string, error)

	// Returns container labels, if available.
	GetContainerLabels() map[string]string

	// Returns the container's ip address, if available
	GetContainerIPAddress() string

	// Returns whether the container still exists.
	Exists() bool

	// Cleanup frees up any resources being held like fds or go routines, etc.
	Cleanup()

	// Start starts any necessary background goroutines - must be cleaned up in Cleanup().
	// It is expected that most implementations will be a no-op.
	Start()

	// Type of handler
	Type() ContainerType
}
|
162
vendor/github.com/google/cadvisor/container/containerd/client.go
generated
vendored
Normal file
162
vendor/github.com/google/cadvisor/container/containerd/client.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
containersapi "github.com/containerd/containerd/api/services/containers/v1"
|
||||
tasksapi "github.com/containerd/containerd/api/services/tasks/v1"
|
||||
versionapi "github.com/containerd/containerd/api/services/version/v1"
|
||||
tasktypes "github.com/containerd/containerd/api/types/task"
|
||||
"github.com/containerd/errdefs"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
|
||||
"github.com/google/cadvisor/container/containerd/containers"
|
||||
"github.com/google/cadvisor/container/containerd/pkg/dialer"
|
||||
)
|
||||
|
||||
// client is the concrete ContainerdClient, holding the three containerd
// gRPC service stubs it needs (containers, tasks, version).
type client struct {
	containerService containersapi.ContainersClient
	taskService      tasksapi.TasksClient
	versionService   versionapi.VersionClient
}
|
||||
|
||||
// ContainerdClient is the subset of containerd operations cadvisor needs.
type ContainerdClient interface {
	// LoadContainer fetches the container record with the given id.
	LoadContainer(ctx context.Context, id string) (*containers.Container, error)
	// TaskPid returns the pid of the container's task.
	TaskPid(ctx context.Context, id string) (uint32, error)
	// Version returns the containerd daemon version string.
	Version(ctx context.Context) (string, error)
}
|
||||
|
||||
var (
	ErrTaskIsInUnknownState = errors.New("containerd task is in unknown state") // used when process reported in containerd task is in Unknown State
)

// once guards the one-time client construction in Client.
var once sync.Once

// ctrdClient is the process-wide containerd client singleton, set by Client.
var ctrdClient ContainerdClient = nil
||||
|
||||
const (
	// maxBackoffDelay caps the gRPC reconnect backoff.
	maxBackoffDelay = 3 * time.Second
	// baseBackoffDelay is the initial gRPC reconnect backoff.
	baseBackoffDelay = 100 * time.Millisecond
	// connectionTimeout bounds both the unix-socket probe and the dial.
	connectionTimeout = 2 * time.Second
	maxMsgSize        = 16 * 1024 * 1024 // 16MB
)
|
||||
|
||||
// Client creates a containerd client
// connected to the unix socket at address, with all requests scoped to
// the given containerd namespace. Construction happens only on the first
// call (guarded by once); later calls return the same singleton — note
// that their address/namespace arguments are therefore ignored, and a
// first-call failure is latched into retErr only for that call.
func Client(address, namespace string) (ContainerdClient, error) {
	var retErr error
	once.Do(func() {
		// Probe the socket first so an absent daemon fails fast with a
		// clear error instead of a gRPC dial timeout.
		tryConn, err := net.DialTimeout("unix", address, connectionTimeout)
		if err != nil {
			retErr = fmt.Errorf("containerd: cannot unix dial containerd api service: %v", err)
			return
		}
		tryConn.Close()

		connParams := grpc.ConnectParams{
			Backoff: backoff.DefaultConfig,
		}
		connParams.Backoff.BaseDelay = baseBackoffDelay
		connParams.Backoff.MaxDelay = maxBackoffDelay
		//nolint:staticcheck // SA1019
		gopts := []grpc.DialOption{
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithContextDialer(dialer.ContextDialer),
			grpc.WithBlock(),
			grpc.WithConnectParams(connParams),
			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)),
		}
		// Inject the containerd namespace into every unary/stream call.
		unary, stream := newNSInterceptors(namespace)
		gopts = append(gopts,
			grpc.WithUnaryInterceptor(unary),
			grpc.WithStreamInterceptor(stream),
		)

		ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout)
		defer cancel()
		//nolint:staticcheck // SA1019
		conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
		if err != nil {
			retErr = err
			return
		}
		ctrdClient = &client{
			containerService: containersapi.NewContainersClient(conn),
			taskService:      tasksapi.NewTasksClient(conn),
			versionService:   versionapi.NewVersionClient(conn),
		}
	})
	return ctrdClient, retErr
}
|
||||
|
||||
func (c *client) LoadContainer(ctx context.Context, id string) (*containers.Container, error) {
|
||||
r, err := c.containerService.Get(ctx, &containersapi.GetContainerRequest{
|
||||
ID: id,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
return containerFromProto(r.Container), nil
|
||||
}
|
||||
|
||||
func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) {
|
||||
response, err := c.taskService.Get(ctx, &tasksapi.GetRequest{
|
||||
ContainerID: id,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, errdefs.FromGRPC(err)
|
||||
}
|
||||
if response.Process.Status == tasktypes.Status_UNKNOWN {
|
||||
return 0, ErrTaskIsInUnknownState
|
||||
}
|
||||
return response.Process.Pid, nil
|
||||
}
|
||||
|
||||
func (c *client) Version(ctx context.Context) (string, error) {
|
||||
response, err := c.versionService.Version(ctx, &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return "", errdefs.FromGRPC(err)
|
||||
}
|
||||
return response.Version, nil
|
||||
}
|
||||
|
||||
func containerFromProto(containerpb *containersapi.Container) *containers.Container {
|
||||
var runtime containers.RuntimeInfo
|
||||
// TODO: is nil check required for containerpb
|
||||
if containerpb.Runtime != nil {
|
||||
runtime = containers.RuntimeInfo{
|
||||
Name: containerpb.Runtime.Name,
|
||||
Options: containerpb.Runtime.Options,
|
||||
}
|
||||
}
|
||||
return &containers.Container{
|
||||
ID: containerpb.ID,
|
||||
Labels: containerpb.Labels,
|
||||
Image: containerpb.Image,
|
||||
Runtime: runtime,
|
||||
Spec: containerpb.Spec,
|
||||
Snapshotter: containerpb.Snapshotter,
|
||||
SnapshotKey: containerpb.SnapshotKey,
|
||||
Extensions: containerpb.Extensions,
|
||||
}
|
||||
}
|
125
vendor/github.com/google/cadvisor/container/containerd/containers/containers.go
generated
vendored
Normal file
125
vendor/github.com/google/cadvisor/container/containerd/containers/containers.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
// Container represents the set of data pinned by a container. Unless otherwise
// noted, the resources here are considered in use by the container.
//
// The resources specified in this object are used to create tasks from the container.
type Container struct {
	// ID uniquely identifies the container in a namespace.
	//
	// This property is required and cannot be changed after creation.
	ID string

	// Labels provide metadata extension for a container.
	//
	// These are optional and fully mutable.
	Labels map[string]string

	// Image specifies the image reference used for a container.
	//
	// This property is optional and mutable.
	Image string

	// Runtime specifies which runtime should be used when launching container
	// tasks.
	//
	// This property is required and immutable.
	Runtime RuntimeInfo

	// Spec should carry the runtime specification used to implement the
	// container.
	//
	// This field is required but mutable.
	Spec *anypb.Any

	// SnapshotKey specifies the snapshot key to use for the container's root
	// filesystem. When starting a task from this container, a caller should
	// look up the mounts from the snapshot service and include those on the
	// task create request.
	//
	// This field is not required but mutable.
	SnapshotKey string

	// Snapshotter specifies the snapshotter name used for rootfs.
	//
	// This field is not required but immutable.
	Snapshotter string

	// CreatedAt is the time at which the container was created.
	CreatedAt time.Time

	// UpdatedAt is the time at which the container was updated.
	UpdatedAt time.Time

	// Extensions stores client-specified metadata.
	Extensions map[string]*anypb.Any
}
|
||||
|
||||
// RuntimeInfo holds runtime specific information.
type RuntimeInfo struct {
	// Name is the runtime identifier (e.g. a runtime plugin name).
	Name string
	// Options carries runtime-specific configuration as an opaque Any.
	Options *anypb.Any
}
|
||||
|
||||
// Store interacts with the underlying container storage.
type Store interface {
	// Get a container using the id.
	//
	// Container object is returned on success. If the id is not known to the
	// store, an error will be returned.
	Get(ctx context.Context, id string) (Container, error)

	// List returns containers that match one or more of the provided filters.
	List(ctx context.Context, filters ...string) ([]Container, error)

	// Create a container in the store from the provided container.
	Create(ctx context.Context, container Container) (Container, error)

	// Update the container with the provided container object. ID must be set.
	//
	// If one or more fieldpaths are provided, only the field corresponding to
	// the fieldpaths will be mutated.
	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)

	// Delete a container using the id.
	//
	// nil will be returned on success. If the container is not known to the
	// store, ErrNotFound will be returned.
	Delete(ctx context.Context, id string) error
}
|
157
vendor/github.com/google/cadvisor/container/containerd/factory.go
generated
vendored
Normal file
157
vendor/github.com/google/cadvisor/container/containerd/factory.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
)
|
||||
|
||||
// ArgContainerdEndpoint is the unix socket path of the containerd API.
var ArgContainerdEndpoint = flag.String("containerd", "/run/containerd/containerd.sock", "containerd endpoint")

// ArgContainerdNamespace is the containerd namespace to query for containers.
var ArgContainerdNamespace = flag.String("containerd-namespace", "k8s.io", "containerd namespace")

var containerdEnvMetadataWhiteList = flag.String("containerd_env_metadata_whitelist", "", "DEPRECATED: this flag will be removed, please use `env_metadata_whitelist`. A comma-separated list of environment variable keys matched with specified prefix that needs to be collected for containerd containers")

// The namespace under which containerd aliases are unique.
const k8sContainerdNamespace = "containerd"

// Regexp that identifies containerd cgroups, containers started with
// --cgroup-parent have another prefix than 'containerd'
var containerdCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`)
|
||||
|
||||
// containerdFactory creates container handlers for cgroups that belong to
// containerd-managed containers.
type containerdFactory struct {
	machineInfoFactory info.MachineInfoFactory
	// client talks to the containerd gRPC API.
	client  ContainerdClient
	// version is the containerd daemon version captured at registration.
	version string
	// Information about the mounted cgroup subsystems.
	cgroupSubsystems map[string]string
	// Information about mounted filesystems.
	fsInfo          fs.FsInfo
	includedMetrics container.MetricSet
}
|
||||
|
||||
// String identifies this factory by the containerd namespace name.
func (f *containerdFactory) String() string {
	return k8sContainerdNamespace
}
|
||||
|
||||
// NewContainerHandler builds a handler for the containerd container backing
// the given cgroup name. metadataEnvAllowList, when non-empty, takes
// precedence over the deprecated containerd_env_metadata_whitelist flag.
func (f *containerdFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
	// Returns the cached singleton client (see Client).
	client, err := Client(*ArgContainerdEndpoint, *ArgContainerdNamespace)
	if err != nil {
		return
	}

	containerdMetadataEnvAllowList := strings.Split(*containerdEnvMetadataWhiteList, ",")

	// prefer using the unified metadataEnvAllowList
	if len(metadataEnvAllowList) != 0 {
		containerdMetadataEnvAllowList = metadataEnvAllowList
	}

	return newContainerdContainerHandler(
		client,
		name,
		f.machineInfoFactory,
		f.fsInfo,
		f.cgroupSubsystems,
		inHostNamespace,
		containerdMetadataEnvAllowList,
		f.includedMetrics,
	)
}
|
||||
|
||||
// Returns the containerd ID from the full container name.
|
||||
func ContainerNameToContainerdID(name string) string {
|
||||
id := path.Base(name)
|
||||
if matches := containerdCgroupRegexp.FindStringSubmatch(id); matches != nil {
|
||||
return matches[1]
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// isContainerName returns true if the cgroup with associated name
// corresponds to a containerd container.
func isContainerName(name string) bool {
	// TODO: May be check with HasPrefix ContainerdNamespace
	// systemd mount units that appear under the cgroup tree are never containers.
	if strings.HasSuffix(name, ".mount") {
		return false
	}
	// A container cgroup's base element contains a 64-char hex container ID.
	return containerdCgroupRegexp.MatchString(path.Base(name))
}
|
||||
|
||||
// Containerd can handle and accept all containerd created containers
|
||||
func (f *containerdFactory) CanHandleAndAccept(name string) (bool, bool, error) {
|
||||
// if the container is not associated with containerd, we can't handle it or accept it.
|
||||
if !isContainerName(name) {
|
||||
return false, false, nil
|
||||
}
|
||||
// Check if the container is known to containerd and it is running.
|
||||
id := ContainerNameToContainerdID(name)
|
||||
// If container and task lookup in containerd fails then we assume
|
||||
// that the container state is not known to containerd
|
||||
ctx := context.Background()
|
||||
_, err := f.client.LoadContainer(ctx, id)
|
||||
if err != nil {
|
||||
return false, false, fmt.Errorf("failed to load container: %v", err)
|
||||
}
|
||||
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
func (f *containerdFactory) DebugInfo() map[string][]string {
|
||||
return map[string][]string{}
|
||||
}
|
||||
|
||||
// Register root container before running this function!
//
// Register wires the containerd container provider into cadvisor: it dials
// the containerd socket, records the daemon version, discovers the mounted
// cgroup subsystems, and registers a containerdFactory for raw-watched
// containers.
func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
	client, err := Client(*ArgContainerdEndpoint, *ArgContainerdNamespace)
	if err != nil {
		return fmt.Errorf("unable to create containerd client: %v", err)
	}

	// A version probe doubles as a connectivity check against the daemon.
	containerdVersion, err := client.Version(context.Background())
	if err != nil {
		return fmt.Errorf("failed to fetch containerd client version: %v", err)
	}

	cgroupSubsystems, err := libcontainer.GetCgroupSubsystems(includedMetrics)
	if err != nil {
		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
	}

	klog.V(1).Infof("Registering containerd factory")
	f := &containerdFactory{
		cgroupSubsystems:   cgroupSubsystems,
		client:             client,
		fsInfo:             fsInfo,
		machineInfoFactory: factory,
		version:            containerdVersion,
		includedMetrics:    includedMetrics,
	}

	container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})
	return nil
}
|
49
vendor/github.com/google/cadvisor/container/containerd/grpc.go
generated
vendored
Normal file
49
vendor/github.com/google/cadvisor/container/containerd/grpc.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This code has been taken from containerd repo to avoid large library import
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"github.com/google/cadvisor/container/containerd/namespaces"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// namespaceInterceptor stamps a default containerd namespace onto outgoing
// gRPC calls that do not already carry one.
type namespaceInterceptor struct {
	// namespace is the default applied when the call context has none.
	namespace string
}
|
||||
|
||||
func (ni namespaceInterceptor) unary(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
_, ok := namespaces.Namespace(ctx)
|
||||
if !ok {
|
||||
ctx = namespaces.WithNamespace(ctx, ni.namespace)
|
||||
}
|
||||
return invoker(ctx, method, req, reply, cc, opts...)
|
||||
}
|
||||
|
||||
func (ni namespaceInterceptor) stream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
_, ok := namespaces.Namespace(ctx)
|
||||
if !ok {
|
||||
ctx = namespaces.WithNamespace(ctx, ni.namespace)
|
||||
}
|
||||
return streamer(ctx, desc, cc, method, opts...)
|
||||
}
|
||||
|
||||
func newNSInterceptors(ns string) (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor) {
|
||||
ni := namespaceInterceptor{
|
||||
namespace: ns,
|
||||
}
|
||||
return grpc.UnaryClientInterceptor(ni.unary), grpc.StreamClientInterceptor(ni.stream)
|
||||
}
|
250
vendor/github.com/google/cadvisor/container/containerd/handler.go
generated
vendored
Normal file
250
vendor/github.com/google/cadvisor/container/containerd/handler.go
generated
vendored
Normal file
@ -0,0 +1,250 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Handler for containerd containers.
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// containerdContainerHandler serves cadvisor stats and metadata for a single
// containerd-managed container.
type containerdContainerHandler struct {
	machineInfoFactory info.MachineInfoFactory
	// Absolute path to the cgroup hierarchies of this container.
	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
	cgroupPaths map[string]string
	fsInfo      fs.FsInfo
	// Metadata associated with the container.
	reference info.ContainerReference
	// envs holds the allow-listed environment variables captured at creation.
	envs   map[string]string
	labels map[string]string
	// Image name used for this container.
	image string
	// Filesystem handler.
	includedMetrics container.MetricSet

	// libcontainerHandler provides the actual cgroup stats collection.
	libcontainerHandler *containerlibcontainer.Handler
}

// Compile-time check that the handler satisfies container.ContainerHandler.
var _ container.ContainerHandler = &containerdContainerHandler{}
||||
|
||||
// newContainerdContainerHandler returns a new container.ContainerHandler for
// the containerd container backing cgroup `name`. It loads the container
// record and OCI spec from containerd, resolves the task PID (retrying while
// the task is still being created), and captures allow-listed environment
// variables for metadata reporting.
func newContainerdContainerHandler(
	client ContainerdClient,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	cgroupSubsystems map[string]string,
	inHostNamespace bool,
	metadataEnvAllowList []string,
	includedMetrics container.MetricSet,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)

	// Generate the equivalent cgroup manager for this container.
	cgroupManager, err := containerlibcontainer.NewCgroupManager(name, cgroupPaths)
	if err != nil {
		return nil, err
	}

	id := ContainerNameToContainerdID(name)
	// We assume that if load fails then the container is not known to containerd.
	ctx := context.Background()
	cntr, err := client.LoadContainer(ctx, id)
	if err != nil {
		return nil, err
	}

	// Decode the OCI runtime spec embedded in the container record.
	var spec specs.Spec
	if err := json.Unmarshal(cntr.Spec.Value, &spec); err != nil {
		return nil, err
	}

	// Cgroup is created during task creation. When cadvisor sees the cgroup,
	// task may not be fully created yet. Use a retry+backoff to tolerant the
	// race condition.
	// TODO(random-liu): Use cri-containerd client to talk with cri-containerd
	// instead. cri-containerd has some internal synchronization to make sure
	// `ContainerStatus` only returns result after `StartContainer` finishes.
	var taskPid uint32
	backoff := 100 * time.Millisecond
	retry := 5
	for {
		taskPid, err = client.TaskPid(ctx, id)
		if err == nil {
			break
		}

		// Retry when task is not created yet or task is in unknown state (likely in process of initializing)
		isRetriableError := errdefs.IsNotFound(err) || errors.Is(err, ErrTaskIsInUnknownState)
		if !isRetriableError || retry == 0 {
			return nil, err
		}

		retry--
		time.Sleep(backoff)
		backoff *= 2
	}

	// When cadvisor itself runs in a container, host paths are mounted at /rootfs.
	rootfs := "/"
	if !inHostNamespace {
		rootfs = "/rootfs"
	}

	containerReference := info.ContainerReference{
		Id:        id,
		Name:      name,
		Namespace: k8sContainerdNamespace,
		Aliases:   []string{id, name},
	}

	// Containers that don't have their own network -- this includes
	// containers running in Kubernetes pods that use the network of the
	// infrastructure container -- does not need their stats to be
	// reported. This stops metrics being reported multiple times for each
	// container in a pod.
	metrics := common.RemoveNetMetrics(includedMetrics, cntr.Labels["io.cri-containerd.kind"] != "sandbox")

	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), metrics)

	handler := &containerdContainerHandler{
		machineInfoFactory:  machineInfoFactory,
		cgroupPaths:         cgroupPaths,
		fsInfo:              fsInfo,
		envs:                make(map[string]string),
		labels:              cntr.Labels,
		includedMetrics:     metrics,
		reference:           containerReference,
		libcontainerHandler: libcontainerHandler,
	}
	// Add the name and bare ID as aliases of the container.
	handler.image = cntr.Image

	// Capture environment variables whose names start with any allow-listed prefix.
	for _, exposedEnv := range metadataEnvAllowList {
		if exposedEnv == "" {
			// if no containerdEnvWhitelist provided, len(metadataEnvAllowList) == 1, metadataEnvAllowList[0] == ""
			continue
		}

		for _, envVar := range spec.Process.Env {
			if envVar != "" {
				splits := strings.SplitN(envVar, "=", 2)
				if len(splits) == 2 && strings.HasPrefix(splits[0], exposedEnv) {
					handler.envs[splits[0]] = splits[1]
				}
			}
		}
	}

	return handler, nil
}
|
||||
|
||||
// ContainerReference returns the identifying metadata captured at creation.
func (h *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) {
	return h.reference, nil
}
|
||||
|
||||
func (h *containerdContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
// TODO: Since we dont collect disk usage stats for containerd, we set hasFilesystem
|
||||
// to false. Revisit when we support disk usage stats for containerd
|
||||
hasFilesystem := false
|
||||
hasNet := h.includedMetrics.Has(container.NetworkUsageMetrics)
|
||||
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, hasNet, hasFilesystem)
|
||||
spec.Labels = h.labels
|
||||
spec.Envs = h.envs
|
||||
spec.Image = h.image
|
||||
|
||||
return spec, err
|
||||
}
|
||||
|
||||
func (h *containerdContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
mi, err := h.machineInfoFactory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if h.includedMetrics.Has(container.DiskIOMetrics) {
|
||||
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *containerdContainerHandler) GetStats() (*info.ContainerStats, error) {
|
||||
stats, err := h.libcontainerHandler.GetStats()
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
|
||||
// Get filesystem stats.
|
||||
err = h.getFsStats(stats)
|
||||
return stats, err
|
||||
}
|
||||
|
||||
func (h *containerdContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
|
||||
return []info.ContainerReference{}, nil
|
||||
}
|
||||
|
||||
func (h *containerdContainerHandler) GetCgroupPath(resource string) (string, error) {
|
||||
var res string
|
||||
if !cgroups.IsCgroup2UnifiedMode() {
|
||||
res = resource
|
||||
}
|
||||
path, ok := h.cgroupPaths[res]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// GetContainerLabels returns the labels recorded on the containerd container.
func (h *containerdContainerHandler) GetContainerLabels() map[string]string {
	return h.labels
}

// ListProcesses returns the PIDs of the processes in the container's cgroup.
func (h *containerdContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
	return h.libcontainerHandler.GetProcesses()
}

// Exists reports whether the container's cgroup directories still exist.
func (h *containerdContainerHandler) Exists() bool {
	return common.CgroupExists(h.cgroupPaths)
}

// Type identifies this handler as a containerd container handler.
func (h *containerdContainerHandler) Type() container.ContainerType {
	return container.ContainerTypeContainerd
}

// Start is a no-op; this handler needs no background work.
func (h *containerdContainerHandler) Start() {
}

// Cleanup is a no-op; this handler holds no resources to release.
func (h *containerdContainerHandler) Cleanup() {
}

// GetContainerIPAddress always returns the empty string.
func (h *containerdContainerHandler) GetContainerIPAddress() string {
	// containerd doesnt take care of networking.So it doesnt maintain networking states
	return ""
}
|
86
vendor/github.com/google/cadvisor/container/containerd/identifiers/validate.go
generated
vendored
Normal file
86
vendor/github.com/google/cadvisor/container/containerd/identifiers/validate.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package identifiers provides common validation for identifiers and keys
|
||||
// across containerd.
|
||||
//
|
||||
// Identifiers in containerd must be a alphanumeric, allowing limited
|
||||
// underscores, dashes and dots.
|
||||
//
|
||||
// While the character set may be expanded in the future, identifiers
|
||||
// are guaranteed to be safely used as filesystem path components.
|
||||
package identifiers
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// maxLength is the maximum allowed identifier length in bytes.
	maxLength = 76
	// alphanum matches one or more ASCII alphanumeric characters.
	alphanum = `[A-Za-z0-9]+`
	// separators matches a single dot, underscore, or dash.
	separators = `[._-]`
)

var (
	// identifierRe defines the pattern for valid identifiers: alphanumeric
	// runs optionally joined by single separator characters.
	identifierRe = regexp.MustCompile(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*"))
)
|
||||
|
||||
// Validate returns nil if the string s is a valid identifier.
|
||||
//
|
||||
// identifiers are similar to the domain name rules according to RFC 1035, section 2.3.1. However
|
||||
// rules in this package are relaxed to allow numerals to follow period (".") and mixed case is
|
||||
// allowed.
|
||||
//
|
||||
// In general identifiers that pass this validation should be safe for use as filesystem path components.
|
||||
func Validate(s string) error {
|
||||
if len(s) == 0 {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
|
||||
}
|
||||
|
||||
if len(s) > maxLength {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength)
|
||||
}
|
||||
|
||||
if !identifierRe.MatchString(s) {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// reGroup wraps the pattern s in a non-capturing regexp group.
func reGroup(s string) string {
	return "(?:" + s + ")"
}
|
||||
|
||||
// reAnchor anchors the pattern s to the start and end of the input.
func reAnchor(s string) string {
	return "^" + s + "$"
}
|
30
vendor/github.com/google/cadvisor/container/containerd/install/install.go
generated
vendored
Normal file
30
vendor/github.com/google/cadvisor/container/containerd/install/install.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The install package registers containerd.NewPlugin() as the "containerd" container provider when imported
|
||||
package install
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/containerd"
|
||||
)
|
||||
|
||||
// init registers the containerd plugin with the cadvisor container manager
// on import; registration failure is fatal because the provider was
// explicitly requested by importing this package.
func init() {
	err := container.RegisterPlugin("containerd", containerd.NewPlugin())
	if err != nil {
		klog.Fatalf("Failed to register containerd plugin: %v", err)
	}
}
|
92
vendor/github.com/google/cadvisor/container/containerd/namespaces/context.go
generated
vendored
Normal file
92
vendor/github.com/google/cadvisor/container/containerd/namespaces/context.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/google/cadvisor/container/containerd/identifiers"
|
||||
)
|
||||
|
||||
const (
	// NamespaceEnvVar is the environment variable key name
	NamespaceEnvVar = "CONTAINERD_NAMESPACE"
	// Default is the name of the default namespace
	Default = "default"
)

// namespaceKey is the private context key type under which the namespace
// string is stored; an unexported struct type avoids key collisions.
type namespaceKey struct{}
||||
|
||||
// WithNamespace sets a given namespace on the context
func WithNamespace(ctx context.Context, namespace string) context.Context {
	ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace
	// also store on the grpc and ttrpc headers so it gets picked up by any clients that
	// are using this.
	return withTTRPCNamespaceHeader(withGRPCNamespaceHeader(ctx, namespace), namespace)
}
|
||||
|
||||
// NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or
|
||||
// default
|
||||
func NamespaceFromEnv(ctx context.Context) context.Context {
|
||||
namespace := os.Getenv(NamespaceEnvVar)
|
||||
if namespace == "" {
|
||||
namespace = Default
|
||||
}
|
||||
return WithNamespace(ctx, namespace)
|
||||
}
|
||||
|
||||
// Namespace returns the namespace from the context.
|
||||
//
|
||||
// The namespace is not guaranteed to be valid.
|
||||
func Namespace(ctx context.Context) (string, bool) {
|
||||
namespace, ok := ctx.Value(namespaceKey{}).(string)
|
||||
if !ok {
|
||||
if namespace, ok = fromGRPCHeader(ctx); !ok {
|
||||
return fromTTRPCHeader(ctx)
|
||||
}
|
||||
}
|
||||
return namespace, ok
|
||||
}
|
||||
|
||||
// NamespaceRequired returns the valid namespace from the context or an error.
|
||||
func NamespaceRequired(ctx context.Context) (string, error) {
|
||||
namespace, ok := Namespace(ctx)
|
||||
if !ok || namespace == "" {
|
||||
return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required")
|
||||
}
|
||||
if err := identifiers.Validate(namespace); err != nil {
|
||||
return "", errors.Wrap(err, "namespace validation")
|
||||
}
|
||||
return namespace, nil
|
||||
}
|
74
vendor/github.com/google/cadvisor/container/containerd/namespaces/grpc.go
generated
vendored
Normal file
74
vendor/github.com/google/cadvisor/container/containerd/namespaces/grpc.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
// GRPCHeader defines the header name for specifying a containerd namespace.
|
||||
GRPCHeader = "containerd-namespace"
|
||||
)
|
||||
|
||||
// NOTE(stevvooe): We can stub this file out if we don't want a grpc dependency here.
|
||||
|
||||
func withGRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
|
||||
// also store on the grpc headers so it gets picked up by any clients that
|
||||
// are using this.
|
||||
nsheader := metadata.Pairs(GRPCHeader, namespace)
|
||||
md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context.
|
||||
if !ok {
|
||||
md = nsheader
|
||||
} else {
|
||||
// order ensures the latest is first in this list.
|
||||
md = metadata.Join(nsheader, md)
|
||||
}
|
||||
|
||||
return metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
func fromGRPCHeader(ctx context.Context) (string, bool) {
|
||||
// try to extract for use in grpc servers.
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
// TODO(stevvooe): Check outgoing context?
|
||||
return "", false
|
||||
}
|
||||
|
||||
values := md[GRPCHeader]
|
||||
if len(values) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return values[0], true
|
||||
}
|
57
vendor/github.com/google/cadvisor/container/containerd/namespaces/store.go
generated
vendored
Normal file
57
vendor/github.com/google/cadvisor/container/containerd/namespaces/store.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namespaces
|
||||
|
||||
import "context"
|
||||
|
||||
// Store provides introspection about namespaces.
|
||||
//
|
||||
// Note that these are slightly different than other objects, which are record
|
||||
// oriented. A namespace is really just a name and a set of labels. Objects
|
||||
// that belong to a namespace are returned when the namespace is assigned to a
|
||||
// given context.
|
||||
type Store interface {
|
||||
Create(ctx context.Context, namespace string, labels map[string]string) error
|
||||
Labels(ctx context.Context, namespace string) (map[string]string, error)
|
||||
SetLabel(ctx context.Context, namespace, key, value string) error
|
||||
List(ctx context.Context) ([]string, error)
|
||||
|
||||
// Delete removes the namespace. The namespace must be empty to be deleted.
|
||||
Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error
|
||||
}
|
||||
|
||||
// DeleteInfo specifies information for the deletion of a namespace
|
||||
type DeleteInfo struct {
|
||||
// Name of the namespace
|
||||
Name string
|
||||
}
|
||||
|
||||
// DeleteOpts allows the caller to set options for namespace deletion
|
||||
type DeleteOpts func(context.Context, *DeleteInfo) error
|
64
vendor/github.com/google/cadvisor/container/containerd/namespaces/ttrpc.go
generated
vendored
Normal file
64
vendor/github.com/google/cadvisor/container/containerd/namespaces/ttrpc.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namespaces
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/ttrpc"
|
||||
)
|
||||
|
||||
const (
|
||||
// TTRPCHeader defines the header name for specifying a containerd namespace
|
||||
TTRPCHeader = "containerd-namespace-ttrpc"
|
||||
)
|
||||
|
||||
func copyMetadata(src ttrpc.MD) ttrpc.MD {
|
||||
md := ttrpc.MD{}
|
||||
for k, v := range src {
|
||||
md[k] = append(md[k], v...)
|
||||
}
|
||||
return md
|
||||
}
|
||||
|
||||
func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
|
||||
md, ok := ttrpc.GetMetadata(ctx)
|
||||
if !ok {
|
||||
md = ttrpc.MD{}
|
||||
} else {
|
||||
md = copyMetadata(md)
|
||||
}
|
||||
md.Set(TTRPCHeader, namespace)
|
||||
return ttrpc.WithMetadata(ctx, md)
|
||||
}
|
||||
|
||||
func fromTTRPCHeader(ctx context.Context) (string, bool) {
|
||||
return ttrpc.GetMetadataValue(ctx, TTRPCHeader)
|
||||
}
|
92
vendor/github.com/google/cadvisor/container/containerd/pkg/dialer/dialer.go
generated
vendored
Normal file
92
vendor/github.com/google/cadvisor/container/containerd/pkg/dialer/dialer.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dialer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type dialResult struct {
|
||||
c net.Conn
|
||||
err error
|
||||
}
|
||||
|
||||
// ContextDialer returns a GRPC net.Conn connected to the provided address
|
||||
func ContextDialer(ctx context.Context, address string) (net.Conn, error) {
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
return timeoutDialer(address, time.Until(deadline))
|
||||
}
|
||||
return timeoutDialer(address, 0)
|
||||
}
|
||||
|
||||
// Dialer returns a GRPC net.Conn connected to the provided address
|
||||
// Deprecated: use ContextDialer and grpc.WithContextDialer.
|
||||
var Dialer = timeoutDialer
|
||||
|
||||
func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) {
|
||||
var (
|
||||
stopC = make(chan struct{})
|
||||
synC = make(chan *dialResult)
|
||||
)
|
||||
go func() {
|
||||
defer close(synC)
|
||||
for {
|
||||
select {
|
||||
case <-stopC:
|
||||
return
|
||||
default:
|
||||
c, err := dialer(address, timeout)
|
||||
if isNoent(err) {
|
||||
<-time.After(10 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
synC <- &dialResult{c, err}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case dr := <-synC:
|
||||
return dr.c, dr.err
|
||||
case <-time.After(timeout):
|
||||
close(stopC)
|
||||
go func() {
|
||||
dr := <-synC
|
||||
if dr != nil && dr.c != nil {
|
||||
dr.c.Close()
|
||||
}
|
||||
}()
|
||||
return nil, errors.Errorf("dial %s: timeout", address)
|
||||
}
|
||||
}
|
66
vendor/github.com/google/cadvisor/container/containerd/pkg/dialer/dialer_unix.go
generated
vendored
Normal file
66
vendor/github.com/google/cadvisor/container/containerd/pkg/dialer/dialer_unix.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dialer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DialAddress returns the address with unix:// prepended to the
|
||||
// provided address
|
||||
func DialAddress(address string) string {
|
||||
return fmt.Sprintf("unix://%s", address)
|
||||
}
|
||||
|
||||
func isNoent(err error) bool {
|
||||
if err != nil {
|
||||
if nerr, ok := err.(*net.OpError); ok {
|
||||
if serr, ok := nerr.Err.(*os.SyscallError); ok {
|
||||
if serr.Err == syscall.ENOENT {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func dialer(address string, timeout time.Duration) (net.Conn, error) {
|
||||
address = strings.TrimPrefix(address, "unix://")
|
||||
return net.DialTimeout("unix", address, timeout)
|
||||
}
|
51
vendor/github.com/google/cadvisor/container/containerd/pkg/dialer/dialer_windows.go
generated
vendored
Normal file
51
vendor/github.com/google/cadvisor/container/containerd/pkg/dialer/dialer_windows.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dialer
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
func isNoent(err error) bool {
|
||||
return os.IsNotExist(err)
|
||||
}
|
||||
|
||||
func dialer(address string, timeout time.Duration) (net.Conn, error) {
|
||||
return winio.DialPipe(address, &timeout)
|
||||
}
|
||||
|
||||
// DialAddress returns the dial address
|
||||
func DialAddress(address string) string {
|
||||
return address
|
||||
}
|
38
vendor/github.com/google/cadvisor/container/containerd/plugin.go
generated
vendored
Normal file
38
vendor/github.com/google/cadvisor/container/containerd/plugin.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
)
|
||||
|
||||
// NewPlugin returns an implementation of container.Plugin suitable for passing to container.RegisterPlugin()
|
||||
func NewPlugin() container.Plugin {
|
||||
return &plugin{}
|
||||
}
|
||||
|
||||
type plugin struct{}
|
||||
|
||||
func (p *plugin) InitializeFSContext(context *fs.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *plugin) Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) (watcher.ContainerWatcher, error) {
|
||||
err := Register(factory, fsInfo, includedMetrics)
|
||||
return nil, err
|
||||
}
|
167
vendor/github.com/google/cadvisor/container/crio/client.go
generated
vendored
Normal file
167
vendor/github.com/google/cadvisor/container/crio/client.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package crio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var crioClientTimeout = flag.Duration("crio_client_timeout", time.Duration(0), "CRI-O client timeout. Default is no timeout.")
|
||||
|
||||
const (
|
||||
CrioSocket = "/var/run/crio/crio.sock"
|
||||
maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
|
||||
)
|
||||
|
||||
var (
|
||||
theClient CrioClient
|
||||
clientErr error
|
||||
crioClientOnce sync.Once
|
||||
)
|
||||
|
||||
// Info represents CRI-O information as sent by the CRI-O server
|
||||
type Info struct {
|
||||
StorageDriver string `json:"storage_driver"`
|
||||
StorageRoot string `json:"storage_root"`
|
||||
StorageImage string `json:"storage_image"`
|
||||
}
|
||||
|
||||
// ContainerInfo represents a given container information
|
||||
type ContainerInfo struct {
|
||||
Name string `json:"name"`
|
||||
Pid int `json:"pid"`
|
||||
Image string `json:"image"`
|
||||
CreatedTime int64 `json:"created_time"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
Annotations map[string]string `json:"annotations"`
|
||||
LogPath string `json:"log_path"`
|
||||
Root string `json:"root"`
|
||||
IP string `json:"ip_address"`
|
||||
IPs []string `json:"ip_addresses"`
|
||||
}
|
||||
|
||||
type CrioClient interface {
|
||||
Info() (Info, error)
|
||||
ContainerInfo(string) (*ContainerInfo, error)
|
||||
}
|
||||
|
||||
type crioClientImpl struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func configureUnixTransport(tr *http.Transport, proto, addr string) error {
|
||||
if len(addr) > maxUnixSocketPathSize {
|
||||
return fmt.Errorf("Unix socket path %q is too long", addr)
|
||||
}
|
||||
// No need for compression in local communications.
|
||||
tr.DisableCompression = true
|
||||
tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.DialTimeout(proto, addr, 32*time.Second)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Client returns a new configured CRI-O client
|
||||
func Client() (CrioClient, error) {
|
||||
crioClientOnce.Do(func() {
|
||||
tr := new(http.Transport)
|
||||
theClient = nil
|
||||
if clientErr = configureUnixTransport(tr, "unix", CrioSocket); clientErr != nil {
|
||||
return
|
||||
}
|
||||
theClient = &crioClientImpl{
|
||||
client: &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: *crioClientTimeout,
|
||||
},
|
||||
}
|
||||
})
|
||||
return theClient, clientErr
|
||||
}
|
||||
|
||||
func getRequest(path string) (*http.Request, error) {
|
||||
req, err := http.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// For local communications over a unix socket, it doesn't matter what
|
||||
// the host is. We just need a valid and meaningful host name.
|
||||
req.Host = "crio"
|
||||
req.URL.Host = CrioSocket
|
||||
req.URL.Scheme = "http"
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Info returns generic info from the CRI-O server
|
||||
func (c *crioClientImpl) Info() (Info, error) {
|
||||
info := Info{}
|
||||
req, err := getRequest("/info")
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
|
||||
return info, err
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// ContainerInfo returns information about a given container
|
||||
func (c *crioClientImpl) ContainerInfo(id string) (*ContainerInfo, error) {
|
||||
req, err := getRequest("/containers/" + id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cInfo := ContainerInfo{}
|
||||
resp, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// golang's http.Do doesn't return an error if non 200 response code is returned
|
||||
// handle this case here, rather than failing to decode the body
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error finding container %s: Status %d", id, resp.StatusCode)
|
||||
}
|
||||
return nil, fmt.Errorf("Error finding container %s: Status %d returned error %s", id, resp.StatusCode, string(respBody))
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&cInfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cInfo.IP) > 0 {
|
||||
return &cInfo, nil
|
||||
}
|
||||
if len(cInfo.IPs) > 0 {
|
||||
cInfo.IP = cInfo.IPs[0]
|
||||
}
|
||||
return &cInfo, nil
|
||||
}
|
166
vendor/github.com/google/cadvisor/container/crio/factory.go
generated
vendored
Normal file
166
vendor/github.com/google/cadvisor/container/crio/factory.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package crio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// The namespace under which crio aliases are unique.
|
||||
const CrioNamespace = "crio"
|
||||
|
||||
// The namespace systemd runs components under.
|
||||
const SystemdNamespace = "system-systemd"
|
||||
|
||||
// Regexp that identifies CRI-O cgroups
|
||||
var crioCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`)
|
||||
|
||||
type storageDriver string
|
||||
|
||||
const (
|
||||
// TODO add full set of supported drivers in future..
|
||||
overlayStorageDriver storageDriver = "overlay"
|
||||
overlay2StorageDriver storageDriver = "overlay2"
|
||||
)
|
||||
|
||||
type crioFactory struct {
|
||||
machineInfoFactory info.MachineInfoFactory
|
||||
|
||||
storageDriver storageDriver
|
||||
storageDir string
|
||||
|
||||
// Information about the mounted cgroup subsystems.
|
||||
cgroupSubsystems map[string]string
|
||||
|
||||
// Information about mounted filesystems.
|
||||
fsInfo fs.FsInfo
|
||||
|
||||
includedMetrics container.MetricSet
|
||||
|
||||
client CrioClient
|
||||
}
|
||||
|
||||
func (f *crioFactory) String() string {
|
||||
return CrioNamespace
|
||||
}
|
||||
|
||||
func (f *crioFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
|
||||
client, err := Client()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
handler, err = newCrioContainerHandler(
|
||||
client,
|
||||
name,
|
||||
f.machineInfoFactory,
|
||||
f.fsInfo,
|
||||
f.storageDriver,
|
||||
f.storageDir,
|
||||
f.cgroupSubsystems,
|
||||
inHostNamespace,
|
||||
metadataEnvAllowList,
|
||||
f.includedMetrics,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// Returns the CRIO ID from the full container name.
|
||||
func ContainerNameToCrioId(name string) string {
|
||||
id := path.Base(name)
|
||||
|
||||
if matches := crioCgroupRegexp.FindStringSubmatch(id); matches != nil {
|
||||
return matches[1]
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
// isContainerName returns true if the cgroup with associated name
|
||||
// corresponds to a crio container.
|
||||
func isContainerName(name string) bool {
|
||||
// always ignore .mount cgroup even if associated with crio and delegate to systemd
|
||||
if strings.HasSuffix(name, ".mount") {
|
||||
return false
|
||||
}
|
||||
return crioCgroupRegexp.MatchString(path.Base(name))
|
||||
}
|
||||
|
||||
// crio handles all containers under /crio
|
||||
func (f *crioFactory) CanHandleAndAccept(name string) (bool, bool, error) {
|
||||
if strings.HasPrefix(path.Base(name), "crio-conmon") {
|
||||
// TODO(runcom): should we include crio-conmon cgroups?
|
||||
return false, false, nil
|
||||
}
|
||||
if !strings.HasPrefix(path.Base(name), CrioNamespace) {
|
||||
return false, false, nil
|
||||
}
|
||||
if strings.HasPrefix(path.Base(name), SystemdNamespace) {
|
||||
return true, false, nil
|
||||
}
|
||||
// if the container is not associated with CRI-O, we can't handle it or accept it.
|
||||
if !isContainerName(name) {
|
||||
return false, false, nil
|
||||
}
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
func (f *crioFactory) DebugInfo() map[string][]string {
|
||||
return map[string][]string{}
|
||||
}
|
||||
|
||||
// Register root container before running this function!
|
||||
func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
|
||||
client, err := Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info, err := client.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO determine crio version so we can work differently w/ future versions if needed
|
||||
|
||||
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems(includedMetrics)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
|
||||
}
|
||||
|
||||
klog.V(1).Infof("Registering CRI-O factory")
|
||||
f := &crioFactory{
|
||||
client: client,
|
||||
cgroupSubsystems: cgroupSubsystems,
|
||||
fsInfo: fsInfo,
|
||||
machineInfoFactory: factory,
|
||||
storageDriver: storageDriver(info.StorageDriver),
|
||||
storageDir: info.StorageRoot,
|
||||
includedMetrics: includedMetrics,
|
||||
}
|
||||
|
||||
container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw})
|
||||
return nil
|
||||
}
|
362
vendor/github.com/google/cadvisor/container/crio/handler.go
generated
vendored
Normal file
362
vendor/github.com/google/cadvisor/container/crio/handler.go
generated
vendored
Normal file
@ -0,0 +1,362 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Handler for CRI-O containers.
|
||||
package crio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// crioContainerHandler implements container.ContainerHandler for a single
// CRI-O managed container. It combines cgroup-based statistics (via a
// libcontainer Handler) with metadata fetched from the CRI-O API.
type crioContainerHandler struct {
	// client talks to the local CRI-O daemon.
	client CrioClient
	// name is the absolute cgroup name of the container.
	name string

	machineInfoFactory info.MachineInfoFactory

	// Absolute path to the cgroup hierarchies of this container.
	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
	cgroupPaths map[string]string

	// the CRI-O storage driver
	storageDriver storageDriver
	fsInfo        fs.FsInfo
	// rootfsStorageDir is the writable-layer directory whose disk usage is
	// measured when DiskUsageMetrics is enabled.
	rootfsStorageDir string

	// Metadata associated with the container.
	envs   map[string]string
	labels map[string]string

	// TODO
	// crio version handling...

	// Image name used for this container.
	image string

	// The network mode of the container
	// TODO

	// Filesystem handler.
	fsHandler common.FsHandler

	// The IP address of the container
	ipAddress string

	includedMetrics container.MetricSet

	reference info.ContainerReference

	// libcontainerHandler supplies cgroup/network stats; it is rebuilt by
	// getLibcontainerHandler once the container PID becomes known.
	libcontainerHandler *containerlibcontainer.Handler
	cgroupManager       cgroups.Manager
	rootFs              string
	// pidKnown records whether the PID used to build libcontainerHandler
	// was valid (non-zero) at construction time.
	pidKnown bool
}

// Compile-time assertion that the handler satisfies the interface.
var _ container.ContainerHandler = &crioContainerHandler{}
||||
|
||||
// newCrioContainerHandler returns a new container.ContainerHandler for the
// named CRI-O container. It resolves the container's cgroup manager, PID,
// rootfs storage location and Kubernetes metadata up front so that later
// stat calls are cheap.
func newCrioContainerHandler(
	client CrioClient,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	storageDriver storageDriver,
	storageDir string,
	cgroupSubsystems map[string]string,
	inHostNamespace bool,
	metadataEnvAllowList []string,
	includedMetrics container.MetricSet,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)

	// Generate the equivalent cgroup manager for this container.
	cgroupManager, err := containerlibcontainer.NewCgroupManager(name, cgroupPaths)
	if err != nil {
		return nil, err
	}

	// When cAdvisor itself runs inside a container, host paths are visible
	// under /rootfs instead of /.
	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
	}

	id := ContainerNameToCrioId(name)
	pidKnown := true

	cInfo, err := client.ContainerInfo(id)
	if err != nil {
		return nil, err
	}
	if cInfo.Pid == 0 {
		// If pid is not known yet, network related stats can not be retrieved by the
		// libcontainer handler GetStats(). In this case, the crio handler GetStats()
		// will reattempt to get the pid and, if now known, will construct the libcontainer
		// handler. This libcontainer handler is then cached and reused without additional
		// calls to crio.
		pidKnown = false
	}

	// passed to fs handler below ...
	// XXX: this is using the full container logpath, as constructed by the CRI
	// /var/log/pods/<pod_uuid>/container_instance.log
	// It's not actually a log dir, as the CRI doesn't have per-container dirs
	// under /var/log/pods/<pod_uuid>/
	// We can't use /var/log/pods/<pod_uuid>/ to count per-container log usage.
	// We use the container log file directly.
	storageLogDir := cInfo.LogPath

	// Determine the rootfs storage dir
	rootfsStorageDir := cInfo.Root
	// TODO(runcom): CRI-O doesn't strip /merged but we need to in order to
	// get device ID from root, otherwise, it's going to error out as overlay
	// mounts doesn't have fixed dev ids.
	rootfsStorageDir = strings.TrimSuffix(rootfsStorageDir, "/merged")
	switch storageDriver {
	case overlayStorageDriver, overlay2StorageDriver:
		// overlay and overlay2 driver are the same "overlay2" driver so treat
		// them the same.
		rootfsStorageDir = filepath.Join(rootfsStorageDir, "diff")
	}

	containerReference := info.ContainerReference{
		Id:        id,
		Name:      name,
		Aliases:   []string{cInfo.Name, id},
		Namespace: CrioNamespace,
	}

	// Find out if we need network metrics reported for this container.
	// Containers that don't have their own network -- this includes
	// containers running in Kubernetes pods that use the network of the
	// infrastructure container -- does not need their stats to be
	// reported. This stops metrics being reported multiple times for each
	// container in a pod.
	metrics := common.RemoveNetMetrics(includedMetrics, cInfo.Labels["io.kubernetes.container.name"] != "POD")

	libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, metrics)

	// TODO: extract object mother method
	handler := &crioContainerHandler{
		client:              client,
		name:                name,
		machineInfoFactory:  machineInfoFactory,
		cgroupPaths:         cgroupPaths,
		storageDriver:       storageDriver,
		fsInfo:              fsInfo,
		rootfsStorageDir:    rootfsStorageDir,
		envs:                make(map[string]string),
		labels:              cInfo.Labels,
		includedMetrics:     metrics,
		reference:           containerReference,
		libcontainerHandler: libcontainerHandler,
		cgroupManager:       cgroupManager,
		rootFs:              rootFs,
		pidKnown:            pidKnown,
	}

	handler.image = cInfo.Image
	// TODO: we wanted to know graph driver DeviceId (don't think this is needed now)

	// ignore err and get zero as default, this happens with sandboxes, not sure why...
	// kube isn't sending restart count in labels for sandboxes.
	restartCount, _ := strconv.Atoi(cInfo.Annotations["io.kubernetes.container.restartCount"])
	// Only adds restartcount label if it's greater than 0
	if restartCount > 0 {
		handler.labels["restartcount"] = strconv.Itoa(restartCount)
	}

	handler.ipAddress = cInfo.IP

	// we optionally collect disk usage metrics
	if includedMetrics.Has(container.DiskUsageMetrics) {
		handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, storageLogDir, fsInfo)
	}
	// TODO for env vars we wanted to show from container.Config.Env from whitelist
	//for _, exposedEnv := range metadataEnvAllowList {
	//klog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
	//}

	return handler, nil
}
|
||||
|
||||
func (h *crioContainerHandler) Start() {
|
||||
if h.fsHandler != nil {
|
||||
h.fsHandler.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) Cleanup() {
|
||||
if h.fsHandler != nil {
|
||||
h.fsHandler.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) ContainerReference() (info.ContainerReference, error) {
|
||||
return h.reference, nil
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
hasFilesystem := h.includedMetrics.Has(container.DiskUsageMetrics)
|
||||
hasNet := h.includedMetrics.Has(container.NetworkUsageMetrics)
|
||||
spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, hasNet, hasFilesystem)
|
||||
|
||||
spec.Labels = h.labels
|
||||
spec.Envs = h.envs
|
||||
spec.Image = h.image
|
||||
|
||||
return spec, err
|
||||
}
|
||||
|
||||
// getFsStats appends filesystem statistics (device, capacity, usage, inodes)
// for the container's writable layer to stats, and resolves disk I/O device
// names when DiskIOMetrics is enabled. Only the overlay/overlay2 storage
// drivers are supported; other drivers are silently skipped.
func (h *crioContainerHandler) getFsStats(stats *info.ContainerStats) error {
	mi, err := h.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	if h.includedMetrics.Has(container.DiskIOMetrics) {
		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
	}

	if !h.includedMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}
	// Resolve the block device backing the rootfs storage directory.
	var device string
	switch h.storageDriver {
	case overlay2StorageDriver, overlayStorageDriver:
		deviceInfo, err := h.fsInfo.GetDirFsDevice(h.rootfsStorageDir)
		if err != nil {
			return fmt.Errorf("unable to determine device info for dir: %v: %v", h.rootfsStorageDir, err)
		}
		device = deviceInfo.Device
	default:
		// Unsupported storage driver: report no filesystem stats.
		return nil
	}

	var (
		limit  uint64
		fsType string
	)

	// crio does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == device {
			limit = fs.Capacity
			fsType = fs.Type
			break
		}
	}

	// fsType is only empty when the device was not found above.
	if fsType == "" {
		return fmt.Errorf("unable to determine fs type for device: %v", device)
	}
	fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}
	usage := h.fsHandler.Usage()
	fsStat.BaseUsage = usage.BaseUsageBytes
	fsStat.Usage = usage.TotalUsageBytes
	fsStat.Inodes = usage.InodeUsage

	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
|
||||
|
||||
func (h *crioContainerHandler) getLibcontainerHandler() *containerlibcontainer.Handler {
|
||||
if h.pidKnown {
|
||||
return h.libcontainerHandler
|
||||
}
|
||||
|
||||
id := ContainerNameToCrioId(h.name)
|
||||
|
||||
cInfo, err := h.client.ContainerInfo(id)
|
||||
if err != nil || cInfo.Pid == 0 {
|
||||
return h.libcontainerHandler
|
||||
}
|
||||
|
||||
h.pidKnown = true
|
||||
h.libcontainerHandler = containerlibcontainer.NewHandler(h.cgroupManager, h.rootFs, cInfo.Pid, h.includedMetrics)
|
||||
|
||||
return h.libcontainerHandler
|
||||
}
|
||||
|
||||
// GetStats returns the current cgroup, network and (optionally) filesystem
// statistics for the container. When network metrics come back empty, the
// cached PID is marked stale so the next call re-resolves it via CRI-O.
func (h *crioContainerHandler) GetStats() (*info.ContainerStats, error) {
	libcontainerHandler := h.getLibcontainerHandler()
	stats, err := libcontainerHandler.GetStats()
	if err != nil {
		return stats, err
	}

	if h.includedMetrics.Has(container.NetworkUsageMetrics) && len(stats.Network.Interfaces) == 0 {
		// No network related information indicates that the pid of the
		// container is no longer valid and we need to ask crio to
		// provide the pid of another container from that pod
		h.pidKnown = false
		return stats, nil
	}
	// Get filesystem stats.
	err = h.getFsStats(stats)
	if err != nil {
		return stats, err
	}

	return stats, nil
}
|
||||
|
||||
func (h *crioContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
|
||||
// No-op for Docker driver.
|
||||
return []info.ContainerReference{}, nil
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) GetCgroupPath(resource string) (string, error) {
|
||||
var res string
|
||||
if !cgroups.IsCgroup2UnifiedMode() {
|
||||
res = resource
|
||||
}
|
||||
path, ok := h.cgroupPaths[res]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) GetContainerLabels() map[string]string {
|
||||
return h.labels
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) GetContainerIPAddress() string {
|
||||
return h.ipAddress
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
|
||||
return h.libcontainerHandler.GetProcesses()
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) Exists() bool {
|
||||
return common.CgroupExists(h.cgroupPaths)
|
||||
}
|
||||
|
||||
func (h *crioContainerHandler) Type() container.ContainerType {
|
||||
return container.ContainerTypeCrio
|
||||
}
|
30
vendor/github.com/google/cadvisor/container/crio/install/install.go
generated
vendored
Normal file
30
vendor/github.com/google/cadvisor/container/crio/install/install.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The install package registers crio.NewPlugin() as the "crio" container provider when imported
|
||||
package install
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/crio"
|
||||
)
|
||||
|
||||
func init() {
|
||||
err := container.RegisterPlugin("crio", crio.NewPlugin())
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to register crio plugin: %v", err)
|
||||
}
|
||||
}
|
51
vendor/github.com/google/cadvisor/container/crio/plugin.go
generated
vendored
Normal file
51
vendor/github.com/google/cadvisor/container/crio/plugin.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package crio
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
)
|
||||
|
||||
// NewPlugin returns an implementation of container.Plugin suitable for passing to container.RegisterPlugin()
|
||||
func NewPlugin() container.Plugin {
|
||||
return &plugin{}
|
||||
}
|
||||
|
||||
type plugin struct{}
|
||||
|
||||
func (p *plugin) InitializeFSContext(context *fs.Context) error {
|
||||
crioClient, err := Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
crioInfo, err := crioClient.Info()
|
||||
if err != nil {
|
||||
klog.V(5).Infof("CRI-O not connected: %v", err)
|
||||
} else {
|
||||
context.Crio = fs.CrioContext{Root: crioInfo.StorageRoot, ImageStore: crioInfo.StorageImage, Driver: crioInfo.StorageDriver}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *plugin) Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) (watcher.ContainerWatcher, error) {
|
||||
err := Register(factory, fsInfo, includedMetrics)
|
||||
return nil, err
|
||||
}
|
330
vendor/github.com/google/cadvisor/container/factory.go
generated
vendored
Normal file
330
vendor/github.com/google/cadvisor/container/factory.go
generated
vendored
Normal file
@ -0,0 +1,330 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ContainerHandlerFactory produces ContainerHandlers for the containers a
// particular runtime integration knows how to manage.
type ContainerHandlerFactory interface {
	// Create a new ContainerHandler using this factory. CanHandleAndAccept() must have returned true.
	NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (c ContainerHandler, err error)

	// Returns whether this factory can handle and accept the specified container.
	CanHandleAndAccept(name string) (handle bool, accept bool, err error)

	// Name of the factory.
	String() string

	// Returns debugging information. Map of lines per category.
	DebugInfo() map[string][]string
}
|
||||
|
||||
// MetricKind represents the kind of metrics that cAdvisor exposes.
type MetricKind string

const (
	CpuUsageMetrics                MetricKind = "cpu"
	ProcessSchedulerMetrics        MetricKind = "sched"
	PerCpuUsageMetrics             MetricKind = "percpu"
	MemoryUsageMetrics             MetricKind = "memory"
	MemoryNumaMetrics              MetricKind = "memory_numa"
	CpuLoadMetrics                 MetricKind = "cpuLoad"
	DiskIOMetrics                  MetricKind = "diskIO"
	DiskUsageMetrics               MetricKind = "disk"
	NetworkUsageMetrics            MetricKind = "network"
	NetworkTcpUsageMetrics         MetricKind = "tcp"
	NetworkAdvancedTcpUsageMetrics MetricKind = "advtcp"
	NetworkUdpUsageMetrics         MetricKind = "udp"
	AppMetrics                     MetricKind = "app"
	ProcessMetrics                 MetricKind = "process"
	HugetlbUsageMetrics            MetricKind = "hugetlb"
	PerfMetrics                    MetricKind = "perf_event"
	ReferencedMemoryMetrics        MetricKind = "referenced_memory"
	CPUTopologyMetrics             MetricKind = "cpu_topology"
	ResctrlMetrics                 MetricKind = "resctrl"
	CPUSetMetrics                  MetricKind = "cpuset"
	OOMMetrics                     MetricKind = "oom_event"
)

// AllMetrics represents all kinds of metrics that cAdvisor supported.
var AllMetrics = MetricSet{
	CpuUsageMetrics:                struct{}{},
	ProcessSchedulerMetrics:        struct{}{},
	PerCpuUsageMetrics:             struct{}{},
	MemoryUsageMetrics:             struct{}{},
	MemoryNumaMetrics:              struct{}{},
	CpuLoadMetrics:                 struct{}{},
	DiskIOMetrics:                  struct{}{},
	DiskUsageMetrics:               struct{}{},
	NetworkUsageMetrics:            struct{}{},
	NetworkTcpUsageMetrics:         struct{}{},
	NetworkAdvancedTcpUsageMetrics: struct{}{},
	NetworkUdpUsageMetrics:         struct{}{},
	ProcessMetrics:                 struct{}{},
	AppMetrics:                     struct{}{},
	HugetlbUsageMetrics:            struct{}{},
	PerfMetrics:                    struct{}{},
	ReferencedMemoryMetrics:        struct{}{},
	CPUTopologyMetrics:             struct{}{},
	ResctrlMetrics:                 struct{}{},
	CPUSetMetrics:                  struct{}{},
	OOMMetrics:                     struct{}{},
}

// AllNetworkMetrics represents all network metrics that cAdvisor supports.
var AllNetworkMetrics = MetricSet{
	NetworkUsageMetrics:            struct{}{},
	NetworkTcpUsageMetrics:         struct{}{},
	NetworkAdvancedTcpUsageMetrics: struct{}{},
	NetworkUdpUsageMetrics:         struct{}{},
}

// String returns the raw string value of the metric kind.
func (mk MetricKind) String() string {
	return string(mk)
}

// MetricSet is a set of MetricKinds, represented as a map with empty values.
type MetricSet map[MetricKind]struct{}

// Has reports whether mk is a member of the set.
func (ms MetricSet) Has(mk MetricKind) bool {
	_, exists := ms[mk]
	return exists
}

// HasAny reports whether the set shares at least one member with ms1.
func (ms MetricSet) HasAny(ms1 MetricSet) bool {
	for m := range ms1 {
		if ms.Has(m) {
			return true
		}
	}
	return false
}

// add inserts mk into the set in place.
func (ms MetricSet) add(mk MetricKind) {
	ms[mk] = struct{}{}
}

// String renders the set as a sorted, comma-separated list of metric names.
func (ms MetricSet) String() string {
	names := make([]string, 0, len(ms))
	for metric := range ms {
		names = append(names, string(metric))
	}
	sort.Strings(names)
	return strings.Join(names, ",")
}

// Set replaces the receiver with the metrics parsed from a comma-separated
// list, rejecting any name not present in AllMetrics.
// Not thread-safe, exported only for https://pkg.go.dev/flag#Value
func (ms *MetricSet) Set(value string) error {
	*ms = MetricSet{}
	if value == "" {
		return nil
	}
	for _, metric := range strings.Split(value, ",") {
		kind := MetricKind(metric)
		if !AllMetrics.Has(kind) {
			return fmt.Errorf("unsupported metric %q specified", metric)
		}
		ms.add(kind)
	}
	return nil
}

// Difference returns a new set with the members of ms that are not in ms1.
func (ms MetricSet) Difference(ms1 MetricSet) MetricSet {
	out := MetricSet{}
	for kind := range ms {
		if !ms1.Has(kind) {
			out.add(kind)
		}
	}
	return out
}

// Append adds the members of ms1 to the receiver and returns it. Because a
// MetricSet is a map, the receiver itself is mutated in place.
func (ms MetricSet) Append(ms1 MetricSet) MetricSet {
	for kind := range ms1 {
		if !ms.Has(kind) {
			ms.add(kind)
		}
	}
	return ms
}
|
||||
|
||||
// All registered container runtime plugins.
// pluginsLock guards the plugins map below.
var pluginsLock sync.Mutex

// plugins maps plugin name -> implementation; mutated only under pluginsLock.
var plugins = make(map[string]Plugin)

// Plugin is the integration point for a container runtime to hook into cAdvisor.
type Plugin interface {
	// InitializeFSContext is invoked when populating an fs.Context object for a new manager.
	// A returned error here is fatal.
	InitializeFSContext(context *fs.Context) error

	// Register is invoked when starting a manager. It can optionally return a container watcher.
	// A returned error is logged, but is not fatal.
	Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics MetricSet) (watcher.ContainerWatcher, error)
}
|
||||
|
||||
func RegisterPlugin(name string, plugin Plugin) error {
|
||||
pluginsLock.Lock()
|
||||
defer pluginsLock.Unlock()
|
||||
if _, found := plugins[name]; found {
|
||||
return fmt.Errorf("Plugin %q was registered twice", name)
|
||||
}
|
||||
klog.V(4).Infof("Registered Plugin %q", name)
|
||||
plugins[name] = plugin
|
||||
return nil
|
||||
}
|
||||
|
||||
func InitializeFSContext(context *fs.Context) error {
|
||||
pluginsLock.Lock()
|
||||
defer pluginsLock.Unlock()
|
||||
for name, plugin := range plugins {
|
||||
err := plugin.InitializeFSContext(context)
|
||||
if err != nil {
|
||||
klog.V(5).Infof("Initialization of the %s context failed: %v", name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func InitializePlugins(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics MetricSet) []watcher.ContainerWatcher {
|
||||
pluginsLock.Lock()
|
||||
defer pluginsLock.Unlock()
|
||||
|
||||
containerWatchers := []watcher.ContainerWatcher{}
|
||||
for name, plugin := range plugins {
|
||||
watcher, err := plugin.Register(factory, fsInfo, includedMetrics)
|
||||
if err != nil {
|
||||
klog.Infof("Registration of the %s container factory failed: %v", name, err)
|
||||
} else {
|
||||
klog.Infof("Registration of the %s container factory successfully", name)
|
||||
}
|
||||
if watcher != nil {
|
||||
containerWatchers = append(containerWatchers, watcher)
|
||||
}
|
||||
}
|
||||
return containerWatchers
|
||||
}
|
||||
|
||||
// TODO(vmarmol): Consider not making this global.
// Global list of factories.
var (
	// factories maps a watch source to its registered handler factories,
	// in registration order.
	factories = map[watcher.ContainerWatchSource][]ContainerHandlerFactory{}
	// factoriesLock guards factories (RLock for lookups, Lock for mutation).
	factoriesLock sync.RWMutex
)
|
||||
|
||||
// Register a ContainerHandlerFactory. These should be registered from least general to most general
|
||||
// as they will be asked in order whether they can handle a particular container.
|
||||
func RegisterContainerHandlerFactory(factory ContainerHandlerFactory, watchTypes []watcher.ContainerWatchSource) {
|
||||
factoriesLock.Lock()
|
||||
defer factoriesLock.Unlock()
|
||||
|
||||
for _, watchType := range watchTypes {
|
||||
factories[watchType] = append(factories[watchType], factory)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns whether there are any container handler factories registered.
|
||||
func HasFactories() bool {
|
||||
factoriesLock.Lock()
|
||||
defer factoriesLock.Unlock()
|
||||
|
||||
return len(factories) != 0
|
||||
}
|
||||
|
||||
// Create a new ContainerHandler for the specified container.
// The bool result reports whether the container was accepted; a factory may
// handle a container yet decline it (nil handler, false, nil error). An
// error is returned only when no registered factory can handle the name.
func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, metadataEnvAllowList []string, inHostNamespace bool) (ContainerHandler, bool, error) {
	factoriesLock.RLock()
	defer factoriesLock.RUnlock()

	// Create the ContainerHandler with the first factory that supports it.
	// Note that since RawContainerHandler can support a wide range of paths,
	// it's evaluated last just to make sure if any other ContainerHandler
	// can support it.
	for _, factory := range GetReorderedFactoryList(watchType) {
		canHandle, canAccept, err := factory.CanHandleAndAccept(name)
		if err != nil {
			klog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
		}
		if canHandle {
			if !canAccept {
				klog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name)
				return nil, false, nil
			}
			klog.V(3).Infof("Using factory %q for container %q", factory, name)
			handle, err := factory.NewContainerHandler(name, metadataEnvAllowList, inHostNamespace)
			return handle, canAccept, err
		}
		klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
	}

	return nil, false, fmt.Errorf("no known factory can handle creation of container")
}
|
||||
|
||||
// Clear the known factories.
|
||||
func ClearContainerHandlerFactories() {
|
||||
factoriesLock.Lock()
|
||||
defer factoriesLock.Unlock()
|
||||
|
||||
factories = map[watcher.ContainerWatchSource][]ContainerHandlerFactory{}
|
||||
}
|
||||
|
||||
func DebugInfo() map[string][]string {
|
||||
factoriesLock.RLock()
|
||||
defer factoriesLock.RUnlock()
|
||||
|
||||
// Get debug information for all factories.
|
||||
out := make(map[string][]string)
|
||||
for _, factoriesSlice := range factories {
|
||||
for _, factory := range factoriesSlice {
|
||||
for k, v := range factory.DebugInfo() {
|
||||
out[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// GetReorderedFactoryList returns the list of ContainerHandlerFactory where the
|
||||
// RawContainerHandler is always the last element.
|
||||
func GetReorderedFactoryList(watchType watcher.ContainerWatchSource) []ContainerHandlerFactory {
|
||||
ContainerHandlerFactoryList := make([]ContainerHandlerFactory, 0, len(factories))
|
||||
|
||||
var rawFactory ContainerHandlerFactory
|
||||
for _, v := range factories[watchType] {
|
||||
if v != nil {
|
||||
if v.String() == "raw" {
|
||||
rawFactory = v
|
||||
continue
|
||||
}
|
||||
ContainerHandlerFactoryList = append(ContainerHandlerFactoryList, v)
|
||||
}
|
||||
}
|
||||
|
||||
if rawFactory != nil {
|
||||
ContainerHandlerFactoryList = append(ContainerHandlerFactoryList, rawFactory)
|
||||
}
|
||||
|
||||
return ContainerHandlerFactoryList
|
||||
}
|
917
vendor/github.com/google/cadvisor/container/libcontainer/handler.go
generated
vendored
Normal file
917
vendor/github.com/google/cadvisor/container/libcontainer/handler.go
generated
vendored
Normal file
@ -0,0 +1,917 @@
|
||||
// Copyright 2018 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
var (
	// referencedResetInterval controls how often (in measurement cycles) the
	// kernel's referenced-page information is cleared; 0 disables clearing.
	referencedResetInterval = flag.Uint64("referenced_reset_interval", 0,
		"Reset interval for referenced bytes (container_referenced_bytes metric), number of measurement cycles after which referenced bytes are cleared, if set to 0 referenced bytes are never cleared (default: 0)")

	// Per-PID procfs locations used to read and reset referenced-memory data.
	smapsFilePathPattern     = "/proc/%d/smaps"
	clearRefsFilePathPattern = "/proc/%d/clear_refs"

	// referencedRegexp extracts the kB value from "Referenced:" lines in smaps.
	referencedRegexp = regexp.MustCompile(`Referenced:\s*([0-9]+)\s*kB`)
)
|
||||
|
||||
// Handler gathers cgroup statistics and, when the container PID is known,
// /proc-based network statistics for a single container.
type Handler struct {
	cgroupManager   cgroups.Manager
	rootFs          string
	pid             int
	includedMetrics container.MetricSet
	// pidMetricsCache holds CPU scheduler stats for existing processes (map key is PID) between calls to schedulerStatsFromProcs.
	pidMetricsCache map[int]*info.CpuSchedstat
	// pidMetricsSaved holds accumulated CPU scheduler stats for processes that no longer exist.
	pidMetricsSaved info.CpuSchedstat
	// cycles counts stats-collection rounds; used to time referenced-memory resets.
	cycles uint64
}
|
||||
|
||||
func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedMetrics container.MetricSet) *Handler {
|
||||
return &Handler{
|
||||
cgroupManager: cgroupManager,
|
||||
rootFs: rootFs,
|
||||
pid: pid,
|
||||
includedMetrics: includedMetrics,
|
||||
pidMetricsCache: make(map[int]*info.CpuSchedstat),
|
||||
}
|
||||
}
|
||||
|
||||
// Get cgroup and networking stats of the specified container
//
// GetStats returns a fresh ContainerStats snapshot. Cgroup stats are always
// gathered; scheduler, referenced-memory, network, and process metrics are
// gathered only when enabled in h.includedMetrics. Failures in those optional
// groups are logged at V(4) and skipped rather than failing the whole call.
func (h *Handler) GetStats() (*info.ContainerStats, error) {
	ignoreStatsError := false
	if cgroups.IsCgroup2UnifiedMode() {
		// On cgroup v2 the root cgroup stats have been introduced in recent kernel versions,
		// so not all kernel versions have all the data. This means that stat fetching can fail
		// due to lacking cgroup stat files, but that some data is provided.
		if h.cgroupManager.Path("") == fs2.UnifiedMountpoint {
			ignoreStatsError = true
		}
	}

	cgroupStats, err := h.cgroupManager.GetStats()
	if err != nil {
		if !ignoreStatsError {
			return nil, err
		}
		klog.V(4).Infof("Ignoring errors when gathering stats for root cgroup since some controllers don't have stats on the root cgroup: %v", err)
	}
	stats := newContainerStats(cgroupStats, h.includedMetrics)

	if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
		// Best effort: scheduler stats are read from /proc/<pid>/schedstat.
		stats.Cpu.Schedstat, err = h.schedulerStatsFromProcs()
		if err != nil {
			klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
		}
	}

	if h.includedMetrics.Has(container.ReferencedMemoryMetrics) {
		// cycles drives the periodic clear_refs reset inside referencedBytesStat.
		h.cycles++
		pids, err := h.cgroupManager.GetPids()
		if err != nil {
			klog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
		} else {
			stats.ReferencedMemory, err = referencedBytesStat(pids, h.cycles, *referencedResetInterval)
			if err != nil {
				klog.V(4).Infof("Unable to get referenced bytes: %v", err)
			}
		}
	}

	// If we know the pid then get network stats from /proc/<pid>/net/dev
	if h.pid > 0 {
		if h.includedMetrics.Has(container.NetworkUsageMetrics) {
			netStats, err := networkStatsFromProc(h.rootFs, h.pid)
			if err != nil {
				klog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
			} else {
				stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
			}
		}
		if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
			t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
			if err != nil {
				klog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
			} else {
				stats.Network.Tcp = t
			}

			t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6")
			if err != nil {
				klog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
			} else {
				stats.Network.Tcp6 = t6
			}

		}
		if h.includedMetrics.Has(container.NetworkAdvancedTcpUsageMetrics) {
			ta, err := advancedTCPStatsFromProc(h.rootFs, h.pid, "net/netstat", "net/snmp")
			if err != nil {
				klog.V(4).Infof("Unable to get advanced tcp stats from pid %d: %v", h.pid, err)
			} else {
				stats.Network.TcpAdvanced = ta
			}
		}
		if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
			u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
			if err != nil {
				klog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
			} else {
				stats.Network.Udp = u
			}

			u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6")
			if err != nil {
				klog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
			} else {
				stats.Network.Udp6 = u6
			}
		}
	}
	// some process metrics are per container ( number of processes, number of
	// file descriptors etc.) and not required a proper container's
	// root PID (systemd services don't have the root PID atm)
	if h.includedMetrics.Has(container.ProcessMetrics) {
		path, ok := common.GetControllerPath(h.cgroupManager.GetPaths(), "cpu", cgroups.IsCgroup2UnifiedMode())
		if !ok {
			klog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid)
		} else {
			stats.Processes, err = processStatsFromProcs(h.rootFs, path, h.pid)
			if err != nil {
				klog.V(4).Infof("Unable to get Process Stats: %v", err)
			}
		}

		// if include processes metrics, just set threads metrics if exist, and has no relationship with cpu path
		setThreadsStats(cgroupStats, stats)
	}

	// For backwards compatibility.
	if len(stats.Network.Interfaces) > 0 {
		stats.Network.InterfaceStats = stats.Network.Interfaces[0]
	}

	return stats, nil
}
|
||||
|
||||
// parseUlimit converts a single field from a /proc/<pid>/limits row into an
// int64. The literal "unlimited" (any case) maps to -1; anything that is
// neither a base-10 integer nor "unlimited" is an error.
func parseUlimit(value string) (int64, error) {
	if num, err := strconv.ParseInt(value, 10, 64); err == nil {
		return num, nil
	}
	if strings.EqualFold(value, "unlimited") {
		// -1 implies unlimited except for priority and nice; man limits.conf
		return -1, nil
	}
	// Value is not a number or "unlimited"; return an error
	return 0, fmt.Errorf("unable to parse limit: %s", value)
}
|
||||
|
||||
func processLimitsFile(fileData string) []info.UlimitSpec {
|
||||
const maxOpenFilesLinePrefix = "Max open files"
|
||||
|
||||
limits := strings.Split(fileData, "\n")
|
||||
ulimits := make([]info.UlimitSpec, 0, len(limits))
|
||||
for _, lim := range limits {
|
||||
// Skip any headers/footers
|
||||
if strings.HasPrefix(lim, "Max open files") {
|
||||
// Remove line prefix
|
||||
ulimit, err := processMaxOpenFileLimitLine(
|
||||
"max_open_files",
|
||||
lim[len(maxOpenFilesLinePrefix):],
|
||||
)
|
||||
if err == nil {
|
||||
ulimits = append(ulimits, ulimit)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ulimits
|
||||
}
|
||||
|
||||
// Any caller of processMaxOpenFileLimitLine must ensure that the name prefix is already removed from the limit line.
|
||||
// with the "Max open files" prefix.
|
||||
func processMaxOpenFileLimitLine(name, line string) (info.UlimitSpec, error) {
|
||||
// Remove any leading whitespace
|
||||
line = strings.TrimSpace(line)
|
||||
// Split on whitespace
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 3 {
|
||||
return info.UlimitSpec{}, fmt.Errorf("unable to parse max open files line: %s", line)
|
||||
}
|
||||
// The first field is the soft limit, the second is the hard limit
|
||||
soft, err := parseUlimit(fields[0])
|
||||
if err != nil {
|
||||
return info.UlimitSpec{}, err
|
||||
}
|
||||
hard, err := parseUlimit(fields[1])
|
||||
if err != nil {
|
||||
return info.UlimitSpec{}, err
|
||||
}
|
||||
return info.UlimitSpec{
|
||||
Name: name,
|
||||
SoftLimit: soft,
|
||||
HardLimit: hard,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func processRootProcUlimits(rootFs string, rootPid int) []info.UlimitSpec {
|
||||
filePath := path.Join(rootFs, "/proc", strconv.Itoa(rootPid), "limits")
|
||||
out, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("error while listing directory %q to read ulimits: %v", filePath, err)
|
||||
return []info.UlimitSpec{}
|
||||
}
|
||||
return processLimitsFile(string(out))
|
||||
}
|
||||
|
||||
// processStatsFromProcs counts processes, open file descriptors, and sockets
// for every PID listed in <cgroupPath>/cgroup.procs, reading per-process fd
// information from <rootFs>/proc/<pid>/fd. When rootPid > 0, the root
// process's ulimits are attached as well. Per-PID read errors are logged at
// V(4) and skipped (processes may exit between listing and reading).
func processStatsFromProcs(rootFs string, cgroupPath string, rootPid int) (info.ProcessStats, error) {
	var fdCount, socketCount uint64
	filePath := path.Join(cgroupPath, "cgroup.procs")
	out, err := os.ReadFile(filePath)
	if err != nil {
		return info.ProcessStats{}, fmt.Errorf("couldn't open cpu cgroup procs file %v : %v", filePath, err)
	}

	pids := strings.Split(string(out), "\n")

	// EOL is also treated as a new line while reading "cgroup.procs" file with os.ReadFile.
	// The last value is an empty string "". Ex: pids = ["22", "1223", ""]
	// Trim the last value
	if len(pids) != 0 && pids[len(pids)-1] == "" {
		pids = pids[:len(pids)-1]
	}

	for _, pid := range pids {
		dirPath := path.Join(rootFs, "/proc", pid, "fd")
		fds, err := os.ReadDir(dirPath)
		if err != nil {
			// Process likely exited already; count it but skip its fds.
			klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
			continue
		}
		fdCount += uint64(len(fds))
		for _, fd := range fds {
			fdPath := path.Join(dirPath, fd.Name())
			linkName, err := os.Readlink(fdPath)
			if err != nil {
				klog.V(4).Infof("error while reading %q link: %v", fdPath, err)
				continue
			}
			// Socket fds resolve to link targets starting with "socket".
			if strings.HasPrefix(linkName, "socket") {
				socketCount++
			}
		}
	}

	processStats := info.ProcessStats{
		ProcessCount: uint64(len(pids)),
		FdCount:      fdCount,
		SocketCount:  socketCount,
	}

	if rootPid > 0 {
		processStats.Ulimits = processRootProcUlimits(rootFs, rootPid)
	}

	return processStats, nil
}
|
||||
|
||||
func (h *Handler) schedulerStatsFromProcs() (info.CpuSchedstat, error) {
|
||||
pids, err := h.cgroupManager.GetAllPids()
|
||||
if err != nil {
|
||||
return info.CpuSchedstat{}, fmt.Errorf("Could not get PIDs for container %d: %w", h.pid, err)
|
||||
}
|
||||
alivePids := make(map[int]struct{}, len(pids))
|
||||
for _, pid := range pids {
|
||||
f, err := os.Open(path.Join(h.rootFs, "proc", strconv.Itoa(pid), "schedstat"))
|
||||
if err != nil {
|
||||
return info.CpuSchedstat{}, fmt.Errorf("couldn't open scheduler statistics for process %d: %v", pid, err)
|
||||
}
|
||||
defer f.Close()
|
||||
contents, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return info.CpuSchedstat{}, fmt.Errorf("couldn't read scheduler statistics for process %d: %v", pid, err)
|
||||
}
|
||||
alivePids[pid] = struct{}{}
|
||||
rawMetrics := bytes.Split(bytes.TrimRight(contents, "\n"), []byte(" "))
|
||||
if len(rawMetrics) != 3 {
|
||||
return info.CpuSchedstat{}, fmt.Errorf("unexpected number of metrics in schedstat file for process %d", pid)
|
||||
}
|
||||
cacheEntry, ok := h.pidMetricsCache[pid]
|
||||
if !ok {
|
||||
cacheEntry = &info.CpuSchedstat{}
|
||||
h.pidMetricsCache[pid] = cacheEntry
|
||||
}
|
||||
for i, rawMetric := range rawMetrics {
|
||||
metric, err := strconv.ParseUint(string(rawMetric), 10, 64)
|
||||
if err != nil {
|
||||
return info.CpuSchedstat{}, fmt.Errorf("parsing error while reading scheduler statistics for process: %d: %v", pid, err)
|
||||
}
|
||||
switch i {
|
||||
case 0:
|
||||
cacheEntry.RunTime = metric
|
||||
case 1:
|
||||
cacheEntry.RunqueueTime = metric
|
||||
case 2:
|
||||
cacheEntry.RunPeriods = metric
|
||||
}
|
||||
}
|
||||
}
|
||||
schedstats := h.pidMetricsSaved // copy
|
||||
for p, v := range h.pidMetricsCache {
|
||||
schedstats.RunPeriods += v.RunPeriods
|
||||
schedstats.RunqueueTime += v.RunqueueTime
|
||||
schedstats.RunTime += v.RunTime
|
||||
if _, alive := alivePids[p]; !alive {
|
||||
// PID p is gone: accumulate its stats ...
|
||||
h.pidMetricsSaved.RunPeriods += v.RunPeriods
|
||||
h.pidMetricsSaved.RunqueueTime += v.RunqueueTime
|
||||
h.pidMetricsSaved.RunTime += v.RunTime
|
||||
// ... and remove its cache entry, to prevent
|
||||
// pidMetricsCache from growing.
|
||||
delete(h.pidMetricsCache, p)
|
||||
}
|
||||
}
|
||||
return schedstats, nil
|
||||
}
|
||||
|
||||
// referencedBytesStat gets and clears referenced bytes
|
||||
// see: https://github.com/brendangregg/wss#wsspl-referenced-page-flag
|
||||
func referencedBytesStat(pids []int, cycles uint64, resetInterval uint64) (uint64, error) {
|
||||
referencedKBytes, err := getReferencedKBytes(pids)
|
||||
if err != nil {
|
||||
return uint64(0), err
|
||||
}
|
||||
|
||||
err = clearReferencedBytes(pids, cycles, resetInterval)
|
||||
if err != nil {
|
||||
return uint64(0), err
|
||||
}
|
||||
return referencedKBytes * 1024, nil
|
||||
}
|
||||
|
||||
// getReferencedKBytes sums the referenced-memory values (in kB) matched by
// referencedRegexp in /proc/<pid>/smaps for all the given pids. Missing smaps
// files and files without matches are skipped; warnings are emitted when no
// file could be read, or no match found, across all PIDs.
func getReferencedKBytes(pids []int) (uint64, error) {
	referencedKBytes := uint64(0)
	readSmapsContent := false
	foundMatch := false
	for _, pid := range pids {
		smapsFilePath := fmt.Sprintf(smapsFilePathPattern, pid)
		smapsContent, err := os.ReadFile(smapsFilePath)
		if err != nil {
			klog.V(5).Infof("Cannot read %s file, err: %s", smapsFilePath, err)
			if os.IsNotExist(err) {
				continue // smaps file does not exists for all PIDs
			}
			return 0, err
		}
		readSmapsContent = true

		allMatches := referencedRegexp.FindAllSubmatch(smapsContent, -1)
		if len(allMatches) == 0 {
			klog.V(5).Infof("Not found any information about referenced bytes in %s file", smapsFilePath)
			continue // referenced bytes may not exist in smaps file
		}

		for _, matches := range allMatches {
			// Each match must carry exactly one capture group (the kB value).
			if len(matches) != 2 {
				return 0, fmt.Errorf("failed to match regexp in output: %s", string(smapsContent))
			}
			foundMatch = true
			referenced, err := strconv.ParseUint(string(matches[1]), 10, 64)
			if err != nil {
				return 0, err
			}
			referencedKBytes += referenced
		}
	}

	if len(pids) != 0 {
		if !readSmapsContent {
			klog.Warningf("Cannot read smaps files for any PID from %s", "CONTAINER")
		} else if !foundMatch {
			klog.Warningf("Not found any information about referenced bytes in smaps files for any PID from %s", "CONTAINER")
		}
	}
	return referencedKBytes, nil
}
|
||||
|
||||
func clearReferencedBytes(pids []int, cycles uint64, resetInterval uint64) error {
|
||||
if resetInterval == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if cycles%resetInterval == 0 {
|
||||
for _, pid := range pids {
|
||||
clearRefsFilePath := fmt.Sprintf(clearRefsFilePathPattern, pid)
|
||||
clerRefsFile, err := os.OpenFile(clearRefsFilePath, os.O_WRONLY, 0o644)
|
||||
if err != nil {
|
||||
// clear_refs file may not exist for all PIDs
|
||||
continue
|
||||
}
|
||||
_, err = clerRefsFile.WriteString("1\n")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = clerRefsFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
|
||||
netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")
|
||||
|
||||
ifaceStats, err := scanInterfaceStats(netStatsFile)
|
||||
if err != nil {
|
||||
return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
|
||||
}
|
||||
|
||||
return ifaceStats, nil
|
||||
}
|
||||
|
||||
// Interfaces whose (lowercased) names start with one of these prefixes are
// excluded from per-container network stats.
var ignoredDevicePrefixes = []string{"lo", "veth", "docker", "nerdctl"}

// isIgnoredDevice reports whether the named network interface should be
// skipped when collecting stats. Matching is case-insensitive.
func isIgnoredDevice(ifName string) bool {
	lowered := strings.ToLower(ifName)
	for _, prefix := range ignoredDevicePrefixes {
		if strings.HasPrefix(lowered, prefix) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// scanInterfaceStats parses a /proc/<pid>/net/dev file into per-interface
// rx/tx counters, skipping devices matched by isIgnoredDevice. A line with an
// unexpected field count fails the whole scan, since the file format can no
// longer be trusted.
func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
	file, err := os.Open(netStatsFile)
	if err != nil {
		return nil, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)

	// Discard header lines
	for i := 0; i < 2; i++ {
		if b := scanner.Scan(); !b {
			return nil, scanner.Err()
		}
	}

	stats := []info.InterfaceStats{}
	for scanner.Scan() {
		line := scanner.Text()
		// The interface name is suffixed with ':'; drop all colons so
		// strings.Fields splits the line cleanly.
		line = strings.Replace(line, ":", "", -1)

		fields := strings.Fields(line)
		// If the format of the line is invalid then don't trust any of the stats
		// in this file.
		if len(fields) != 17 {
			return nil, fmt.Errorf("invalid interface stats line: %v", line)
		}

		devName := fields[0]
		if isIgnoredDevice(devName) {
			continue
		}

		i := info.InterfaceStats{
			Name: devName,
		}

		// Fields 1-4 are the rx counters, fields 9-12 the tx counters, in the
		// same order as statPointers below.
		statFields := append(fields[1:5], fields[9:13]...)
		statPointers := []*uint64{
			&i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped,
			&i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped,
		}

		err := setInterfaceStatValues(statFields, statPointers)
		if err != nil {
			return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line)
		}

		stats = append(stats, i)
	}

	return stats, nil
}
|
||||
|
||||
// setInterfaceStatValues parses each decimal field and stores the result
// through the pointer at the same index; fields and pointers must be the
// same length. The first parse failure aborts and is returned.
func setInterfaceStatValues(fields []string, pointers []*uint64) error {
	for idx, field := range fields {
		parsed, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			return err
		}
		*pointers[idx] = parsed
	}
	return nil
}
|
||||
|
||||
func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
|
||||
tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
|
||||
|
||||
tcpStats, err := scanTCPStats(tcpStatsFile)
|
||||
if err != nil {
|
||||
return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
|
||||
}
|
||||
|
||||
return tcpStats, nil
|
||||
}
|
||||
|
||||
func advancedTCPStatsFromProc(rootFs string, pid int, file1, file2 string) (info.TcpAdvancedStat, error) {
|
||||
var advancedStats info.TcpAdvancedStat
|
||||
var err error
|
||||
|
||||
netstatFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file1)
|
||||
err = scanAdvancedTCPStats(&advancedStats, netstatFile)
|
||||
if err != nil {
|
||||
return advancedStats, err
|
||||
}
|
||||
|
||||
snmpFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file2)
|
||||
err = scanAdvancedTCPStats(&advancedStats, snmpFile)
|
||||
if err != nil {
|
||||
return advancedStats, err
|
||||
}
|
||||
|
||||
return advancedStats, nil
|
||||
}
|
||||
|
||||
func scanAdvancedTCPStats(advancedStats *info.TcpAdvancedStat, advancedTCPStatsFile string) error {
|
||||
data, err := os.ReadFile(advancedTCPStatsFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failure opening %s: %v", advancedTCPStatsFile, err)
|
||||
}
|
||||
|
||||
reader := strings.NewReader(string(data))
|
||||
scanner := bufio.NewScanner(reader)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
advancedTCPStats := make(map[string]interface{})
|
||||
for scanner.Scan() {
|
||||
nameParts := strings.Split(scanner.Text(), " ")
|
||||
scanner.Scan()
|
||||
valueParts := strings.Split(scanner.Text(), " ")
|
||||
// Remove trailing :. and ignore non-tcp
|
||||
protocol := nameParts[0][:len(nameParts[0])-1]
|
||||
if protocol != "TcpExt" && protocol != "Tcp" {
|
||||
continue
|
||||
}
|
||||
if len(nameParts) != len(valueParts) {
|
||||
return fmt.Errorf("mismatch field count mismatch in %s: %s",
|
||||
advancedTCPStatsFile, protocol)
|
||||
}
|
||||
for i := 1; i < len(nameParts); i++ {
|
||||
if strings.Contains(valueParts[i], "-") {
|
||||
vInt64, err := strconv.ParseInt(valueParts[i], 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decode value: %s to int64 error: %s", valueParts[i], err)
|
||||
}
|
||||
advancedTCPStats[nameParts[i]] = vInt64
|
||||
} else {
|
||||
vUint64, err := strconv.ParseUint(valueParts[i], 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decode value: %s to uint64 error: %s", valueParts[i], err)
|
||||
}
|
||||
advancedTCPStats[nameParts[i]] = vUint64
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b, err := json.Marshal(advancedTCPStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(b, advancedStats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return scanner.Err()
|
||||
}
|
||||
|
||||
func scanTCPStats(tcpStatsFile string) (info.TcpStat, error) {
|
||||
var stats info.TcpStat
|
||||
|
||||
data, err := os.ReadFile(tcpStatsFile)
|
||||
if err != nil {
|
||||
return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
|
||||
}
|
||||
|
||||
tcpStateMap := map[string]uint64{
|
||||
"01": 0, // ESTABLISHED
|
||||
"02": 0, // SYN_SENT
|
||||
"03": 0, // SYN_RECV
|
||||
"04": 0, // FIN_WAIT1
|
||||
"05": 0, // FIN_WAIT2
|
||||
"06": 0, // TIME_WAIT
|
||||
"07": 0, // CLOSE
|
||||
"08": 0, // CLOSE_WAIT
|
||||
"09": 0, // LAST_ACK
|
||||
"0A": 0, // LISTEN
|
||||
"0B": 0, // CLOSING
|
||||
}
|
||||
|
||||
reader := strings.NewReader(string(data))
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
// Discard header line
|
||||
if b := scanner.Scan(); !b {
|
||||
return stats, scanner.Err()
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
state := strings.Fields(line)
|
||||
// TCP state is the 4th field.
|
||||
// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
|
||||
tcpState := state[3]
|
||||
_, ok := tcpStateMap[tcpState]
|
||||
if !ok {
|
||||
return stats, fmt.Errorf("invalid TCP stats line: %v", line)
|
||||
}
|
||||
tcpStateMap[tcpState]++
|
||||
}
|
||||
|
||||
stats = info.TcpStat{
|
||||
Established: tcpStateMap["01"],
|
||||
SynSent: tcpStateMap["02"],
|
||||
SynRecv: tcpStateMap["03"],
|
||||
FinWait1: tcpStateMap["04"],
|
||||
FinWait2: tcpStateMap["05"],
|
||||
TimeWait: tcpStateMap["06"],
|
||||
Close: tcpStateMap["07"],
|
||||
CloseWait: tcpStateMap["08"],
|
||||
LastAck: tcpStateMap["09"],
|
||||
Listen: tcpStateMap["0A"],
|
||||
Closing: tcpStateMap["0B"],
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
|
||||
var err error
|
||||
var udpStats info.UdpStat
|
||||
|
||||
udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
|
||||
|
||||
r, err := os.Open(udpStatsFile)
|
||||
if err != nil {
|
||||
return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
|
||||
}
|
||||
|
||||
udpStats, err = scanUDPStats(r)
|
||||
if err != nil {
|
||||
return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
|
||||
}
|
||||
|
||||
return udpStats, nil
|
||||
}
|
||||
|
||||
func scanUDPStats(r io.Reader) (info.UdpStat, error) {
|
||||
var stats info.UdpStat
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
// Discard header line
|
||||
if b := scanner.Scan(); !b {
|
||||
return stats, scanner.Err()
|
||||
}
|
||||
|
||||
listening := uint64(0)
|
||||
dropped := uint64(0)
|
||||
rxQueued := uint64(0)
|
||||
txQueued := uint64(0)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
|
||||
|
||||
listening++
|
||||
|
||||
fs := strings.Fields(line)
|
||||
if len(fs) != 13 {
|
||||
continue
|
||||
}
|
||||
|
||||
rx, tx := uint64(0), uint64(0)
|
||||
fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
|
||||
rxQueued += rx
|
||||
txQueued += tx
|
||||
|
||||
d, err := strconv.Atoi(string(fs[12]))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
dropped += uint64(d)
|
||||
}
|
||||
|
||||
stats = info.UdpStat{
|
||||
Listen: listening,
|
||||
Dropped: dropped,
|
||||
RxQueued: rxQueued,
|
||||
TxQueued: txQueued,
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func (h *Handler) GetProcesses() ([]int, error) {
|
||||
pids, err := h.cgroupManager.GetPids()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
// Convert libcontainer stats to info.ContainerStats.
|
||||
func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
|
||||
ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
|
||||
ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
|
||||
ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
|
||||
ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
|
||||
ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
|
||||
ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime
|
||||
|
||||
if !withPerCPU {
|
||||
return
|
||||
}
|
||||
if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 {
|
||||
// libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the
|
||||
// cpuacct subsystem.
|
||||
return
|
||||
}
|
||||
ret.Cpu.Usage.PerCpu = s.CpuStats.CpuUsage.PercpuUsage
|
||||
}
|
||||
|
||||
// setDiskIoStats copies every recursive blkio counter group from libcontainer
// stats into ret.DiskIo, converting raw entries into per-disk stat maps via
// diskStatsCopy.
func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.DiskIo.IoServiceBytes = diskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
	ret.DiskIo.IoServiced = diskStatsCopy(s.BlkioStats.IoServicedRecursive)
	ret.DiskIo.IoQueued = diskStatsCopy(s.BlkioStats.IoQueuedRecursive)
	ret.DiskIo.Sectors = diskStatsCopy(s.BlkioStats.SectorsRecursive)
	ret.DiskIo.IoServiceTime = diskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
	ret.DiskIo.IoWaitTime = diskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
	ret.DiskIo.IoMerged = diskStatsCopy(s.BlkioStats.IoMergedRecursive)
	ret.DiskIo.IoTime = diskStatsCopy(s.BlkioStats.IoTimeRecursive)
}
|
||||
|
||||
// setMemoryStats translates libcontainer memory stats into ret.Memory,
// selecting the stat-key names appropriate for cgroup v2, hierarchical
// cgroup v1, or flat cgroup v1 accounting, and derives the working set as
// usage minus inactive file pages (clamped at zero).
func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.Memory.Usage = s.MemoryStats.Usage.Usage
	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
	ret.Memory.KernelUsage = s.MemoryStats.KernelUsage.Usage

	if cgroups.IsCgroup2UnifiedMode() {
		ret.Memory.Cache = s.MemoryStats.Stats["file"]
		ret.Memory.RSS = s.MemoryStats.Stats["anon"]
		// Swap is computed as SwapUsage minus memory usage here — presumably
		// SwapUsage on v2 reports swap+mem combined; confirm against runc.
		ret.Memory.Swap = s.MemoryStats.SwapUsage.Usage - s.MemoryStats.Usage.Usage
		ret.Memory.MappedFile = s.MemoryStats.Stats["file_mapped"]
	} else if s.MemoryStats.UseHierarchy {
		// Hierarchical v1 accounting uses "total_"-prefixed keys.
		ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
		ret.Memory.MappedFile = s.MemoryStats.Stats["total_mapped_file"]
	} else {
		ret.Memory.Cache = s.MemoryStats.Stats["cache"]
		ret.Memory.RSS = s.MemoryStats.Stats["rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["swap"]
		ret.Memory.MappedFile = s.MemoryStats.Stats["mapped_file"]
	}
	// Page-fault counters are mirrored into both container and hierarchical
	// data when present.
	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
		ret.Memory.ContainerData.Pgfault = v
		ret.Memory.HierarchicalData.Pgfault = v
	}
	if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
		ret.Memory.ContainerData.Pgmajfault = v
		ret.Memory.HierarchicalData.Pgmajfault = v
	}

	inactiveFileKeyName := "total_inactive_file"
	if cgroups.IsCgroup2UnifiedMode() {
		inactiveFileKeyName = "inactive_file"
	}

	activeFileKeyName := "total_active_file"
	if cgroups.IsCgroup2UnifiedMode() {
		activeFileKeyName = "active_file"
	}

	if v, ok := s.MemoryStats.Stats[activeFileKeyName]; ok {
		ret.Memory.TotalActiveFile = v
	}

	// Working set = usage - inactive file pages, floored at zero.
	workingSet := ret.Memory.Usage
	if v, ok := s.MemoryStats.Stats[inactiveFileKeyName]; ok {
		ret.Memory.TotalInactiveFile = v
		if workingSet < v {
			workingSet = 0
		} else {
			workingSet -= v
		}
	}
	ret.Memory.WorkingSet = workingSet
}
|
||||
|
||||
// setCPUSetStats copies cpuset controller data (currently only the
// memory-migrate setting) into ret.
func setCPUSetStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.CpuSet.MemoryMigrate = s.CPUSetStats.MemoryMigrate
}
|
||||
|
||||
// getNumaStats returns an independent copy of the per-NUMA-node usage map.
func getNumaStats(memoryStats map[uint8]uint64) map[uint8]uint64 {
	copied := make(map[uint8]uint64, len(memoryStats))
	for node, pages := range memoryStats {
		copied[node] = pages
	}
	return copied
}
|
||||
|
||||
// setMemoryNumaStats copies per-NUMA-node page usage (file, anon,
// unevictable) into ret, for both the container itself and its hierarchical
// totals. Each map is copied so ret does not alias libcontainer's data.
func setMemoryNumaStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.Memory.ContainerData.NumaStats.File = getNumaStats(s.MemoryStats.PageUsageByNUMA.File.Nodes)
	ret.Memory.ContainerData.NumaStats.Anon = getNumaStats(s.MemoryStats.PageUsageByNUMA.Anon.Nodes)
	ret.Memory.ContainerData.NumaStats.Unevictable = getNumaStats(s.MemoryStats.PageUsageByNUMA.Unevictable.Nodes)

	ret.Memory.HierarchicalData.NumaStats.File = getNumaStats(s.MemoryStats.PageUsageByNUMA.Hierarchical.File.Nodes)
	ret.Memory.HierarchicalData.NumaStats.Anon = getNumaStats(s.MemoryStats.PageUsageByNUMA.Hierarchical.Anon.Nodes)
	ret.Memory.HierarchicalData.NumaStats.Unevictable = getNumaStats(s.MemoryStats.PageUsageByNUMA.Hierarchical.Unevictable.Nodes)
}
|
||||
|
||||
func setHugepageStats(s *cgroups.Stats, ret *info.ContainerStats) {
|
||||
ret.Hugetlb = make(map[string]info.HugetlbStats)
|
||||
for k, v := range s.HugetlbStats {
|
||||
ret.Hugetlb[k] = info.HugetlbStats{
|
||||
Usage: v.Usage,
|
||||
MaxUsage: v.MaxUsage,
|
||||
Failcnt: v.Failcnt,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// read from pids path not cpu
//
// setThreadsStats copies current/max thread counts from the pids controller
// into ret.Processes; a nil stats argument is tolerated and leaves ret
// untouched.
func setThreadsStats(s *cgroups.Stats, ret *info.ContainerStats) {
	if s != nil {
		ret.Processes.ThreadsCurrent = s.PidsStats.Current
		ret.Processes.ThreadsMax = s.PidsStats.Limit
	}
}
|
||||
|
||||
func newContainerStats(cgroupStats *cgroups.Stats, includedMetrics container.MetricSet) *info.ContainerStats {
|
||||
ret := &info.ContainerStats{
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
if s := cgroupStats; s != nil {
|
||||
setCPUStats(s, ret, includedMetrics.Has(container.PerCpuUsageMetrics))
|
||||
if includedMetrics.Has(container.DiskIOMetrics) {
|
||||
setDiskIoStats(s, ret)
|
||||
}
|
||||
setMemoryStats(s, ret)
|
||||
if includedMetrics.Has(container.MemoryNumaMetrics) {
|
||||
setMemoryNumaStats(s, ret)
|
||||
}
|
||||
if includedMetrics.Has(container.HugetlbUsageMetrics) {
|
||||
setHugepageStats(s, ret)
|
||||
}
|
||||
if includedMetrics.Has(container.CPUSetMetrics) {
|
||||
setCPUSetStats(s, ret)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
170
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
Normal file
170
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
|
||||
fs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
|
||||
fs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
|
||||
configs "github.com/opencontainers/runc/libcontainer/configs"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// GetCgroupSubsystems returns information about the cgroup subsystems that are
|
||||
// of interest as a map of cgroup controllers to their mount points.
|
||||
// For example, "cpu" -> "/sys/fs/cgroup/cpu".
|
||||
//
|
||||
// The incudeMetrics arguments specifies which metrics are requested,
|
||||
// and is used to filter out some cgroups and their mounts. If nil,
|
||||
// all supported cgroup subsystems are included.
|
||||
//
|
||||
// For cgroup v2, includedMetrics argument is unused, the only map key is ""
|
||||
// (empty string), and the value is the unified cgroup mount point.
|
||||
func GetCgroupSubsystems(includedMetrics container.MetricSet) (map[string]string, error) {
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
return map[string]string{"": fs2.UnifiedMountpoint}, nil
|
||||
}
|
||||
// Get all cgroup mounts.
|
||||
allCgroups, err := cgroups.GetCgroupMounts(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getCgroupSubsystemsHelper(allCgroups, includedMetrics)
|
||||
}
|
||||
|
||||
func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount, includedMetrics container.MetricSet) (map[string]string, error) {
|
||||
if len(allCgroups) == 0 {
|
||||
return nil, fmt.Errorf("failed to find cgroup mounts")
|
||||
}
|
||||
|
||||
// Trim the mounts to only the subsystems we care about.
|
||||
mountPoints := make(map[string]string, len(allCgroups))
|
||||
for _, mount := range allCgroups {
|
||||
for _, subsystem := range mount.Subsystems {
|
||||
if !needSubsys(subsystem, includedMetrics) {
|
||||
continue
|
||||
}
|
||||
if _, ok := mountPoints[subsystem]; ok {
|
||||
// duplicate mount for this subsystem; use the first one we saw
|
||||
klog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem])
|
||||
continue
|
||||
}
|
||||
mountPoints[subsystem] = mount.Mountpoint
|
||||
}
|
||||
}
|
||||
|
||||
return mountPoints, nil
|
||||
}
|
||||
|
||||
// A map of cgroup subsystems we support listing (should be the minimal set
// we need stats from) to a respective MetricKind.
// An empty MetricKind value ("", as for "devices") marks a subsystem that is
// always considered needed regardless of the requested metric set; see
// needSubsys.
var supportedSubsystems = map[string]container.MetricKind{
	"cpu":        container.CpuUsageMetrics,
	"cpuacct":    container.CpuUsageMetrics,
	"memory":     container.MemoryUsageMetrics,
	"hugetlb":    container.HugetlbUsageMetrics,
	"pids":       container.ProcessMetrics,
	"cpuset":     container.CPUSetMetrics,
	"blkio":      container.DiskIOMetrics,
	"io":         container.DiskIOMetrics,
	"devices":    "",
	"perf_event": container.PerfMetrics,
}
|
||||
|
||||
// Check if this cgroup subsystem/controller is of use.
|
||||
func needSubsys(name string, metrics container.MetricSet) bool {
|
||||
// Check if supported.
|
||||
metric, supported := supportedSubsystems[name]
|
||||
if !supported {
|
||||
return false
|
||||
}
|
||||
// Check if needed.
|
||||
if metrics == nil || metric == "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return metrics.Has(metric)
|
||||
}
|
||||
|
||||
func diskStatsCopy0(major, minor uint64) *info.PerDiskStats {
|
||||
disk := info.PerDiskStats{
|
||||
Major: major,
|
||||
Minor: minor,
|
||||
}
|
||||
disk.Stats = make(map[string]uint64)
|
||||
return &disk
|
||||
}
|
||||
|
||||
// diskKey identifies a block device by its major/minor numbers; it is used
// as a map key when grouping blkio stats per device.
type diskKey struct {
	Major uint64
	Minor uint64
}
|
||||
|
||||
func diskStatsCopy1(diskStat map[diskKey]*info.PerDiskStats) []info.PerDiskStats {
|
||||
i := 0
|
||||
stat := make([]info.PerDiskStats, len(diskStat))
|
||||
for _, disk := range diskStat {
|
||||
stat[i] = *disk
|
||||
i++
|
||||
}
|
||||
return stat
|
||||
}
|
||||
|
||||
func diskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
|
||||
if len(blkioStats) == 0 {
|
||||
return
|
||||
}
|
||||
diskStat := make(map[diskKey]*info.PerDiskStats)
|
||||
for i := range blkioStats {
|
||||
major := blkioStats[i].Major
|
||||
minor := blkioStats[i].Minor
|
||||
key := diskKey{
|
||||
Major: major,
|
||||
Minor: minor,
|
||||
}
|
||||
diskp, ok := diskStat[key]
|
||||
if !ok {
|
||||
diskp = diskStatsCopy0(major, minor)
|
||||
diskStat[key] = diskp
|
||||
}
|
||||
op := blkioStats[i].Op
|
||||
if op == "" {
|
||||
op = "Count"
|
||||
}
|
||||
diskp.Stats[op] = blkioStats[i].Value
|
||||
}
|
||||
return diskStatsCopy1(diskStat)
|
||||
}
|
||||
|
||||
func NewCgroupManager(name string, paths map[string]string) (cgroups.Manager, error) {
|
||||
config := &configs.Cgroup{
|
||||
Name: name,
|
||||
Resources: &configs.Resources{},
|
||||
}
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
path := paths[""]
|
||||
return fs2.NewManager(config, path)
|
||||
}
|
||||
|
||||
return fs.NewManager(config, paths)
|
||||
}
|
114
vendor/github.com/google/cadvisor/container/raw/factory.go
generated
vendored
Normal file
114
vendor/github.com/google/cadvisor/container/raw/factory.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raw
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
"github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
watch "github.com/google/cadvisor/watcher"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
	// DockerOnly restricts reporting to docker containers plus root stats
	// (and any cgroups matched by the raw prefix whitelist).
	DockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats")
	// disableRootCgroupStats turns off stats collection for the root cgroup.
	disableRootCgroupStats = flag.Bool("disable_root_cgroup_stats", false, "Disable collecting root Cgroup stats")
)
|
||||
|
||||
// rawFactory produces handlers for "raw" containers: bare cgroups not
// managed by a higher-level container runtime.
type rawFactory struct {
	// Factory for machine information.
	machineInfoFactory info.MachineInfoFactory

	// Information about the cgroup subsystems (subsystem -> mount point).
	cgroupSubsystems map[string]string

	// Information about mounted filesystems.
	fsInfo fs.FsInfo

	// Watcher for inotify events.
	watcher *common.InotifyWatcher

	// List of metrics to be included.
	includedMetrics map[container.MetricKind]struct{}

	// List of raw container cgroup path prefix whitelist.
	rawPrefixWhiteList []string
}
|
||||
|
||||
// String returns the factory name used in logs and debug output.
func (f *rawFactory) String() string {
	return "raw"
}
|
||||
|
||||
func (f *rawFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (container.ContainerHandler, error) {
|
||||
rootFs := "/"
|
||||
if !inHostNamespace {
|
||||
rootFs = "/rootfs"
|
||||
}
|
||||
return newRawContainerHandler(name, f.cgroupSubsystems, f.machineInfoFactory, f.fsInfo, f.watcher, rootFs, f.includedMetrics)
|
||||
}
|
||||
|
||||
// The raw factory can handle any container. If --docker_only is set to true, non-docker containers are ignored except for "/" and those whitelisted by raw_cgroup_prefix_whitelist flag.
|
||||
func (f *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
|
||||
if name == "/" {
|
||||
return true, true, nil
|
||||
}
|
||||
if *DockerOnly && f.rawPrefixWhiteList[0] == "" {
|
||||
return true, false, nil
|
||||
}
|
||||
for _, prefix := range f.rawPrefixWhiteList {
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
return true, true, nil
|
||||
}
|
||||
}
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
func (f *rawFactory) DebugInfo() map[string][]string {
|
||||
return common.DebugInfo(f.watcher.GetWatches())
|
||||
}
|
||||
|
||||
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics map[container.MetricKind]struct{}, rawPrefixWhiteList []string) error {
|
||||
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems(includedMetrics)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
|
||||
}
|
||||
if len(cgroupSubsystems) == 0 {
|
||||
return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
|
||||
}
|
||||
|
||||
watcher, err := common.NewInotifyWatcher()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(1).Infof("Registering Raw factory")
|
||||
factory := &rawFactory{
|
||||
machineInfoFactory: machineInfoFactory,
|
||||
fsInfo: fsInfo,
|
||||
cgroupSubsystems: cgroupSubsystems,
|
||||
watcher: watcher,
|
||||
includedMetrics: includedMetrics,
|
||||
rawPrefixWhiteList: rawPrefixWhiteList,
|
||||
}
|
||||
container.RegisterContainerHandlerFactory(factory, []watch.ContainerWatchSource{watch.Raw})
|
||||
return nil
|
||||
}
|
304
vendor/github.com/google/cadvisor/container/raw/handler.go
generated
vendored
Normal file
304
vendor/github.com/google/cadvisor/container/raw/handler.go
generated
vendored
Normal file
@ -0,0 +1,304 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Handler for "raw" containers.
|
||||
package raw
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
"github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/machine"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// rawContainerHandler serves stats and spec information for a single raw
// (cgroup-based) container.
type rawContainerHandler struct {
	// Name of the container for this handler.
	name string
	machineInfoFactory info.MachineInfoFactory

	// Absolute path to the cgroup hierarchies of this container.
	// (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test")
	cgroupPaths map[string]string

	fsInfo fs.FsInfo
	// externalMounts lists mounts declared for this container in the
	// container hints file, if any.
	externalMounts []common.Mount
	includedMetrics container.MetricSet

	// libcontainerHandler does the actual cgroup stats collection.
	libcontainerHandler *libcontainer.Handler
}
|
||||
|
||||
// isRootCgroup reports whether name refers to the root cgroup ("/").
func isRootCgroup(name string) bool {
	const root = "/"
	return name == root
}
|
||||
|
||||
// newRawContainerHandler builds a handler for the named cgroup-based
// container: it resolves the container's cgroup paths, creates a
// libcontainer cgroup manager, and picks up any mounts declared for the
// container in the container hints file.
func newRawContainerHandler(name string, cgroupSubsystems map[string]string, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
	cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints)
	if err != nil {
		return nil, err
	}

	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)

	cgroupManager, err := libcontainer.NewCgroupManager(name, cgroupPaths)
	if err != nil {
		return nil, err
	}

	// External mounts come from the hints entry whose FullName matches this
	// container, if one exists.
	var externalMounts []common.Mount
	for _, container := range cHints.AllHosts {
		if name == container.FullName {
			externalMounts = container.Mounts
			break
		}
	}

	// For the root cgroup, collect stats via PID 1.
	pid := 0
	if isRootCgroup(name) {
		pid = 1

		// delete pids from cgroup paths because /sys/fs/cgroup/pids/pids.current not exist
		delete(cgroupPaths, "pids")
	}

	handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics)

	return &rawContainerHandler{
		name:                name,
		machineInfoFactory:  machineInfoFactory,
		cgroupPaths:         cgroupPaths,
		fsInfo:              fsInfo,
		externalMounts:      externalMounts,
		includedMetrics:     includedMetrics,
		libcontainerHandler: handler,
	}, nil
}
|
||||
|
||||
func (h *rawContainerHandler) ContainerReference() (info.ContainerReference, error) {
|
||||
// We only know the container by its one name.
|
||||
return info.ContainerReference{
|
||||
Name: h.name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error) {
|
||||
nd := []info.NetInfo{}
|
||||
if isRootCgroup(h.name) {
|
||||
mi, err := h.machineInfoFactory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return nd, err
|
||||
}
|
||||
return mi.NetworkDevices, nil
|
||||
}
|
||||
return nd, nil
|
||||
}
|
||||
|
||||
// Start is a no-op: raw containers require no per-handler startup.
func (h *rawContainerHandler) Start() {}
|
||||
|
||||
// Cleanup is a no-op: raw containers hold no per-handler resources.
func (h *rawContainerHandler) Cleanup() {}
|
||||
|
||||
// GetSpec builds the container spec from its cgroup configuration. For the
// root cgroup it additionally fills in physical network devices and the
// machine's memory and swap capacities.
func (h *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
	const hasNetwork = false
	// Only the root cgroup or externally hinted mounts imply a filesystem.
	hasFilesystem := isRootCgroup(h.name) || len(h.externalMounts) > 0
	spec, err := common.GetSpec(h.cgroupPaths, h.machineInfoFactory, hasNetwork, hasFilesystem)
	if err != nil {
		return spec, err
	}

	if isRootCgroup(h.name) {
		// Check physical network devices for root container.
		nd, err := h.GetRootNetworkDevices()
		if err != nil {
			return spec, err
		}
		spec.HasNetwork = spec.HasNetwork || len(nd) != 0

		// Get memory and swap limits of the running machine.
		memLimit, err := machine.GetMachineMemoryCapacity()
		if err != nil {
			// Best-effort: log and mark memory as unavailable.
			klog.Warningf("failed to obtain memory limit for machine container")
			spec.HasMemory = false
		} else {
			spec.Memory.Limit = uint64(memLimit)
			// Spec is marked to have memory only if the memory limit is set
			spec.HasMemory = true
		}

		swapLimit, err := machine.GetMachineSwapCapacity()
		if err != nil {
			// Best-effort: swap limit stays at its zero value on failure.
			klog.Warningf("failed to obtain swap limit for machine container")
		} else {
			spec.Memory.SwapLimit = uint64(swapLimit)
		}
	}

	return spec, nil
}
|
||||
|
||||
func fsToFsStats(fs *fs.Fs) info.FsStats {
|
||||
inodes := uint64(0)
|
||||
inodesFree := uint64(0)
|
||||
hasInodes := fs.InodesFree != nil
|
||||
if hasInodes {
|
||||
inodes = *fs.Inodes
|
||||
inodesFree = *fs.InodesFree
|
||||
}
|
||||
return info.FsStats{
|
||||
Device: fs.Device,
|
||||
Type: fs.Type.String(),
|
||||
Limit: fs.Capacity,
|
||||
Usage: fs.Capacity - fs.Free,
|
||||
HasInodes: hasInodes,
|
||||
Inodes: inodes,
|
||||
InodesFree: inodesFree,
|
||||
Available: fs.Available,
|
||||
ReadsCompleted: fs.DiskStats.ReadsCompleted,
|
||||
ReadsMerged: fs.DiskStats.ReadsMerged,
|
||||
SectorsRead: fs.DiskStats.SectorsRead,
|
||||
ReadTime: fs.DiskStats.ReadTime,
|
||||
WritesCompleted: fs.DiskStats.WritesCompleted,
|
||||
WritesMerged: fs.DiskStats.WritesMerged,
|
||||
SectorsWritten: fs.DiskStats.SectorsWritten,
|
||||
WriteTime: fs.DiskStats.WriteTime,
|
||||
IoInProgress: fs.DiskStats.IoInProgress,
|
||||
IoTime: fs.DiskStats.IoTime,
|
||||
WeightedIoTime: fs.DiskStats.WeightedIoTime,
|
||||
}
|
||||
}
|
||||
|
||||
// getFsStats appends filesystem usage and disk-I/O device names to stats.
// Global filesystem info is used for the root cgroup; other containers only
// report filesystems for their externally hinted mounts.
func (h *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	var filesystems []fs.Fs
	var err error
	// Early exit if no disk metrics are to be collected.
	if !h.includedMetrics.Has(container.DiskUsageMetrics) && !h.includedMetrics.Has(container.DiskIOMetrics) {
		return nil
	}

	// Get Filesystem information only for the root cgroup.
	if isRootCgroup(h.name) {
		filesystems, err = h.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
	} else {
		if len(h.externalMounts) > 0 {
			// Look up filesystem info only for the hinted host directories.
			mountSet := make(map[string]struct{})
			for _, mount := range h.externalMounts {
				mountSet[mount.HostDir] = struct{}{}
			}
			filesystems, err = h.fsInfo.GetFsInfoForPath(mountSet)
			if err != nil {
				return err
			}
		}
	}

	if h.includedMetrics.Has(container.DiskUsageMetrics) {
		for i := range filesystems {
			// Copy to a local so the pointer passed below does not alias
			// the loop slice element.
			fs := filesystems[i]
			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
		}
	}

	if h.includedMetrics.Has(container.DiskIOMetrics) {
		// Resolve device names for disk I/O stats via the filesystem list,
		// falling back to machine info (see fsNamer.DeviceName).
		common.AssignDeviceNamesToDiskStats(&fsNamer{fs: filesystems, factory: h.machineInfoFactory}, &stats.DiskIo)
	}
	return nil
}
|
||||
|
||||
// GetStats returns resource usage for this container via libcontainer,
// augmented with filesystem stats.
//
// NOTE(review): when --disable_root_cgroup_stats is set, the root cgroup
// returns (nil, nil); callers must tolerate nil stats with a nil error.
func (h *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
	if *disableRootCgroupStats && isRootCgroup(h.name) {
		return nil, nil
	}
	stats, err := h.libcontainerHandler.GetStats()
	if err != nil {
		return stats, err
	}

	// Get filesystem stats.
	err = h.getFsStats(stats)
	if err != nil {
		return stats, err
	}

	return stats, nil
}
|
||||
|
||||
func (h *rawContainerHandler) GetCgroupPath(resource string) (string, error) {
|
||||
var res string
|
||||
if !cgroups.IsCgroup2UnifiedMode() {
|
||||
res = resource
|
||||
}
|
||||
path, ok := h.cgroupPaths[res]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.name)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func (h *rawContainerHandler) GetContainerLabels() map[string]string {
|
||||
return map[string]string{}
|
||||
}
|
||||
|
||||
func (h *rawContainerHandler) GetContainerIPAddress() string {
|
||||
// the IP address for the raw container corresponds to the system ip address.
|
||||
return "127.0.0.1"
|
||||
}
|
||||
|
||||
// ListContainers enumerates sub-containers beneath this one across its
// cgroup hierarchies, per the requested list type.
func (h *rawContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
	return common.ListContainers(h.name, h.cgroupPaths, listType)
}
|
||||
|
||||
// ListProcesses returns the PIDs in this container's cgroup.
// listType is ignored; the libcontainer handler decides which processes to
// report.
func (h *rawContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
	return h.libcontainerHandler.GetProcesses()
}
|
||||
|
||||
// Exists reports whether this container's cgroup paths still exist on disk
// (as determined by common.CgroupExists).
func (h *rawContainerHandler) Exists() bool {
	return common.CgroupExists(h.cgroupPaths)
}
|
||||
|
||||
// Type identifies this handler as a raw container handler.
func (h *rawContainerHandler) Type() container.ContainerType {
	return container.ContainerTypeRaw
}
|
||||
|
||||
// fsNamer maps (major, minor) device numbers to device names, consulting a
// filesystem list first and machine info as a fallback.
type fsNamer struct {
	fs      []fs.Fs
	factory info.MachineInfoFactory
	// info is lazily initialized from machine info on the first fallback
	// lookup (see DeviceName).
	info common.DeviceNamer
}
|
||||
|
||||
func (n *fsNamer) DeviceName(major, minor uint64) (string, bool) {
|
||||
for _, info := range n.fs {
|
||||
if uint64(info.Major) == major && uint64(info.Minor) == minor {
|
||||
return info.Device, true
|
||||
}
|
||||
}
|
||||
if n.info == nil {
|
||||
mi, err := n.factory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
n.info = (*common.MachineInfoNamer)(mi)
|
||||
}
|
||||
return n.info.DeviceName(major, minor)
|
||||
}
|
243
vendor/github.com/google/cadvisor/container/raw/watcher.go
generated
vendored
Normal file
243
vendor/github.com/google/cadvisor/container/raw/watcher.go
generated
vendored
Normal file
@ -0,0 +1,243 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package container defines types for sub-container events and also
|
||||
// defines an interface for container operation handlers.
|
||||
package raw
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
inotify "k8s.io/utils/inotify"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/common"
|
||||
"github.com/google/cadvisor/container/libcontainer"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// rawContainerWatcher reports raw container creation/removal by watching
// the cgroup hierarchies with inotify.
type rawContainerWatcher struct {
	// Absolute path to the root of the cgroup hierarchies
	cgroupPaths map[string]string

	// Inotify event watcher.
	watcher *common.InotifyWatcher

	// Signal for watcher thread to stop.
	stopWatcher chan error
}
|
||||
|
||||
func NewRawContainerWatcher(includedMetrics container.MetricSet) (watcher.ContainerWatcher, error) {
|
||||
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems(includedMetrics)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get cgroup subsystems: %v", err)
|
||||
}
|
||||
if len(cgroupSubsystems) == 0 {
|
||||
return nil, fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
|
||||
}
|
||||
|
||||
watcher, err := common.NewInotifyWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawWatcher := &rawContainerWatcher{
|
||||
cgroupPaths: cgroupSubsystems,
|
||||
watcher: watcher,
|
||||
stopWatcher: make(chan error),
|
||||
}
|
||||
|
||||
return rawWatcher, nil
|
||||
}
|
||||
|
||||
// Start begins watching every cgroup hierarchy (recursively) and spawns a
// goroutine that translates kernel inotify events into ContainerEvents on
// the supplied channel. On a setup error, watches added so far are removed.
// The goroutine runs until Stop signals via stopWatcher.
func (w *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error {
	// Watch this container (all its cgroups) and all subdirectories.
	watched := make([]string, 0)
	for _, cgroupPath := range w.cgroupPaths {
		_, err := w.watchDirectory(events, cgroupPath, "/")
		if err != nil {
			// Roll back every watch established before the failure.
			for _, watchedCgroupPath := range watched {
				_, removeErr := w.watcher.RemoveWatch("/", watchedCgroupPath)
				if removeErr != nil {
					klog.Warningf("Failed to remove inotify watch for %q with error: %v", watchedCgroupPath, removeErr)
				}
			}
			return err
		}
		watched = append(watched, cgroupPath)
	}

	// Process the events received from the kernel.
	go func() {
		for {
			select {
			case event := <-w.watcher.Event():
				err := w.processEvent(event, events)
				if err != nil {
					klog.Warningf("Error while processing event (%+v): %v", event, err)
				}
			case err := <-w.watcher.Error():
				klog.Warningf("Error while watching %q: %v", "/", err)
			case <-w.stopWatcher:
				err := w.watcher.Close()
				// Reply to Stop only when Close succeeded.
				// NOTE(review): if Close returns an error, the loop keeps
				// running and Stop never receives a reply — confirm this
				// blocking behavior is intended.
				if err == nil {
					w.stopWatcher <- err
					return
				}
			}
		}
	}()

	return nil
}
|
||||
|
||||
// Stop signals the event-processing goroutine started by Start to close the
// inotify watcher, then waits for its reply on the same channel.
func (w *rawContainerWatcher) Stop() error {
	// Rendezvous with the watcher thread.
	w.stopWatcher <- nil
	return <-w.stopWatcher
}
|
||||
|
||||
// watchDirectory watches the specified directory and all subdirectories.
// Returns whether the path was already being watched and an error (if any).
// For subdirectories discovered here (whose inotify creation event was
// necessarily missed), a synthetic ContainerAdd event is published.
func (w *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEvent, dir string, containerName string) (bool, error) {
	// Don't watch .mount cgroups because they never have containers as sub-cgroups. A single container
	// can have many .mount cgroups associated with it which can quickly exhaust the inotify watches on a node.
	if strings.HasSuffix(containerName, ".mount") {
		return false, nil
	}
	alreadyWatching, err := w.watcher.AddWatch(containerName, dir)
	if err != nil {
		return alreadyWatching, err
	}

	// Remove the watch if further operations failed.
	cleanup := true
	defer func() {
		if cleanup {
			_, err := w.watcher.RemoveWatch(containerName, dir)
			if err != nil {
				klog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
			}
		}
	}()

	// TODO(vmarmol): We should re-do this once we're done to ensure directories were not added in the meantime.
	// Watch subdirectories as well.
	entries, err := os.ReadDir(dir)
	if err != nil {
		return alreadyWatching, err
	}
	for _, entry := range entries {
		if entry.IsDir() {
			entryPath := path.Join(dir, entry.Name())
			subcontainerName := path.Join(containerName, entry.Name())
			// Recurse into each subdirectory.
			alreadyWatchingSubDir, err := w.watchDirectory(events, entryPath, subcontainerName)
			if err != nil {
				klog.Errorf("Failed to watch directory %q: %v", entryPath, err)
				if os.IsNotExist(err) {
					// The directory may have been removed before watching. Try to watch the other
					// subdirectories. (https://github.com/kubernetes/kubernetes/issues/28997)
					continue
				}
				return alreadyWatching, err
			}
			// since we already missed the creation event for this directory, publish an event here.
			if !alreadyWatchingSubDir {
				// Sent from a goroutine so a full channel cannot block the scan.
				go func() {
					events <- watcher.ContainerEvent{
						EventType:   watcher.ContainerAdd,
						Name:        subcontainerName,
						WatchSource: watcher.Raw,
					}
				}()
			}
		}
	}

	// Success: disarm the deferred watch removal.
	cleanup = false
	return alreadyWatching, nil
}
|
||||
|
||||
// processEvent translates one kernel inotify event into a ContainerEvent:
// it classifies the event as an add or delete, derives the container name
// from the event path, adjusts the watch set accordingly, and delivers the
// event (deduplicating adds/deletes for already/still-watched paths).
func (w *rawContainerWatcher) processEvent(event *inotify.Event, events chan watcher.ContainerEvent) error {
	// Convert the inotify event type to a container create or delete.
	var eventType watcher.ContainerEventType
	switch {
	case (event.Mask & inotify.InCreate) > 0:
		eventType = watcher.ContainerAdd
	case (event.Mask & inotify.InDelete) > 0:
		eventType = watcher.ContainerDelete
	case (event.Mask & inotify.InMovedFrom) > 0:
		eventType = watcher.ContainerDelete
	case (event.Mask & inotify.InMovedTo) > 0:
		eventType = watcher.ContainerAdd
	default:
		// Ignore other events.
		return nil
	}

	// Derive the container name from the path name: strip the cgroup mount
	// prefix. The -1 keeps the leading "/" on the container name.
	var containerName string
	for _, mount := range w.cgroupPaths {
		mountLocation := path.Clean(mount) + "/"
		if strings.HasPrefix(event.Name, mountLocation) {
			containerName = event.Name[len(mountLocation)-1:]
			break
		}
	}
	if containerName == "" {
		return fmt.Errorf("unable to detect container from watch event on directory %q", event.Name)
	}

	// Maintain the watch for the new or deleted container.
	switch eventType {
	case watcher.ContainerAdd:
		// New container was created, watch it.
		alreadyWatched, err := w.watchDirectory(events, event.Name, containerName)
		if err != nil {
			return err
		}

		// Only report container creation once.
		if alreadyWatched {
			return nil
		}
	case watcher.ContainerDelete:
		// Container was deleted, stop watching for it.
		lastWatched, err := w.watcher.RemoveWatch(containerName, event.Name)
		if err != nil {
			return err
		}

		// Only report container deletion once.
		if !lastWatched {
			return nil
		}
	default:
		return fmt.Errorf("unknown event type %v", eventType)
	}

	// Deliver the event.
	events <- watcher.ContainerEvent{
		EventType:   eventType,
		Name:        containerName,
		WatchSource: watcher.Raw,
	}

	return nil
}
|
59
vendor/github.com/google/cadvisor/container/systemd/factory.go
generated
vendored
Normal file
59
vendor/github.com/google/cadvisor/container/systemd/factory.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package systemd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// systemdFactory is a stateless factory that exists only to claim systemd
// ".mount" cgroups so that other factories ignore them.
type systemdFactory struct{}
|
||||
|
||||
// String returns the factory name used in logs and debug output.
func (f *systemdFactory) String() string {
	return "systemd"
}
|
||||
|
||||
func (f *systemdFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (container.ContainerHandler, error) {
|
||||
return nil, fmt.Errorf("Not yet supported")
|
||||
}
|
||||
|
||||
func (f *systemdFactory) CanHandleAndAccept(name string) (bool, bool, error) {
|
||||
// on systemd using devicemapper each mount into the container has an associated cgroup that we ignore.
|
||||
// for details on .mount units: http://man7.org/linux/man-pages/man5/systemd.mount.5.html
|
||||
if strings.HasSuffix(name, ".mount") {
|
||||
return true, false, nil
|
||||
}
|
||||
klog.V(5).Infof("%s not handled by systemd handler", name)
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// DebugInfo returns no debug data; the systemd factory keeps no state.
func (f *systemdFactory) DebugInfo() map[string][]string {
	return map[string][]string{}
}
|
||||
|
||||
// Register registers the systemd container factory.
// machineInfoFactory, fsInfo and includedMetrics are currently unused: the
// factory only claims ".mount" cgroups and never builds handlers.
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error {
	klog.V(1).Infof("Registering systemd factory")
	factory := &systemdFactory{}
	container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw})
	return nil
}
|
30
vendor/github.com/google/cadvisor/container/systemd/install/install.go
generated
vendored
Normal file
30
vendor/github.com/google/cadvisor/container/systemd/install/install.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The install package registers systemd.NewPlugin() as the "systemd" container provider when imported
|
||||
package install
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/container/systemd"
|
||||
)
|
||||
|
||||
// init registers the systemd container provider under the name "systemd";
// importing this package is all that is needed to activate the plugin.
// Registration failure is fatal because the binary cannot proceed with a
// half-registered provider set.
func init() {
	err := container.RegisterPlugin("systemd", systemd.NewPlugin())
	if err != nil {
		klog.Fatalf("Failed to register systemd plugin: %v", err)
	}
}
|
38
vendor/github.com/google/cadvisor/container/systemd/plugin.go
generated
vendored
Normal file
38
vendor/github.com/google/cadvisor/container/systemd/plugin.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package systemd
|
||||
|
||||
import (
|
||||
"github.com/google/cadvisor/container"
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/watcher"
|
||||
)
|
||||
|
||||
// NewPlugin returns an implementation of container.Plugin suitable for passing to container.RegisterPlugin()
func NewPlugin() container.Plugin {
	return &plugin{}
}
|
||||
|
||||
// plugin adapts the systemd factory to the container.Plugin interface.
type plugin struct{}
|
||||
|
||||
// InitializeFSContext is a no-op: the systemd provider needs no filesystem
// context.
func (p *plugin) InitializeFSContext(context *fs.Context) error {
	return nil
}
|
||||
|
||||
// Register registers the systemd factory. It always returns a nil watcher:
// the systemd provider does not watch for container events.
func (p *plugin) Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) (watcher.ContainerWatcher, error) {
	err := Register(factory, fsInfo, includedMetrics)
	return nil, err
}
|
64
vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go
generated
vendored
Normal file
64
vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// DmsetupClient is a low-level client for interacting with device mapper via
|
||||
// the `dmsetup` utility, which is provided by the `device-mapper` package.
|
||||
type DmsetupClient interface {
|
||||
// Table runs `dmsetup table` on the given device name and returns the
|
||||
// output or an error.
|
||||
Table(deviceName string) ([]byte, error)
|
||||
// Message runs `dmsetup message` on the given device, passing the given
|
||||
// message to the given sector, and returns the output or an error.
|
||||
Message(deviceName string, sector int, message string) ([]byte, error)
|
||||
// Status runs `dmsetup status` on the given device and returns the output
|
||||
// or an error.
|
||||
Status(deviceName string) ([]byte, error)
|
||||
}
|
||||
|
||||
// NewDmSetupClient returns a new DmsetupClient.
|
||||
func NewDmsetupClient() DmsetupClient {
|
||||
return &defaultDmsetupClient{}
|
||||
}
|
||||
|
||||
// defaultDmsetupClient is a functional DmsetupClient
|
||||
type defaultDmsetupClient struct{}
|
||||
|
||||
var _ DmsetupClient = &defaultDmsetupClient{}
|
||||
|
||||
func (c *defaultDmsetupClient) Table(deviceName string) ([]byte, error) {
|
||||
return c.dmsetup("table", deviceName)
|
||||
}
|
||||
|
||||
func (c *defaultDmsetupClient) Message(deviceName string, sector int, message string) ([]byte, error) {
|
||||
return c.dmsetup("message", deviceName, strconv.Itoa(sector), message)
|
||||
}
|
||||
|
||||
func (c *defaultDmsetupClient) Status(deviceName string) ([]byte, error) {
|
||||
return c.dmsetup("status", deviceName)
|
||||
}
|
||||
|
||||
func (*defaultDmsetupClient) dmsetup(args ...string) ([]byte, error) {
|
||||
klog.V(5).Infof("running dmsetup %v", strings.Join(args, " "))
|
||||
return exec.Command("dmsetup", args...).Output()
|
||||
}
|
16
vendor/github.com/google/cadvisor/devicemapper/doc.go
generated
vendored
Normal file
16
vendor/github.com/google/cadvisor/devicemapper/doc.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package devicemapper contains code for working with devicemapper
|
||||
package devicemapper
|
93
vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go
generated
vendored
Normal file
93
vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// thinLsClient knows how to run a thin_ls very specific to CoW usage for
|
||||
// containers.
|
||||
type thinLsClient interface {
|
||||
// ThinLs runs a thin ls on the given device, which is expected to be a
|
||||
// metadata device. The caller must hold the metadata snapshot for the
|
||||
// device.
|
||||
ThinLs(deviceName string) (map[string]uint64, error)
|
||||
}
|
||||
|
||||
// newThinLsClient returns a thinLsClient or an error if the thin_ls binary
|
||||
// couldn't be located.
|
||||
func newThinLsClient() (thinLsClient, error) {
|
||||
thinLsPath, err := ThinLsBinaryPresent()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating thin_ls client: %v", err)
|
||||
}
|
||||
|
||||
return &defaultThinLsClient{thinLsPath}, nil
|
||||
}
|
||||
|
||||
// defaultThinLsClient is a functional thinLsClient
|
||||
type defaultThinLsClient struct {
|
||||
thinLsPath string
|
||||
}
|
||||
|
||||
var _ thinLsClient = &defaultThinLsClient{}
|
||||
|
||||
func (c *defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error) {
|
||||
args := []string{"--no-headers", "-m", "-o", "DEV,EXCLUSIVE_BYTES", deviceName}
|
||||
klog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " "))
|
||||
|
||||
output, err := exec.Command(c.thinLsPath, args...).Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error running command `thin_ls %v`: %v\noutput:\n\n%v", strings.Join(args, " "), err, string(output))
|
||||
}
|
||||
|
||||
return parseThinLsOutput(output), nil
|
||||
}
|
||||
|
||||
// parseThinLsOutput parses the output returned by thin_ls to build a map of
|
||||
// device id -> usage.
|
||||
func parseThinLsOutput(output []byte) map[string]uint64 {
|
||||
cache := map[string]uint64{}
|
||||
|
||||
// parse output
|
||||
scanner := bufio.NewScanner(bytes.NewReader(output))
|
||||
for scanner.Scan() {
|
||||
output := scanner.Text()
|
||||
fields := strings.Fields(output)
|
||||
if len(fields) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
deviceID := fields[0]
|
||||
usage, err := strconv.ParseUint(fields[1], 10, 64)
|
||||
if err != nil {
|
||||
klog.Warningf("unexpected error parsing thin_ls output: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
cache[deviceID] = usage
|
||||
}
|
||||
|
||||
return cache
|
||||
|
||||
}
|
179
vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
generated
vendored
Normal file
179
vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
generated
vendored
Normal file
@ -0,0 +1,179 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ThinPoolWatcher maintains a cache of device name -> usage stats for a
// devicemapper thin-pool using thin_ls.
type ThinPoolWatcher struct {
	// poolName is the devicemapper thin-pool being watched.
	poolName string
	// metadataDevice is the pool's metadata device, passed to thin_ls.
	metadataDevice string
	// lock guards cache (held exclusively during Refresh, shared in GetUsage).
	lock *sync.RWMutex
	// cache maps device id -> usage; replaced wholesale by Refresh.
	cache map[string]uint64
	// period is the interval between refreshes in Start's loop.
	period time.Duration
	// stopChan ends the Start loop when closed (see Stop).
	stopChan chan struct{}
	// dmsetup reserves/releases metadata snapshots and queries pool status.
	dmsetup DmsetupClient
	// thinLsClient runs thin_ls against the metadata device.
	thinLsClient thinLsClient
}
|
||||
|
||||
// NewThinPoolWatcher returns a new ThinPoolWatcher for the given devicemapper
|
||||
// thin pool name and metadata device or an error.
|
||||
func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, error) {
|
||||
thinLsClient, err := newThinLsClient()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encountered error creating thin_ls client: %v", err)
|
||||
}
|
||||
|
||||
return &ThinPoolWatcher{poolName: poolName,
|
||||
metadataDevice: metadataDevice,
|
||||
lock: &sync.RWMutex{},
|
||||
cache: make(map[string]uint64),
|
||||
period: 15 * time.Second,
|
||||
stopChan: make(chan struct{}),
|
||||
dmsetup: NewDmsetupClient(),
|
||||
thinLsClient: thinLsClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start starts the ThinPoolWatcher: it performs one immediate Refresh, then
// refreshes the cache every w.period until Stop is called. Refresh errors
// are logged and the loop continues.
func (w *ThinPoolWatcher) Start() {
	err := w.Refresh()
	if err != nil {
		klog.Errorf("encountered error refreshing thin pool watcher: %v", err)
	}

	for {
		select {
		case <-w.stopChan:
			return
		case <-time.After(w.period):
			// NOTE(review): time.After allocates a fresh timer each
			// iteration, and the next wait only begins after Refresh
			// returns, so the effective interval is period + refresh time.
			start := time.Now()
			err = w.Refresh()
			if err != nil {
				klog.Errorf("encountered error refreshing thin pool watcher: %v", err)
			}

			// print latency for refresh
			duration := time.Since(start)
			klog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
		}
	}
}

// Stop stops the ThinPoolWatcher by closing stopChan, ending the Start loop.
// Must be called at most once: a second call would close a closed channel
// and panic.
func (w *ThinPoolWatcher) Stop() {
	close(w.stopChan)
}
|
||||
|
||||
// GetUsage gets the cached usage value of the given device.
|
||||
func (w *ThinPoolWatcher) GetUsage(deviceID string) (uint64, error) {
|
||||
w.lock.RLock()
|
||||
defer w.lock.RUnlock()
|
||||
|
||||
v, ok := w.cache[deviceID]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("no cached value for usage of device %v", deviceID)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
const (
	// dmsetup messages that reserve/release a metadata snapshot on a
	// thin-pool so thin_ls can read consistent metadata.
	reserveMetadataMessage = "reserve_metadata_snap"
	releaseMetadataMessage = "release_metadata_snap"
)

// Refresh performs a `thin_ls` of the pool being watched and refreshes the
// cached data with the result. It releases any stale metadata reservation,
// reserves a fresh snapshot, runs thin_ls against the metadata device, and
// always releases the snapshot before returning (via defer).
func (w *ThinPoolWatcher) Refresh() error {
	w.lock.Lock()
	defer w.lock.Unlock()

	currentlyReserved, err := w.checkReservation(w.poolName)
	if err != nil {
		err = fmt.Errorf("error determining whether snapshot is reserved: %v", err)
		return err
	}

	if currentlyReserved {
		// A leftover reservation (e.g. from an earlier failed run) must be
		// released before a new snapshot can be reserved.
		klog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
		_, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
		if err != nil {
			err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err)
			return err
		}
	}

	klog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
	// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
	// message'. It's not needed for thin pools.
	if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {
		err = fmt.Errorf("error reserving metadata for thin-pool %v: %v output: %v", w.poolName, err, string(output))
		return err
	}
	klog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName)

	// Always release the snapshot we reserved, even if thin_ls fails below.
	defer func() {
		klog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName)
		_, err := w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
		if err != nil {
			klog.Warningf("Unable to release metadata snapshot for thin-pool %v: %s", w.poolName, err)
		}
	}()

	klog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice)
	newCache, err := w.thinLsClient.ThinLs(w.metadataDevice)
	if err != nil {
		err = fmt.Errorf("error performing thin_ls on metadata device %v: %v", w.metadataDevice, err)
		return err
	}

	// Swap in the freshly computed usage map atomically under the lock.
	w.cache = newCache
	return nil
}
|
||||
|
||||
const (
|
||||
thinPoolDmsetupStatusHeldMetadataRoot = 6
|
||||
thinPoolDmsetupStatusMinFields = thinPoolDmsetupStatusHeldMetadataRoot + 1
|
||||
)
|
||||
|
||||
// checkReservation checks to see whether the thin device is currently holding
|
||||
// userspace metadata.
|
||||
func (w *ThinPoolWatcher) checkReservation(poolName string) (bool, error) {
|
||||
klog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot")
|
||||
output, err := w.dmsetup.Status(poolName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// we care about the field at fields[thinPoolDmsetupStatusHeldMetadataRoot],
|
||||
// so make sure we get enough fields
|
||||
fields := strings.Fields(string(output))
|
||||
if len(fields) < thinPoolDmsetupStatusMinFields {
|
||||
return false, fmt.Errorf("unexpected output of dmsetup status command; expected at least %d fields, got %v; output: %v", thinPoolDmsetupStatusMinFields, len(fields), string(output))
|
||||
}
|
||||
|
||||
heldMetadataRoot := fields[thinPoolDmsetupStatusHeldMetadataRoot]
|
||||
currentlyReserved := heldMetadataRoot != "-"
|
||||
return currentlyReserved, nil
|
||||
}
|
50
vendor/github.com/google/cadvisor/devicemapper/util.go
generated
vendored
Normal file
50
vendor/github.com/google/cadvisor/devicemapper/util.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package devicemapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// ThinLsBinaryPresent returns the location of the thin_ls binary in the mount
// namespace cadvisor is running in or an error. The locations checked are:
//
// - /sbin/
// - /bin/
// - /usr/sbin/
// - /usr/bin/
//
// The thin_ls binary is provided by the device-mapper-persistent-data
// package.
func ThinLsBinaryPresent() (string, error) {
	// try paths for non-containerized operation
	// note: thin_ls is most likely a symlink to pdata_tools
	for _, dir := range []string{"/sbin", "/bin", "/usr/sbin/", "/usr/bin"} {
		candidate := filepath.Join(dir, "thin_ls")
		if _, err := os.Stat(candidate); err == nil {
			return candidate, nil
		}
	}

	return "", fmt.Errorf("unable to find thin_ls binary")
}
|
339
vendor/github.com/google/cadvisor/events/handler.go
generated
vendored
Normal file
339
vendor/github.com/google/cadvisor/events/handler.go
generated
vendored
Normal file
@ -0,0 +1,339 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package events
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/utils"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// byTimestamp implements sort.Interface over a slice of events, ordering
// them by ascending Timestamp.
type byTimestamp []*info.Event

// functions necessary to implement the sort interface on the Events struct
func (e byTimestamp) Len() int {
	return len(e)
}

func (e byTimestamp) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}

func (e byTimestamp) Less(i, j int) bool {
	return e[i].Timestamp.Before(e[j].Timestamp)
}
|
||||
|
||||
// EventChannel pairs a watch id with the channel on which matching events
// are delivered to the watcher.
type EventChannel struct {
	// Watch ID. Can be used by the caller to request cancellation of watch events.
	watchID int
	// Channel on which the caller can receive watch events.
	channel chan *info.Event
}

// Request holds a set of parameters by which Event objects may be screened.
// The caller may want events that occurred within a specific timeframe
// or of a certain type, which may be specified in the *Request object
// they pass to an EventManager function.
type Request struct {
	// events falling before StartTime do not satisfy the request. StartTime
	// must be left blank in calls to WatchEvents
	StartTime time.Time
	// events falling after EndTime do not satisfy the request. EndTime
	// must be left blank in calls to WatchEvents
	EndTime time.Time
	// EventType is a map that specifies the type(s) of events wanted
	EventType map[info.EventType]bool
	// MaxEventsReturned allows the caller to put a limit on how many
	// events to receive. If there are more events than MaxEventsReturned
	// then the most chronologically recent events in the time period
	// specified are returned. Must be >= 1
	MaxEventsReturned int
	// ContainerName is the absolute container name for which the event occurred
	ContainerName string
	// if IncludeSubcontainers is false, only events occurring in the specific
	// container, and not the subcontainers, will be returned
	IncludeSubcontainers bool
}
|
||||
|
||||
// EventManager is implemented by Events. It provides two ways to monitor
// events and one way to add events.
type EventManager interface {
	// WatchEvents() allows a caller to register for receiving events based on the specified request.
	// On successful registration, an EventChannel object is returned.
	WatchEvents(request *Request) (*EventChannel, error)
	// GetEvents() returns all detected events based on the filters specified in request.
	GetEvents(request *Request) ([]*info.Event, error)
	// AddEvent allows the caller to add an event to an EventManager
	// object
	AddEvent(event *info.Event) error
	// StopWatch cancels a previously requested watch event identified by its
	// watch id.
	StopWatch(watchID int)
}
|
||||
|
||||
// events provides an implementation for the EventManager interface.
type events struct {
	// eventStore holds the events by event type.
	eventStore map[info.EventType]*utils.TimedStore
	// map of registered watchers keyed by watch id.
	watchers map[int]*watch
	// lock guarding the eventStore.
	eventsLock sync.RWMutex
	// lock guarding watchers.
	watcherLock sync.RWMutex
	// last allocated watch id.
	lastID int
	// Event storage policy.
	storagePolicy StoragePolicy
}

// watch is initialized by a call to WatchEvents() and then added to the
// events.watchers map. When AddEvent() finds an event that satisfies the
// request parameter of a watch object in events.watchers, it will send that
// event out over the watch object's channel. The caller that called
// WatchEvents will receive the event over the channel provided to
// WatchEvents.
type watch struct {
	// request parameters passed in by the caller of WatchEvents()
	request *Request
	// a channel used to send event back to the caller.
	eventChannel *EventChannel
}
|
||||
|
||||
func NewEventChannel(watchID int) *EventChannel {
|
||||
return &EventChannel{
|
||||
watchID: watchID,
|
||||
channel: make(chan *info.Event, 10),
|
||||
}
|
||||
}
|
||||
|
||||
// StoragePolicy specifies how many events to store.
// MaxAge is the max duration for which to keep events.
// MaxNumEvents is the max number of events to keep (-1 for no limit).
type StoragePolicy struct {
	// Default limits, used if a per-event limit is not set.
	// (Fixed typo: was "Defaults limites".)
	DefaultMaxAge       time.Duration
	DefaultMaxNumEvents int

	// Per-event type limits.
	PerTypeMaxAge       map[info.EventType]time.Duration
	PerTypeMaxNumEvents map[info.EventType]int
}

// DefaultStoragePolicy returns a policy keeping up to 100000 events per type
// for at most 24 hours, with no per-type overrides.
func DefaultStoragePolicy() StoragePolicy {
	return StoragePolicy{
		DefaultMaxAge:       24 * time.Hour,
		DefaultMaxNumEvents: 100000,
		PerTypeMaxAge:       make(map[info.EventType]time.Duration),
		PerTypeMaxNumEvents: make(map[info.EventType]int),
	}
}
|
||||
|
||||
// returns a pointer to an initialized Events object.
|
||||
func NewEventManager(storagePolicy StoragePolicy) EventManager {
|
||||
return &events{
|
||||
eventStore: make(map[info.EventType]*utils.TimedStore),
|
||||
watchers: make(map[int]*watch),
|
||||
storagePolicy: storagePolicy,
|
||||
}
|
||||
}
|
||||
|
||||
// returns a pointer to an initialized Request object
|
||||
func NewRequest() *Request {
|
||||
return &Request{
|
||||
EventType: map[info.EventType]bool{},
|
||||
IncludeSubcontainers: false,
|
||||
MaxEventsReturned: 10,
|
||||
}
|
||||
}
|
||||
|
||||
// returns a pointer to an initialized watch object
|
||||
func newWatch(request *Request, eventChannel *EventChannel) *watch {
|
||||
return &watch{
|
||||
request: request,
|
||||
eventChannel: eventChannel,
|
||||
}
|
||||
}
|
||||
|
||||
// GetChannel returns the channel on which watch events are delivered.
func (ch *EventChannel) GetChannel() chan *info.Event {
	return ch.channel
}

// GetWatchId returns this channel's watch id.
// NOTE(review): Go naming convention would be WatchID, but renaming would
// break existing callers, so the name is kept.
func (ch *EventChannel) GetWatchId() int {
	return ch.watchID
}
|
||||
|
||||
// sorts and returns up to the last MaxEventsReturned chronological elements
|
||||
func getMaxEventsReturned(request *Request, eSlice []*info.Event) []*info.Event {
|
||||
sort.Sort(byTimestamp(eSlice))
|
||||
n := request.MaxEventsReturned
|
||||
if n >= len(eSlice) || n <= 0 {
|
||||
return eSlice
|
||||
}
|
||||
return eSlice[len(eSlice)-n:]
|
||||
}
|
||||
|
||||
// If the request wants all subcontainers, this returns if the request's
|
||||
// container path is a prefix of the event container path. Otherwise,
|
||||
// it checks that the container paths of the event and request are
|
||||
// equivalent
|
||||
func isSubcontainer(request *Request, event *info.Event) bool {
|
||||
if request.IncludeSubcontainers {
|
||||
return request.ContainerName == "/" || strings.HasPrefix(event.ContainerName+"/", request.ContainerName+"/")
|
||||
}
|
||||
return event.ContainerName == request.ContainerName
|
||||
}
|
||||
|
||||
// determines if an event occurs within the time set in the request object and is the right type
|
||||
func checkIfEventSatisfiesRequest(request *Request, event *info.Event) bool {
|
||||
startTime := request.StartTime
|
||||
endTime := request.EndTime
|
||||
eventTime := event.Timestamp
|
||||
if !startTime.IsZero() {
|
||||
if startTime.After(eventTime) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if !endTime.IsZero() {
|
||||
if endTime.Before(eventTime) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if !request.EventType[event.EventType] {
|
||||
return false
|
||||
}
|
||||
if request.ContainerName != "" {
|
||||
return isSubcontainer(request, event)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// method of Events object that screens Event objects found in the eventStore
|
||||
// attribute and if they fit the parameters passed by the Request object,
|
||||
// adds it to a slice of *Event objects that is returned. If both MaxEventsReturned
|
||||
// and StartTime/EndTime are specified in the request object, then only
|
||||
// up to the most recent MaxEventsReturned events in that time range are returned.
|
||||
func (e *events) GetEvents(request *Request) ([]*info.Event, error) {
|
||||
returnEventList := []*info.Event{}
|
||||
e.eventsLock.RLock()
|
||||
defer e.eventsLock.RUnlock()
|
||||
for eventType, fetch := range request.EventType {
|
||||
if !fetch {
|
||||
continue
|
||||
}
|
||||
evs, ok := e.eventStore[eventType]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
res := evs.InTimeRange(request.StartTime, request.EndTime, request.MaxEventsReturned)
|
||||
for _, in := range res {
|
||||
e := in.(*info.Event)
|
||||
if checkIfEventSatisfiesRequest(request, e) {
|
||||
returnEventList = append(returnEventList, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
returnEventList = getMaxEventsReturned(request, returnEventList)
|
||||
return returnEventList, nil
|
||||
}
|
||||
|
||||
// method of Events object that maintains an *Event channel passed by the user.
|
||||
// When an event is added by AddEvents that satisfies the parameters in the passed
|
||||
// Request object it is fed to the channel. The StartTime and EndTime of the watch
|
||||
// request should be uninitialized because the purpose is to watch indefinitely
|
||||
// for events that will happen in the future
|
||||
func (e *events) WatchEvents(request *Request) (*EventChannel, error) {
|
||||
if !request.StartTime.IsZero() || !request.EndTime.IsZero() {
|
||||
return nil, errors.New(
|
||||
"for a call to watch, request.StartTime and request.EndTime must be uninitialized")
|
||||
}
|
||||
e.watcherLock.Lock()
|
||||
defer e.watcherLock.Unlock()
|
||||
newID := e.lastID + 1
|
||||
returnEventChannel := NewEventChannel(newID)
|
||||
newWatcher := newWatch(request, returnEventChannel)
|
||||
e.watchers[newID] = newWatcher
|
||||
e.lastID = newID
|
||||
return returnEventChannel, nil
|
||||
}
|
||||
|
||||
// updateEventStore adds event to the store for its type, lazily creating the
// per-type TimedStore on first use according to the storage policy. A
// per-type max-events limit of 0 disables storage for that type entirely.
func (e *events) updateEventStore(event *info.Event) {
	e.eventsLock.Lock()
	defer e.eventsLock.Unlock()
	if _, ok := e.eventStore[event.EventType]; !ok {
		// Resolve the event-count limit: per-type override wins over default.
		maxNumEvents := e.storagePolicy.DefaultMaxNumEvents
		if numEvents, ok := e.storagePolicy.PerTypeMaxNumEvents[event.EventType]; ok {
			maxNumEvents = numEvents
		}
		if maxNumEvents == 0 {
			// Event storage is disabled for event.EventType
			return
		}

		// Resolve the retention window the same way.
		maxAge := e.storagePolicy.DefaultMaxAge
		if age, ok := e.storagePolicy.PerTypeMaxAge[event.EventType]; ok {
			maxAge = age
		}

		e.eventStore[event.EventType] = utils.NewTimedStore(maxAge, maxNumEvents)
	}
	e.eventStore[event.EventType].Add(event.Timestamp, event)
}
|
||||
|
||||
func (e *events) findValidWatchers(event *info.Event) []*watch {
|
||||
watchesToSend := make([]*watch, 0)
|
||||
for _, watcher := range e.watchers {
|
||||
watchRequest := watcher.request
|
||||
if checkIfEventSatisfiesRequest(watchRequest, event) {
|
||||
watchesToSend = append(watchesToSend, watcher)
|
||||
}
|
||||
}
|
||||
return watchesToSend
|
||||
}
|
||||
|
||||
// AddEvent adds the given event to the eventStore and feeds it to every
// registered watcher channel whose request it satisfies. Always returns nil.
func (e *events) AddEvent(event *info.Event) error {
	e.updateEventStore(event)
	e.watcherLock.RLock()
	defer e.watcherLock.RUnlock()
	watchesToSend := e.findValidWatchers(event)
	for _, watchObject := range watchesToSend {
		// NOTE(review): this send blocks while holding the read lock once a
		// watcher's 10-slot buffer fills — presumably consumers drain their
		// channels promptly; confirm against callers.
		watchObject.eventChannel.GetChannel() <- event
	}
	klog.V(4).Infof("Added event %v", event)
	return nil
}
|
||||
|
||||
// Removes a watch instance from the EventManager's watchers map
|
||||
func (e *events) StopWatch(watchID int) {
|
||||
e.watcherLock.Lock()
|
||||
defer e.watcherLock.Unlock()
|
||||
_, ok := e.watchers[watchID]
|
||||
if !ok {
|
||||
klog.Errorf("Could not find watcher instance %v", watchID)
|
||||
}
|
||||
close(e.watchers[watchID].eventChannel.GetChannel())
|
||||
delete(e.watchers, watchID)
|
||||
}
|
882
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
Normal file
882
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
Normal file
@ -0,0 +1,882 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Provides Filesystem Stats
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
zfs "github.com/mistifyio/go-zfs"
|
||||
mount "github.com/moby/sys/mountinfo"
|
||||
|
||||
"github.com/google/cadvisor/devicemapper"
|
||||
"github.com/google/cadvisor/utils"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
	// Labels for well-known filesystems that this package auto-detects.
	LabelSystemRoot     = "root"
	LabelDockerImages   = "docker-images"
	LabelCrioImages     = "crio-images"
	LabelCrioContainers = "crio-containers"
	// Keys read from docker's devicemapper driver status output.
	DriverStatusPoolName     = "Pool Name"
	DriverStatusDataLoopFile = "Data loop file"
)

const (
	// The block size in bytes.
	statBlockSize uint64 = 512
	// The maximum number of `disk usage` tasks that can be running at once.
	maxConcurrentOps = 20
)

// A pool for restricting the number of consecutive `du` and `find` tasks running.
var pool = make(chan struct{}, maxConcurrentOps)

// init pre-fills the pool with maxConcurrentOps tokens so that up to that
// many operations can claim a token before claimToken blocks.
func init() {
	for i := 0; i < maxConcurrentOps; i++ {
		releaseToken()
	}
}

// claimToken blocks until a concurrency token is available.
func claimToken() {
	<-pool
}

// releaseToken returns a concurrency token to the pool.
func releaseToken() {
	pool <- struct{}{}
}
|
||||
|
||||
// partition describes one mounted block-device partition.
type partition struct {
	mountpoint string
	major      uint
	minor      uint
	fsType     string
	blockSize  uint
}

// RealFsInfo implements FsInfo from /proc/self/mountinfo plus devicemapper
// metadata.
type RealFsInfo struct {
	// Map from block device path to partition information.
	partitions map[string]partition
	// Map from label to block device path.
	// Labels are intent-specific tags that are auto-detected.
	labels map[string]string
	// Map from mountpoint to mount information.
	mounts map[string]mount.Info
	// devicemapper client
	dmsetup devicemapper.DmsetupClient
	// fsUUIDToDeviceName is a map from the filesystem UUID to its device name.
	fsUUIDToDeviceName map[string]string
}
|
||||
|
||||
func NewFsInfo(context Context) (FsInfo, error) {
|
||||
fileReader, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts, err := mount.GetMountsFromReader(fileReader, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fsUUIDToDeviceName, err := getFsUUIDToDeviceNameMap()
|
||||
if err != nil {
|
||||
// UUID is not always available across different OS distributions.
|
||||
// Do not fail if there is an error.
|
||||
klog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err)
|
||||
}
|
||||
|
||||
// Avoid devicemapper container mounts - these are tracked by the ThinPoolWatcher
|
||||
excluded := []string{fmt.Sprintf("%s/devicemapper/mnt", context.Docker.Root)}
|
||||
fsInfo := &RealFsInfo{
|
||||
partitions: processMounts(mounts, excluded),
|
||||
labels: make(map[string]string),
|
||||
mounts: make(map[string]mount.Info),
|
||||
dmsetup: devicemapper.NewDmsetupClient(),
|
||||
fsUUIDToDeviceName: fsUUIDToDeviceName,
|
||||
}
|
||||
|
||||
for _, mnt := range mounts {
|
||||
fsInfo.mounts[mnt.Mountpoint] = *mnt
|
||||
}
|
||||
|
||||
// need to call this before the log line below printing out the partitions, as this function may
|
||||
// add a "partition" for devicemapper to fsInfo.partitions
|
||||
fsInfo.addDockerImagesLabel(context, mounts)
|
||||
fsInfo.addCrioImagesLabel(context, mounts)
|
||||
|
||||
klog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
|
||||
klog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
|
||||
fsInfo.addSystemRootLabel(mounts)
|
||||
return fsInfo, nil
|
||||
}
|
||||
|
||||
// getFsUUIDToDeviceNameMap creates the filesystem uuid to device name map
|
||||
// using the information in /dev/disk/by-uuid. If the directory does not exist,
|
||||
// this function will return an empty map.
|
||||
func getFsUUIDToDeviceNameMap() (map[string]string, error) {
|
||||
const dir = "/dev/disk/by-uuid"
|
||||
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
return make(map[string]string), nil
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fsUUIDToDeviceName := make(map[string]string)
|
||||
for _, file := range files {
|
||||
fpath := filepath.Join(dir, file.Name())
|
||||
target, err := os.Readlink(fpath)
|
||||
if err != nil {
|
||||
klog.Warningf("Failed to resolve symlink for %q", fpath)
|
||||
continue
|
||||
}
|
||||
device, err := filepath.Abs(filepath.Join(dir, target))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve the absolute path of %q", filepath.Join(dir, target))
|
||||
}
|
||||
fsUUIDToDeviceName[file.Name()] = device
|
||||
}
|
||||
return fsUUIDToDeviceName, nil
|
||||
}
|
||||
|
||||
// processMounts filters the mountinfo entries down to the partitions cadvisor
// tracks: ext*/nfs* families (matched by prefix) plus the explicitly supported
// filesystem types below. Mounts under any excluded prefix are dropped. The
// entries' Source/Major/Minor fields may be rewritten in place (tmpfs,
// btrfs, overlay workarounds) before being recorded.
func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) map[string]partition {
	partitions := make(map[string]partition)

	supportedFsType := map[string]bool{
		// all ext and nfs systems are checked through prefix
		// because there are a number of families (e.g., ext3, ext4, nfs3, nfs4...)
		"btrfs":   true,
		"overlay": true,
		"tmpfs":   true,
		"xfs":     true,
		"zfs":     true,
	}

	for _, mnt := range mounts {
		if !strings.HasPrefix(mnt.FSType, "ext") && !strings.HasPrefix(mnt.FSType, "nfs") &&
			!supportedFsType[mnt.FSType] {
			continue
		}
		// Avoid bind mounts, exclude tmpfs.
		if _, ok := partitions[mnt.Source]; ok {
			if mnt.FSType != "tmpfs" {
				continue
			}
		}

		// Drop mounts living under any excluded mountpoint prefix
		// (e.g. docker's devicemapper container mounts).
		hasPrefix := false
		for _, prefix := range excludedMountpointPrefixes {
			if strings.HasPrefix(mnt.Mountpoint, prefix) {
				hasPrefix = true
				break
			}
		}
		if hasPrefix {
			continue
		}

		// using mountpoint to replace device once fstype it tmpfs
		if mnt.FSType == "tmpfs" {
			mnt.Source = mnt.Mountpoint
		}
		// btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
		// instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
		if mnt.FSType == "btrfs" && mnt.Major == 0 && strings.HasPrefix(mnt.Source, "/dev/") {
			major, minor, err := getBtrfsMajorMinorIds(mnt)
			if err != nil {
				klog.Warningf("%s", err)
			} else {
				mnt.Major = major
				mnt.Minor = minor
			}
		}

		// overlay fix: Making mount source unique for all overlay mounts, using the mount's major and minor ids.
		if mnt.FSType == "overlay" {
			mnt.Source = fmt.Sprintf("%s_%d-%d", mnt.Source, mnt.Major, mnt.Minor)
		}

		partitions[mnt.Source] = partition{
			fsType:     mnt.FSType,
			mountpoint: mnt.Mountpoint,
			major:      uint(mnt.Major),
			minor:      uint(mnt.Minor),
		}
	}

	return partitions
}
|
||||
|
||||
// getDockerDeviceMapperInfo returns information about the devicemapper device and "partition" if
// docker is using devicemapper for its storage driver. If a loopback device is being used, don't
// return any information or error, as we want to report based on the actual partition where the
// loopback file resides, inside of the loopback file itself.
func (i *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string, *partition, error) {
	// Only relevant for the devicemapper storage driver.
	if context.Driver != DeviceMapper.String() {
		return "", nil, nil
	}

	// A non-empty "Data loop file" means loopback-backed devicemapper; skip.
	dataLoopFile := context.DriverStatus[DriverStatusDataLoopFile]
	if len(dataLoopFile) > 0 {
		return "", nil, nil
	}

	// Resolve the thin pool device via dmsetup.
	dev, major, minor, blockSize, err := dockerDMDevice(context.DriverStatus, i.dmsetup)
	if err != nil {
		return "", nil, err
	}

	return dev, &partition{
		fsType:    DeviceMapper.String(),
		major:     major,
		minor:     minor,
		blockSize: blockSize,
	}, nil
}
|
||||
|
||||
// addSystemRootLabel attempts to determine which device contains the mount for /.
|
||||
func (i *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) {
|
||||
for _, m := range mounts {
|
||||
if m.Mountpoint == "/" {
|
||||
i.partitions[m.Source] = partition{
|
||||
fsType: m.FSType,
|
||||
mountpoint: m.Mountpoint,
|
||||
major: uint(m.Major),
|
||||
minor: uint(m.Minor),
|
||||
}
|
||||
i.labels[LabelSystemRoot] = m.Source
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// addDockerImagesLabel attempts to determine which device contains the mount for docker images.
// Devicemapper-backed storage is resolved via dmsetup; otherwise the label is
// attached to the deepest mount matching a candidate docker image path.
func (i *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) {
	if context.Docker.Driver != "" {
		dockerDev, dockerPartition, err := i.getDockerDeviceMapperInfo(context.Docker)
		if err != nil {
			// Best effort: fall back to mountpoint matching below.
			klog.Warningf("Could not get Docker devicemapper device: %v", err)
		}
		if len(dockerDev) > 0 && dockerPartition != nil {
			i.partitions[dockerDev] = *dockerPartition
			i.labels[LabelDockerImages] = dockerDev
		} else {
			i.updateContainerImagesPath(LabelDockerImages, mounts, getDockerImagePaths(context))
		}
	}
}
|
||||
|
||||
// addCrioImagesLabel attaches the cri-o image/container labels to the mounts
// backing the cri-o root (and, when configured, its separate image store).
// When no separate image store is set, everything is labeled crio-images.
func (i *RealFsInfo) addCrioImagesLabel(context Context, mounts []*mount.Info) {
	labelCrioImageOrContainers := LabelCrioContainers
	// If imagestore is not specified, let's fall back to the original case.
	// Everything will be stored in crio-images
	if context.Crio.ImageStore == "" {
		labelCrioImageOrContainers = LabelCrioImages
	}
	if context.Crio.Root != "" {
		crioPath := context.Crio.Root
		// Candidate mountpoints: "/", the driver subdirectory, and every
		// ancestor of the cri-o root.
		crioImagePaths := map[string]struct{}{
			"/": {},
		}
		imageOrContainerPath := context.Crio.Driver + "-containers"
		if context.Crio.ImageStore == "" {
			// If ImageStore is not specified then we will assume ImageFs is complete separate.
			// No need to split the image store.
			imageOrContainerPath = context.Crio.Driver + "-images"
		}
		crioImagePaths[path.Join(crioPath, imageOrContainerPath)] = struct{}{}
		for crioPath != "/" && crioPath != "." {
			crioImagePaths[crioPath] = struct{}{}
			crioPath = filepath.Dir(crioPath)
		}
		i.updateContainerImagesPath(labelCrioImageOrContainers, mounts, crioImagePaths)
	}
	if context.Crio.ImageStore != "" {
		// A separate image store always gets the crio-images label.
		crioPath := context.Crio.ImageStore
		crioImagePaths := map[string]struct{}{
			"/": {},
		}
		crioImagePaths[path.Join(crioPath, context.Crio.Driver+"-images")] = struct{}{}
		for crioPath != "/" && crioPath != "." {
			crioImagePaths[crioPath] = struct{}{}
			crioPath = filepath.Dir(crioPath)
		}
		i.updateContainerImagesPath(LabelCrioImages, mounts, crioImagePaths)
	}
}
|
||||
|
||||
// Generate a list of possible mount points for docker image management from the docker root directory.
|
||||
// Right now, we look for each type of supported graph driver directories, but we can do better by parsing
|
||||
// some of the context from `docker info`.
|
||||
func getDockerImagePaths(context Context) map[string]struct{} {
|
||||
dockerImagePaths := map[string]struct{}{
|
||||
"/": {},
|
||||
}
|
||||
|
||||
// TODO(rjnagal): Detect docker root and graphdriver directories from docker info.
|
||||
dockerRoot := context.Docker.Root
|
||||
for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "overlay2", "zfs"} {
|
||||
dockerImagePaths[path.Join(dockerRoot, dir)] = struct{}{}
|
||||
}
|
||||
for dockerRoot != "/" && dockerRoot != "." {
|
||||
dockerImagePaths[dockerRoot] = struct{}{}
|
||||
dockerRoot = filepath.Dir(dockerRoot)
|
||||
}
|
||||
return dockerImagePaths
|
||||
}
|
||||
|
||||
// updateContainerImagesPath compares the mountpoints with possible container
// image mount points. If a match is found, the deepest (longest mountpoint)
// match wins and its partition is recorded under the given label.
func (i *RealFsInfo) updateContainerImagesPath(label string, mounts []*mount.Info, containerImagePaths map[string]struct{}) {
	var useMount *mount.Info
	for _, m := range mounts {
		if _, ok := containerImagePaths[m.Mountpoint]; ok {
			// Prefer the longest (deepest) matching mountpoint.
			if useMount == nil || (len(useMount.Mountpoint) < len(m.Mountpoint)) {
				useMount = m
			}
		}
	}
	if useMount != nil {
		i.partitions[useMount.Source] = partition{
			fsType:     useMount.FSType,
			mountpoint: useMount.Mountpoint,
			major:      uint(useMount.Major),
			minor:      uint(useMount.Minor),
		}
		i.labels[label] = useMount.Source
	}
}
|
||||
|
||||
func (i *RealFsInfo) GetDeviceForLabel(label string) (string, error) {
|
||||
dev, ok := i.labels[label]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("non-existent label %q", label)
|
||||
}
|
||||
return dev, nil
|
||||
}
|
||||
|
||||
func (i *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
|
||||
var labels []string
|
||||
for label, dev := range i.labels {
|
||||
if dev == device {
|
||||
labels = append(labels, label)
|
||||
}
|
||||
}
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func (i *RealFsInfo) GetMountpointForDevice(dev string) (string, error) {
|
||||
p, ok := i.partitions[dev]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("no partition info for device %q", dev)
|
||||
}
|
||||
return p.mountpoint, nil
|
||||
}
|
||||
|
||||
// GetFsInfoForPath returns capacity and usage stats for every tracked
// partition whose mountpoint appears in mountSet (all partitions when
// mountSet is nil). Stats are gathered per filesystem type: dmsetup for
// devicemapper, zfs tooling for zfs pools (when /dev/zfs is present),
// statfs(2) otherwise; NFS results are cached per major:minor so shares on
// the same server are only statted once. Disk I/O counters from
// /proc/diskstats are attached by device name or by major/minor match.
func (i *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
	filesystems := make([]Fs, 0)
	deviceSet := make(map[string]struct{})
	diskStatsMap, err := getDiskStatsMap("/proc/diskstats")
	if err != nil {
		return nil, err
	}
	// Cache of per-device NFS stats, keyed by "major:minor".
	nfsInfo := make(map[string]Fs, 0)
	for device, partition := range i.partitions {
		_, hasMount := mountSet[partition.mountpoint]
		_, hasDevice := deviceSet[device]
		if mountSet == nil || (hasMount && !hasDevice) {
			var (
				err error
				fs  Fs
			)
			// Collapse nfs3/nfs4/... onto a single "nfs" case.
			fsType := partition.fsType
			if strings.HasPrefix(partition.fsType, "nfs") {
				fsType = "nfs"
			}
			switch fsType {
			case DeviceMapper.String():
				fs.Capacity, fs.Free, fs.Available, err = getDMStats(device, partition.blockSize)
				klog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available)
				fs.Type = DeviceMapper
			case ZFS.String():
				// NOTE(review): os.IsExist(err) is false when os.Stat succeeds
				// (err == nil), so this branch looks like it is never taken
				// when /dev/zfs exists; `err == nil` is presumably what was
				// intended — confirm against upstream cadvisor.
				if _, devzfs := os.Stat("/dev/zfs"); os.IsExist(devzfs) {
					fs.Capacity, fs.Free, fs.Available, err = getZfstats(device)
					fs.Type = ZFS
					break
				}
				// if /dev/zfs is not present default to VFS
				fallthrough
			case NFS.String():
				devId := fmt.Sprintf("%d:%d", partition.major, partition.minor)
				if v, ok := nfsInfo[devId]; ok {
					// Reuse cached stats for shares on the same device.
					fs = v
					break
				}
				var inodes, inodesFree uint64
				fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint)
				if err != nil {
					klog.V(4).Infof("the file system type is %s, partition mountpoint does not exist: %v, error: %v", partition.fsType, partition.mountpoint, err)
					break
				}
				fs.Inodes = &inodes
				fs.InodesFree = &inodesFree
				fs.Type = VFS
				nfsInfo[devId] = fs
			default:
				// Everything else (ext*, xfs, btrfs, overlay, tmpfs): statfs(2).
				var inodes, inodesFree uint64
				if utils.FileExists(partition.mountpoint) {
					fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint)
					fs.Inodes = &inodes
					fs.InodesFree = &inodesFree
					fs.Type = VFS
				} else {
					klog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
				}
			}
			if err != nil {
				// Stat failures skip the filesystem rather than failing the call.
				klog.V(4).Infof("Stat fs failed. Error: %v", err)
			} else {
				deviceSet[device] = struct{}{}
				fs.DeviceInfo = DeviceInfo{
					Device: device,
					Major:  uint(partition.major),
					Minor:  uint(partition.minor),
				}

				// Attach diskstats, first by device name, then by major/minor.
				if val, ok := diskStatsMap[device]; ok {
					fs.DiskStats = val
				} else {
					for k, v := range diskStatsMap {
						if v.MajorNum == uint64(partition.major) && v.MinorNum == uint64(partition.minor) {
							fs.DiskStats = diskStatsMap[k]
							break
						}
					}
				}
				filesystems = append(filesystems, fs)
			}
		}
	}
	return filesystems, nil
}
|
||||
|
||||
// partitionRegex matches the kernel device names we collect diskstats for:
// sd/vd/xvd disks (optionally with a partition number) and dm-N devices.
var partitionRegex = regexp.MustCompile(`^(?:(?:s|v|xv)d[a-z]+\d*|dm-\d+)$`)
|
||||
|
||||
func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
|
||||
diskStatsMap := make(map[string]DiskStats)
|
||||
file, err := os.Open(diskStatsFile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
klog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
|
||||
return diskStatsMap, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(file)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
words := strings.Fields(line)
|
||||
if !partitionRegex.MatchString(words[2]) {
|
||||
continue
|
||||
}
|
||||
// 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330
|
||||
deviceName := path.Join("/dev", words[2])
|
||||
|
||||
var err error
|
||||
devInfo := make([]uint64, 2)
|
||||
for i := 0; i < len(devInfo); i++ {
|
||||
devInfo[i], err = strconv.ParseUint(words[i], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
wordLength := len(words)
|
||||
offset := 3
|
||||
var stats = make([]uint64, wordLength-offset)
|
||||
if len(stats) < 11 {
|
||||
return nil, fmt.Errorf("could not parse all 11 columns of /proc/diskstats")
|
||||
}
|
||||
for i := offset; i < wordLength; i++ {
|
||||
stats[i-offset], err = strconv.ParseUint(words[i], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
major64, err := strconv.ParseUint(words[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
minor64, err := strconv.ParseUint(words[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diskStats := DiskStats{
|
||||
MajorNum: devInfo[0],
|
||||
MinorNum: devInfo[1],
|
||||
ReadsCompleted: stats[0],
|
||||
ReadsMerged: stats[1],
|
||||
SectorsRead: stats[2],
|
||||
ReadTime: stats[3],
|
||||
WritesCompleted: stats[4],
|
||||
WritesMerged: stats[5],
|
||||
SectorsWritten: stats[6],
|
||||
WriteTime: stats[7],
|
||||
IoInProgress: stats[8],
|
||||
IoTime: stats[9],
|
||||
WeightedIoTime: stats[10],
|
||||
Major: major64,
|
||||
Minor: minor64,
|
||||
}
|
||||
diskStatsMap[deviceName] = diskStats
|
||||
}
|
||||
return diskStatsMap, nil
|
||||
}
|
||||
|
||||
// GetGlobalFsInfo returns stats for every tracked partition; it is
// GetFsInfoForPath with no mountpoint filter.
func (i *RealFsInfo) GetGlobalFsInfo() ([]Fs, error) {
	return i.GetFsInfoForPath(nil)
}
|
||||
|
||||
// major extracts the major device number from an encoded Linux dev_t value
// (bits 8-19 of the device number).
func major(devNumber uint64) uint {
	const (
		shift = 8
		mask  = 0xfff
	)
	return uint((devNumber >> shift) & mask)
}
|
||||
|
||||
// minor extracts the minor device number from an encoded Linux dev_t value:
// the low 8 bits, plus bits 20+ shifted down into positions 8-19.
func minor(devNumber uint64) uint {
	low := devNumber & 0xff
	high := (devNumber >> 12) & 0xfff00
	return uint(low | high)
}
|
||||
|
||||
// GetDeviceInfoByFsUUID returns the device info for the device with the given
// filesystem UUID. It returns ErrNoSuchDevice when the UUID is unknown, and a
// distinct error when the UUID maps to a device we do not track as a partition.
func (i *RealFsInfo) GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error) {
	deviceName, found := i.fsUUIDToDeviceName[uuid]
	if !found {
		return nil, ErrNoSuchDevice
	}
	p, found := i.partitions[deviceName]
	if !found {
		return nil, fmt.Errorf("cannot find device %q in partitions", deviceName)
	}
	return &DeviceInfo{deviceName, p.major, p.minor}, nil
}
|
||||
|
||||
// mountInfoFromDir returns the mount entry covering dir, walking up parent
// directories until a mount is found or the root is reached. The bool reports
// whether a mount was found.
func (i *RealFsInfo) mountInfoFromDir(dir string) (*mount.Info, bool) {
	mnt, found := i.mounts[dir]
	// try the parent dir if not found until we reach the root dir
	// this is an issue on btrfs systems where the directory is not
	// the subvolume
	for !found {
		pathdir, _ := filepath.Split(dir)
		// break when we reach root
		if pathdir == "/" {
			mnt, found = i.mounts["/"]
			break
		}
		// trim "/" from the new parent path otherwise the next possible
		// filepath.Split in the loop will not split the string any further
		dir = strings.TrimSuffix(pathdir, "/")
		mnt, found = i.mounts[dir]
	}
	return &mnt, found
}
|
||||
|
||||
// GetDirFsDevice returns the block device backing the filesystem on which
// 'dir' resides: first by matching stat(2) device numbers against the cached
// partitions, then by falling back to a mount-table walk for /dev-backed mounts.
func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
	buf := new(syscall.Stat_t)
	err := syscall.Stat(dir, buf)
	if err != nil {
		return nil, fmt.Errorf("stat failed on %s with error: %s", dir, err)
	}

	// The type Dev in Stat_t is 32bit on mips.
	major := major(uint64(buf.Dev)) // nolint: unconvert
	minor := minor(uint64(buf.Dev)) // nolint: unconvert
	for device, partition := range i.partitions {
		if partition.major == major && partition.minor == minor {
			return &DeviceInfo{device, major, minor}, nil
		}
	}

	// Not in the partitions cache: look up the closest enclosing mount.
	mnt, found := i.mountInfoFromDir(dir)
	if found && strings.HasPrefix(mnt.Source, "/dev/") {
		major, minor := mnt.Major, mnt.Minor

		// btrfs reports major 0 in mountinfo; recover the real ids via stat.
		if mnt.FSType == "btrfs" && major == 0 {
			major, minor, err = getBtrfsMajorMinorIds(mnt)
			if err != nil {
				klog.Warningf("Unable to get btrfs mountpoint IDs: %v", err)
			}
		}

		return &DeviceInfo{mnt.Source, uint(major), uint(minor)}, nil
	}

	return nil, fmt.Errorf("with major: %d, minor: %d: %w", major, minor, ErrDeviceNotInPartitionsMap)
}
|
||||
|
||||
// GetDirUsage walks dir and returns its disk usage in bytes (512-byte
// st_blocks units, so sparse files count their allocated size) and inode
// count. Hardlinked files (nlink > 1) are counted once, and the walk does not
// descend into directories on other devices. Files vanishing during the walk
// are tolerated.
func GetDirUsage(dir string) (UsageInfo, error) {
	var usage UsageInfo

	if dir == "" {
		return usage, fmt.Errorf("invalid directory")
	}

	rootInfo, err := os.Stat(dir)
	if err != nil {
		return usage, fmt.Errorf("could not stat %q to get inode usage: %v", dir, err)
	}

	rootStat, ok := rootInfo.Sys().(*syscall.Stat_t)
	if !ok {
		return usage, fmt.Errorf("unsupported fileinfo for getting inode usage of %q", dir)
	}

	// Device id of the root; used to avoid crossing filesystem boundaries.
	rootDevID := rootStat.Dev

	// dedupedInode stores inodes that could be duplicates (nlink > 1)
	dedupedInodes := make(map[uint64]struct{})

	err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if os.IsNotExist(err) {
			// expected if files appear/vanish
			return nil
		}
		if err != nil {
			return fmt.Errorf("unable to count inodes for part of dir %s: %s", dir, err)
		}

		// according to the docs, Sys can be nil
		if info.Sys() == nil {
			return fmt.Errorf("fileinfo Sys is nil")
		}

		s, ok := info.Sys().(*syscall.Stat_t)
		if !ok {
			return fmt.Errorf("unsupported fileinfo; could not convert to stat_t")
		}

		if s.Dev != rootDevID {
			// don't descend into directories on other devices
			return filepath.SkipDir
		}
		if s.Nlink > 1 {
			if _, ok := dedupedInodes[s.Ino]; !ok {
				// Dedupe things that could be hardlinks
				dedupedInodes[s.Ino] = struct{}{}

				usage.Bytes += uint64(s.Blocks) * statBlockSize
				usage.Inodes++
			}
		} else {
			usage.Bytes += uint64(s.Blocks) * statBlockSize
			usage.Inodes++
		}
		return nil
	})

	return usage, err
}
|
||||
|
||||
// GetDirUsage returns usage information for 'dir', throttled by the package
// token pool so that at most maxConcurrentOps directory walks run at once.
func (i *RealFsInfo) GetDirUsage(dir string) (UsageInfo, error) {
	claimToken()
	defer releaseToken()
	return GetDirUsage(dir)
}
|
||||
|
||||
// getVfsStats returns capacity, free, and available bytes plus total and free
// inode counts for the filesystem mounted at path, via statfs(2). The syscall
// runs in a separate goroutine so a hung mount (e.g. a dead NFS server)
// cannot block the caller past the 2-second timeout.
func getVfsStats(path string) (total uint64, free uint64, avail uint64, inodes uint64, inodesFree uint64, err error) {
	// timeout the context with, default is 2sec
	const timeout = 2 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	type result struct {
		total      uint64
		free       uint64
		avail      uint64
		inodes     uint64
		inodesFree uint64
		err        error
	}

	// Buffered so the goroutine can always complete its send and exit even
	// when the caller has already returned on timeout (no goroutine leak).
	resultChan := make(chan result, 1)

	go func() {
		// Work on locals only: the previous code wrote the caller's named
		// return values from this goroutine, which races with the timeout
		// return path below.
		var res result
		var s syscall.Statfs_t
		if statErr := syscall.Statfs(path, &s); statErr != nil {
			res.err = statErr
			resultChan <- res
			return
		}
		res.total = uint64(s.Frsize) * s.Blocks
		res.free = uint64(s.Frsize) * s.Bfree
		res.avail = uint64(s.Frsize) * s.Bavail
		res.inodes = uint64(s.Files)
		res.inodesFree = uint64(s.Ffree)
		resultChan <- res
	}()

	select {
	case <-ctx.Done():
		return 0, 0, 0, 0, 0, ctx.Err()
	case res := <-resultChan:
		return res.total, res.free, res.avail, res.inodes, res.inodesFree, res.err
	}
}
|
||||
|
||||
// Devicemapper thin provisioning is detailed at
// https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt
//
// dockerDMDevice resolves docker's devicemapper thin pool from its driver
// status and returns the pool name plus the data device's major number, minor
// number, and data block size as parsed from `dmsetup table`.
func dockerDMDevice(driverStatus map[string]string, dmsetup devicemapper.DmsetupClient) (string, uint, uint, uint, error) {
	poolName, ok := driverStatus[DriverStatusPoolName]
	if !ok || len(poolName) == 0 {
		return "", 0, 0, 0, fmt.Errorf("Could not get dm pool name")
	}

	out, err := dmsetup.Table(poolName)
	if err != nil {
		return "", 0, 0, 0, err
	}

	major, minor, dataBlkSize, err := parseDMTable(string(out))
	if err != nil {
		return "", 0, 0, 0, err
	}

	return poolName, major, minor, dataBlkSize, nil
}
|
||||
|
||||
// parseDMTable parses a single line of `dmsetup table` output for a thin-pool
// target and returns the data device's major number, minor number, and the
// data block size (in 512-byte sectors), plus an error.
//
// A thin-pool table line has the form
//
//	start length thin-pool metadata_dev data_dev data_block_size low_water_mark ...
//
// where each *_dev is "major:minor"; after replacing ":" with spaces, the data
// device's major/minor land in fields 5 and 6 and the block size in field 7.
func parseDMTable(dmTable string) (uint, uint, uint, error) {
	dmTable = strings.Replace(dmTable, ":", " ", -1)
	dmFields := strings.Fields(dmTable)

	if len(dmFields) < 8 {
		// Error string lowercased per Go convention (staticcheck ST1005).
		return 0, 0, 0, fmt.Errorf("invalid dmsetup status output: %s", dmTable)
	}

	major, err := strconv.ParseUint(dmFields[5], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	minor, err := strconv.ParseUint(dmFields[6], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	dataBlkSize, err := strconv.ParseUint(dmFields[7], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}

	return uint(major), uint(minor), uint(dataBlkSize), nil
}
|
||||
|
||||
// getDMStats returns (total, free, available) bytes for a devicemapper thin
// pool by shelling out to `dmsetup status` and scaling the reported block
// counts by 512 * dataBlkSize (dataBlkSize is in 512-byte sectors). For thin
// pools, free and available are the same value.
func getDMStats(poolName string, dataBlkSize uint) (uint64, uint64, uint64, error) {
	out, err := exec.Command("dmsetup", "status", poolName).Output()
	if err != nil {
		return 0, 0, 0, err
	}

	used, total, err := parseDMStatus(string(out))
	if err != nil {
		return 0, 0, 0, err
	}

	// Convert block counts to bytes.
	used *= 512 * uint64(dataBlkSize)
	total *= 512 * uint64(dataBlkSize)
	free := total - used

	return total, free, free, nil
}
|
||||
|
||||
// parseDMStatus parses a line of `dmsetup status` output for a thin-pool
// target and returns the used and total data block counts. The status line
// reports usage as "used/total" pairs; replacing "/" with spaces puts the
// data-block used/total counts in fields 6 and 7.
func parseDMStatus(dmStatus string) (uint64, uint64, error) {
	dmStatus = strings.Replace(dmStatus, "/", " ", -1)
	dmFields := strings.Fields(dmStatus)

	if len(dmFields) < 8 {
		// Error string lowercased per Go convention (staticcheck ST1005).
		return 0, 0, fmt.Errorf("invalid dmsetup status output: %s", dmStatus)
	}

	used, err := strconv.ParseUint(dmFields[6], 10, 64)
	if err != nil {
		return 0, 0, err
	}
	total, err := strconv.ParseUint(dmFields[7], 10, 64)
	if err != nil {
		return 0, 0, err
	}

	return used, total, nil
}
|
||||
|
||||
// getZfstats returns ZFS mount stats using zfsutils: total bytes
// (Used + Avail + Usedbydataset per the zfs dataset properties), with the
// dataset's Avail reported as both free and available.
func getZfstats(poolName string) (uint64, uint64, uint64, error) {
	dataset, err := zfs.GetDataset(poolName)
	if err != nil {
		return 0, 0, 0, err
	}

	total := dataset.Used + dataset.Avail + dataset.Usedbydataset

	return total, dataset.Avail, dataset.Avail, nil
}
|
||||
|
||||
// Get major and minor Ids for a mount point using btrfs as filesystem.
// The mount source must be a block device; its mountpoint is then stat'ed and
// the device numbers are decoded from st_dev.
func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
	// btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
	// instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point

	buf := new(syscall.Stat_t)
	err := syscall.Stat(mount.Source, buf)
	if err != nil {
		err = fmt.Errorf("stat failed on %s with error: %s", mount.Source, err)
		return 0, 0, err
	}

	klog.V(4).Infof("btrfs mount %#v", mount)
	// Only proceed when the source is a block device node.
	if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
		err := syscall.Stat(mount.Mountpoint, buf)
		if err != nil {
			err = fmt.Errorf("stat failed on %s with error: %s", mount.Mountpoint, err)
			return 0, 0, err
		}

		// The type Dev and Rdev in Stat_t are 32bit on mips.
		klog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(uint64(buf.Dev))), int(minor(uint64(buf.Dev))))    // nolint: unconvert
		klog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(uint64(buf.Rdev))), int(minor(uint64(buf.Rdev)))) // nolint: unconvert

		return int(major(uint64(buf.Dev))), int(minor(uint64(buf.Dev))), nil // nolint: unconvert
	}
	return 0, 0, fmt.Errorf("%s is not a block device", mount.Source)
}
|
133
vendor/github.com/google/cadvisor/fs/types.go
generated
vendored
Normal file
133
vendor/github.com/google/cadvisor/fs/types.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Context carries the per-runtime storage configuration used to locate
// container image and container directories.
type Context struct {
	// docker root directory.
	Docker DockerContext
	Crio   CrioContext
	Podman PodmanContext
}
|
||||
|
||||
// DockerContext describes the docker daemon's storage configuration.
type DockerContext struct {
	Root         string            // docker root directory
	Driver       string            // storage driver name, e.g. "devicemapper"
	DriverStatus map[string]string // driver status key/value pairs (see the DriverStatus* keys)
}
|
||||
|
||||
// PodmanContext describes podman's storage configuration.
type PodmanContext struct {
	Root         string            // podman root directory
	Driver       string            // storage driver name
	DriverStatus map[string]string // driver status key/value pairs
}
|
||||
|
||||
// CrioContext describes cri-o's storage configuration.
type CrioContext struct {
	Root       string // cri-o root directory
	ImageStore string // optional separate image store; "" means images live under Root
	Driver     string // storage driver name
}
|
||||
|
||||
// DeviceInfo identifies a block device by path and device numbers.
type DeviceInfo struct {
	Device string // device path, e.g. "/dev/sda1"
	Major  uint
	Minor  uint
}
|
||||
|
||||
// FsType names a filesystem statistics backend (see the constants below).
type FsType string

// String returns the filesystem type as a plain string.
func (t FsType) String() string {
	return string(t)
}
|
||||
|
||||
// Filesystem stat backends used by GetFsInfoForPath.
const (
	ZFS          FsType = "zfs"
	DeviceMapper FsType = "devicemapper"
	VFS          FsType = "vfs"
	NFS          FsType = "nfs"
)
|
||||
|
||||
// Fs holds capacity and usage statistics for one filesystem.
type Fs struct {
	DeviceInfo
	Type       FsType
	Capacity   uint64    // total size in bytes
	Free       uint64    // free bytes (statfs Bfree)
	Available  uint64    // bytes available to unprivileged users (statfs Bavail)
	Inodes     *uint64   // total inodes; nil when not available (e.g. devicemapper)
	InodesFree *uint64   // free inodes; nil when not available
	DiskStats  DiskStats // I/O counters from /proc/diskstats
}
|
||||
|
||||
// DiskStats mirrors one row of /proc/diskstats for a block device.
type DiskStats struct {
	MajorNum        uint64 // major device number (column 1)
	MinorNum        uint64 // minor device number (column 2)
	ReadsCompleted  uint64
	ReadsMerged     uint64
	SectorsRead     uint64
	ReadTime        uint64
	WritesCompleted uint64
	WritesMerged    uint64
	SectorsWritten  uint64
	WriteTime       uint64
	IoInProgress    uint64
	IoTime          uint64
	WeightedIoTime  uint64
	// Major/Minor carry the same parsed columns as MajorNum/MinorNum
	// (populated separately by getDiskStatsMap).
	Major uint64
	Minor uint64
}
|
||||
|
||||
// UsageInfo reports the disk usage of a directory tree.
type UsageInfo struct {
	Bytes  uint64 // bytes used, hardlink-deduplicated (see GetDirUsage)
	Inodes uint64 // number of inodes used
}
|
||||
|
||||
var (
	// ErrNoSuchDevice is the error indicating the requested device does not exist.
	ErrNoSuchDevice = errors.New("cadvisor: no such device")

	// ErrDeviceNotInPartitionsMap is the error resulting if a device could not be found in the partitions map.
	ErrDeviceNotInPartitionsMap = errors.New("could not find device in cached partitions map")
)
|
||||
|
||||
// FsInfo exposes filesystem capacity, usage, and device-lookup operations.
type FsInfo interface {
	// GetGlobalFsInfo returns capacity and free space, in bytes, of all the
	// filesystems tracked on the host.
	GetGlobalFsInfo() ([]Fs, error)

	// GetFsInfoForPath returns capacity and free space, in bytes, of the set of mounts passed.
	GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error)

	// GetDirUsage returns a usage information for 'dir'.
	GetDirUsage(dir string) (UsageInfo, error)

	// GetDeviceInfoByFsUUID returns the information of the device with the
	// specified filesystem uuid. If no such device exists, this function will
	// return the ErrNoSuchDevice error.
	GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error)

	// GetDirFsDevice returns the block device info of the filesystem on which 'dir' resides.
	GetDirFsDevice(dir string) (*DeviceInfo, error)

	// GetDeviceForLabel returns the device name associated with a particular label.
	GetDeviceForLabel(label string) (string, error)

	// GetLabelsForDevice returns all labels associated with a particular device name.
	GetLabelsForDevice(device string) ([]string, error)

	// GetMountpointForDevice returns the mountpoint associated with a particular device.
	GetMountpointForDevice(device string) (string, error)
}
|
1085
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
Normal file
1085
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
38
vendor/github.com/google/cadvisor/info/v1/docker.go
generated
vendored
Normal file
38
vendor/github.com/google/cadvisor/info/v1/docker.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Types used for docker containers.
|
||||
package v1
|
||||
|
||||
// DockerStatus describes the Docker daemon's version, configuration and
// container/image counts as reported by its info endpoint.
type DockerStatus struct {
	Version       string            `json:"version"`        // Docker daemon version.
	APIVersion    string            `json:"api_version"`    // Docker API version.
	KernelVersion string            `json:"kernel_version"` // Host kernel version.
	OS            string            `json:"os"`             // Operating system of the host.
	Hostname      string            `json:"hostname"`       // Hostname of the Docker host.
	RootDir       string            `json:"root_dir"`       // Docker root directory.
	Driver        string            `json:"driver"`         // Storage driver in use.
	DriverStatus  map[string]string `json:"driver_status"`  // Storage-driver key/value status pairs.
	ExecDriver    string            `json:"exec_driver"`    // Exec driver in use.
	NumImages     int               `json:"num_images"`     // Number of images on the host.
	NumContainers int               `json:"num_containers"` // Number of containers on the host.
}

// DockerImage describes a single Docker image known to the daemon.
type DockerImage struct {
	ID          string   `json:"id"`
	RepoTags    []string `json:"repo_tags"` // repository name and tags.
	Created     int64    `json:"created"`   // unix time since creation.
	VirtualSize int64    `json:"virtual_size"`
	Size        int64    `json:"size"`
}
|
327
vendor/github.com/google/cadvisor/info/v1/machine.go
generated
vendored
Normal file
327
vendor/github.com/google/cadvisor/info/v1/machine.go
generated
vendored
Normal file
@ -0,0 +1,327 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import "time"
|
||||
|
||||
// FsInfo describes a single filesystem on the machine: its backing block
// device, total capacity, type, and inode information.
type FsInfo struct {
	// Block device associated with the filesystem.
	Device string `json:"device"`
	// DeviceMajor is the major identifier of the device, used for correlation with blkio stats
	DeviceMajor uint64 `json:"-"`
	// DeviceMinor is the minor identifier of the device, used for correlation with blkio stats
	DeviceMinor uint64 `json:"-"`

	// Total number of bytes available on the filesystem.
	Capacity uint64 `json:"capacity"`

	// Type of device.
	Type string `json:"type"`

	// Total number of inodes available on the filesystem.
	Inodes uint64 `json:"inodes"`

	// HasInodes when true, indicates that Inodes info will be available.
	HasInodes bool `json:"has_inodes"`
}

// Node describes one NUMA node: its memory, huge pages, CPU cores, caches,
// and distances to other nodes.
type Node struct {
	Id int `json:"node_id"`
	// Per-node memory
	Memory    uint64          `json:"memory"`
	HugePages []HugePagesInfo `json:"hugepages"`
	Cores     []Core          `json:"cores"`
	Caches    []Cache         `json:"caches"`
	Distances []uint64        `json:"distances"`
}

// Core describes one physical CPU core: its hyperthread ids, caches, and
// placement (socket / book / drawer).
type Core struct {
	Id           int     `json:"core_id"`
	Threads      []int   `json:"thread_ids"`
	Caches       []Cache `json:"caches"`
	UncoreCaches []Cache `json:"uncore_caches"`
	SocketID     int     `json:"socket_id"`
	BookID       string  `json:"book_id,omitempty"`
	DrawerID     string  `json:"drawer_id,omitempty"`
}

// Cache describes a single CPU memory cache.
type Cache struct {
	// Id of memory cache
	Id int `json:"id"`
	// Size of memory cache in bytes.
	Size uint64 `json:"size"`
	// Type of memory cache: data, instruction, or unified.
	Type string `json:"type"`
	// Level (distance from cpus) in a multi-level cache hierarchy.
	Level int `json:"level"`
}
|
||||
|
||||
func (n *Node) FindCore(id int) (bool, int) {
|
||||
for i, n := range n.Cores {
|
||||
if n.Id == id {
|
||||
return true, i
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
// FindCoreByThread returns bool if found Core with same thread as provided and it's index in Node Core array.
|
||||
// If it's not found, returns false and -1.
|
||||
func (n *Node) FindCoreByThread(thread int) (bool, int) {
|
||||
for i, n := range n.Cores {
|
||||
for _, t := range n.Threads {
|
||||
if t == thread {
|
||||
return true, i
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
func (n *Node) AddThread(thread int, core int) {
|
||||
var coreIdx int
|
||||
if core == -1 {
|
||||
// Assume one hyperthread per core when topology data is missing.
|
||||
core = thread
|
||||
}
|
||||
ok, coreIdx := n.FindCore(core)
|
||||
|
||||
if !ok {
|
||||
// New core
|
||||
core := Core{Id: core}
|
||||
n.Cores = append(n.Cores, core)
|
||||
coreIdx = len(n.Cores) - 1
|
||||
}
|
||||
n.Cores[coreIdx].Threads = append(n.Cores[coreIdx].Threads, thread)
|
||||
}
|
||||
|
||||
// AddNodeCache records a node-level (shared) cache on this node.
func (n *Node) AddNodeCache(c Cache) {
	n.Caches = append(n.Caches, c)
}

// AddPerCoreCache records the given cache on every core of this node.
func (n *Node) AddPerCoreCache(c Cache) {
	for idx := range n.Cores {
		n.Cores[idx].Caches = append(n.Cores[idx].Caches, c)
	}
}
|
||||
|
||||
// HugePagesInfo describes one huge-page size class and how many pages of
// that size are available.
type HugePagesInfo struct {
	// huge page size (in kB)
	PageSize uint64 `json:"page_size"`

	// number of huge pages
	NumPages uint64 `json:"num_pages"`
}

// DiskInfo describes a single block device on the machine.
type DiskInfo struct {
	// device name
	Name string `json:"name"`

	// Major number
	Major uint64 `json:"major"`

	// Minor number
	Minor uint64 `json:"minor"`

	// Size in bytes
	Size uint64 `json:"size"`

	// I/O Scheduler - one of "none", "noop", "cfq", "deadline"
	Scheduler string `json:"scheduler"`
}

// NetInfo describes a single network interface on the machine.
type NetInfo struct {
	// Device name
	Name string `json:"name"`

	// Mac Address
	MacAddress string `json:"mac_address"`

	// Speed in MBits/s
	Speed int64 `json:"speed"`

	// Maximum Transmission Unit
	Mtu int64 `json:"mtu"`
}

// CloudProvider identifies the cloud the machine runs on, if any.
type CloudProvider string

const (
	GCE             CloudProvider = "GCE"
	AWS             CloudProvider = "AWS"
	Azure           CloudProvider = "Azure"
	UnknownProvider CloudProvider = "Unknown"
)

// InstanceType is the cloud-specific instance type (e.g. a GCE machine type).
type InstanceType string

const (
	UnknownInstance = "Unknown"
)

// InstanceID is the cloud provider's identifier for this instance.
type InstanceID string

const (
	UnNamedInstance InstanceID = "None"
)
|
||||
|
||||
// MachineInfo is a snapshot of the machine's hardware inventory: CPU,
// memory, storage, network devices, NUMA topology, and cloud identity.
type MachineInfo struct {
	// The time of this information point.
	Timestamp time.Time `json:"timestamp"`

	// Vendor id of CPU.
	CPUVendorID string `json:"vendor_id"`

	// The number of cores in this machine.
	NumCores int `json:"num_cores"`

	// The number of physical cores in this machine.
	NumPhysicalCores int `json:"num_physical_cores"`

	// The number of cpu sockets in this machine.
	NumSockets int `json:"num_sockets"`

	// Maximum clock speed for the cores, in KHz.
	CpuFrequency uint64 `json:"cpu_frequency_khz"`

	// The amount of memory (in bytes) in this machine
	MemoryCapacity uint64 `json:"memory_capacity"`

	// The amount of swap (in bytes) in this machine
	SwapCapacity uint64 `json:"swap_capacity"`

	// Memory capacity and number of DIMMs by memory type
	MemoryByType map[string]*MemoryInfo `json:"memory_by_type"`

	NVMInfo NVMInfo `json:"nvm"`

	// HugePages on this machine.
	HugePages []HugePagesInfo `json:"hugepages"`

	// The machine id
	MachineID string `json:"machine_id"`

	// The system uuid
	SystemUUID string `json:"system_uuid"`

	// The boot id
	BootID string `json:"boot_id"`

	// Filesystems on this machine.
	Filesystems []FsInfo `json:"filesystems"`

	// Disk map
	DiskMap map[string]DiskInfo `json:"disk_map"`

	// Network devices
	NetworkDevices []NetInfo `json:"network_devices"`

	// Machine Topology
	// Describes cpu/memory layout and hierarchy.
	Topology []Node `json:"topology"`

	// Cloud provider the machine belongs to.
	CloudProvider CloudProvider `json:"cloud_provider"`

	// Type of cloud instance (e.g. GCE standard) the machine is.
	InstanceType InstanceType `json:"instance_type"`

	// ID of cloud instance (e.g. instance-1) given to it by the cloud provider.
	InstanceID InstanceID `json:"instance_id"`
}
|
||||
|
||||
func (m *MachineInfo) Clone() *MachineInfo {
|
||||
memoryByType := m.MemoryByType
|
||||
if len(m.MemoryByType) > 0 {
|
||||
memoryByType = make(map[string]*MemoryInfo)
|
||||
for memoryType, memoryInfo := range m.MemoryByType {
|
||||
memoryByType[memoryType] = memoryInfo
|
||||
}
|
||||
}
|
||||
diskMap := m.DiskMap
|
||||
if len(m.DiskMap) > 0 {
|
||||
diskMap = make(map[string]DiskInfo)
|
||||
for k, info := range m.DiskMap {
|
||||
diskMap[k] = info
|
||||
}
|
||||
}
|
||||
copy := MachineInfo{
|
||||
CPUVendorID: m.CPUVendorID,
|
||||
Timestamp: m.Timestamp,
|
||||
NumCores: m.NumCores,
|
||||
NumPhysicalCores: m.NumPhysicalCores,
|
||||
NumSockets: m.NumSockets,
|
||||
CpuFrequency: m.CpuFrequency,
|
||||
MemoryCapacity: m.MemoryCapacity,
|
||||
SwapCapacity: m.SwapCapacity,
|
||||
MemoryByType: memoryByType,
|
||||
NVMInfo: m.NVMInfo,
|
||||
HugePages: m.HugePages,
|
||||
MachineID: m.MachineID,
|
||||
SystemUUID: m.SystemUUID,
|
||||
BootID: m.BootID,
|
||||
Filesystems: m.Filesystems,
|
||||
DiskMap: diskMap,
|
||||
NetworkDevices: m.NetworkDevices,
|
||||
Topology: m.Topology,
|
||||
CloudProvider: m.CloudProvider,
|
||||
InstanceType: m.InstanceType,
|
||||
InstanceID: m.InstanceID,
|
||||
}
|
||||
return ©
|
||||
}
|
||||
|
||||
// MemoryInfo describes the capacity and DIMM count for one memory type.
type MemoryInfo struct {
	// The amount of memory (in bytes).
	Capacity uint64 `json:"capacity"`

	// Number of memory DIMMs.
	DimmCount uint `json:"dimm_count"`
}

// NVMInfo describes non-volatile memory (e.g. persistent memory) capacity
// and power configuration.
type NVMInfo struct {
	// The total NVM capacity in bytes for memory mode.
	MemoryModeCapacity uint64 `json:"memory_mode_capacity"`

	// The total NVM capacity in bytes for app direct mode.
	// NOTE(review): the json tag below contains a literal space
	// ("app direct_mode_capacity"); kept as-is because changing it would
	// alter the serialized wire format consumers may depend on.
	AppDirectModeCapacity uint64 `json:"app direct_mode_capacity"`

	// Average power budget in watts for NVM devices configured in BIOS.
	AvgPowerBudget uint `json:"avg_power_budget"`
}

// VersionInfo reports the versions of the kernel, container runtime, and
// cAdvisor itself.
type VersionInfo struct {
	// Kernel version.
	KernelVersion string `json:"kernel_version"`

	// OS image being used for cadvisor container, or host image if running on host directly.
	ContainerOsVersion string `json:"container_os_version"`

	// Docker version.
	DockerVersion string `json:"docker_version"`

	// Docker API Version
	DockerAPIVersion string `json:"docker_api_version"`

	// cAdvisor version.
	CadvisorVersion string `json:"cadvisor_version"`
	// cAdvisor git revision.
	CadvisorRevision string `json:"cadvisor_revision"`
}

// MachineInfoFactory produces machine and version information snapshots.
type MachineInfoFactory interface {
	GetMachineInfo() (*MachineInfo, error)
	GetVersionInfo() (*VersionInfo, error)
}
|
77
vendor/github.com/google/cadvisor/info/v1/metric.go
generated
vendored
Normal file
77
vendor/github.com/google/cadvisor/info/v1/metric.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Type of metric being exported.
type MetricType string

const (
	// Instantaneous value. May increase or decrease.
	MetricGauge MetricType = "gauge"

	// A counter-like value that is only expected to increase.
	MetricCumulative MetricType = "cumulative"
)

// DataType for metric being exported.
type DataType string

const (
	IntType   DataType = "int"
	FloatType DataType = "float"
)

// Spec for custom metric.
type MetricSpec struct {
	// The name of the metric.
	Name string `json:"name"`

	// Type of the metric.
	Type MetricType `json:"type"`

	// Data Type for the stats.
	Format DataType `json:"format"`

	// Display Units for the stats.
	Units string `json:"units"`
}

// MetricValBasic is an exported metric sample: a timestamp plus either an
// int or a float value, depending on the metric's DataType.
type MetricValBasic struct {
	// Time at which the metric was queried
	Timestamp time.Time `json:"timestamp"`

	// The value of the metric at this point.
	IntValue   int64   `json:"int_value,omitempty"`
	FloatValue float64 `json:"float_value,omitempty"`
}

// MetricVal is an exported metric sample with optional label metadata.
type MetricVal struct {
	// Label associated with a metric
	Label  string            `json:"label,omitempty"`
	Labels map[string]string `json:"labels,omitempty"`

	// Time at which the metric was queried
	Timestamp time.Time `json:"timestamp"`

	// The value of the metric at this point.
	IntValue   int64   `json:"int_value,omitempty"`
	FloatValue float64 `json:"float_value,omitempty"`
}
|
358
vendor/github.com/google/cadvisor/info/v2/container.go
generated
vendored
Normal file
358
vendor/github.com/google/cadvisor/info/v2/container.go
generated
vendored
Normal file
@ -0,0 +1,358 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
// TODO(rjnagal): Remove dependency after moving all stats structs from v1.
|
||||
// using v1 now for easy conversion.
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// Container identifier types accepted in API requests (see RequestOptions.IdType).
const (
	TypeName   = "name"
	TypeDocker = "docker"
	TypePodman = "podman"
)

// CpuSpec describes the CPU resource configuration of a container.
type CpuSpec struct {
	// Requested cpu shares. Default is 1024.
	Limit uint64 `json:"limit"`
	// Requested cpu hard limit. Default is unlimited (0).
	// Units: milli-cpus.
	MaxLimit uint64 `json:"max_limit"`
	// Cpu affinity mask.
	// TODO(rjnagal): Add a library to convert mask string to set of cpu bitmask.
	Mask string `json:"mask,omitempty"`
	// CPUQuota Default is disabled
	Quota uint64 `json:"quota,omitempty"`
	// Period is the CPU reference time in ns e.g the quota is compared against this.
	Period uint64 `json:"period,omitempty"`
}

// MemorySpec describes the memory resource configuration of a container.
type MemorySpec struct {
	// The amount of memory requested. Default is unlimited (-1).
	// Units: bytes.
	Limit uint64 `json:"limit,omitempty"`

	// The amount of guaranteed memory. Default is 0.
	// Units: bytes.
	Reservation uint64 `json:"reservation,omitempty"`

	// The amount of swap space requested. Default is unlimited (-1).
	// Units: bytes.
	SwapLimit uint64 `json:"swap_limit,omitempty"`
}
|
||||
|
||||
// ContainerInfo pairs a container's spec with its historical stats.
type ContainerInfo struct {
	// Describes the container.
	Spec ContainerSpec `json:"spec,omitempty"`

	// Historical statistics gathered from the container.
	Stats []*ContainerStats `json:"stats,omitempty"`
}

// ContainerSpec describes a container's identity, resource configuration,
// and which stat categories are being collected for it (the Has* flags).
type ContainerSpec struct {
	// Time at which the container was created.
	CreationTime time.Time `json:"creation_time,omitempty"`

	// Other names by which the container is known within a certain namespace.
	// This is unique within that namespace.
	Aliases []string `json:"aliases,omitempty"`

	// Namespace under which the aliases of a container are unique.
	// An example of a namespace is "docker" for Docker containers.
	Namespace string `json:"namespace,omitempty"`

	// Metadata labels associated with this container.
	Labels map[string]string `json:"labels,omitempty"`
	// Metadata envs associated with this container. Only whitelisted envs are added.
	Envs map[string]string `json:"envs,omitempty"`

	HasCpu bool    `json:"has_cpu"`
	Cpu    CpuSpec `json:"cpu,omitempty"`

	HasMemory bool       `json:"has_memory"`
	Memory    MemorySpec `json:"memory,omitempty"`

	HasHugetlb bool `json:"has_hugetlb"`

	HasCustomMetrics bool            `json:"has_custom_metrics"`
	CustomMetrics    []v1.MetricSpec `json:"custom_metrics,omitempty"`

	HasProcesses bool           `json:"has_processes"`
	Processes    v1.ProcessSpec `json:"processes,omitempty"`

	// Following resources have no associated spec, but are being isolated.
	HasNetwork    bool `json:"has_network"`
	HasFilesystem bool `json:"has_filesystem"`
	HasDiskIo     bool `json:"has_diskio"`

	// Image name used for this container.
	Image string `json:"image,omitempty"`
}
|
||||
|
||||
// DeprecatedContainerStats is the older stats shape that pairs each stat
// category with an explicit Has* presence flag; superseded by
// ContainerStats, which uses pointer fields for optionality instead.
type DeprecatedContainerStats struct {
	// The time of this stat point.
	Timestamp time.Time `json:"timestamp"`
	// CPU statistics
	HasCpu bool `json:"has_cpu"`
	// In nanoseconds (aggregated)
	Cpu v1.CpuStats `json:"cpu,omitempty"`
	// In nanocores per second (instantaneous)
	CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
	// Disk IO statistics
	HasDiskIo bool           `json:"has_diskio"`
	DiskIo    v1.DiskIoStats `json:"diskio,omitempty"`
	// Memory statistics
	HasMemory bool           `json:"has_memory"`
	Memory    v1.MemoryStats `json:"memory,omitempty"`
	// Hugepage statistics
	HasHugetlb bool                       `json:"has_hugetlb"`
	Hugetlb    map[string]v1.HugetlbStats `json:"hugetlb,omitempty"`
	// Network statistics
	HasNetwork bool         `json:"has_network"`
	Network    NetworkStats `json:"network,omitempty"`
	// Processes statistics
	HasProcesses bool            `json:"has_processes"`
	Processes    v1.ProcessStats `json:"processes,omitempty"`
	// Filesystem statistics
	HasFilesystem bool         `json:"has_filesystem"`
	Filesystem    []v1.FsStats `json:"filesystem,omitempty"`
	// Task load statistics
	HasLoad bool         `json:"has_load"`
	Load    v1.LoadStats `json:"load_stats,omitempty"`
	// Custom Metrics
	HasCustomMetrics bool                      `json:"has_custom_metrics"`
	CustomMetrics    map[string][]v1.MetricVal `json:"custom_metrics,omitempty"`
	// Perf events counters
	PerfStats []v1.PerfStat `json:"perf_stats,omitempty"`
	// Statistics originating from perf uncore events.
	// Applies only for root container.
	PerfUncoreStats []v1.PerfUncoreStat `json:"perf_uncore_stats,omitempty"`
	// Referenced memory
	ReferencedMemory uint64 `json:"referenced_memory,omitempty"`
	// Resource Control (resctrl) statistics
	Resctrl v1.ResctrlStats `json:"resctrl,omitempty"`
}
|
||||
|
||||
// ContainerStats is one stat sample for a container. Optional categories
// are pointers: nil means the category was not collected for this sample.
type ContainerStats struct {
	// The time of this stat point.
	Timestamp time.Time `json:"timestamp"`
	// CPU statistics
	// In nanoseconds (aggregated)
	Cpu *v1.CpuStats `json:"cpu,omitempty"`
	// In nanocores per second (instantaneous)
	CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
	// Disk IO statistics
	DiskIo *v1.DiskIoStats `json:"diskio,omitempty"`
	// Memory statistics
	Memory *v1.MemoryStats `json:"memory,omitempty"`
	// Hugepage statistics
	Hugetlb *map[string]v1.HugetlbStats `json:"hugetlb,omitempty"`
	// Network statistics
	Network *NetworkStats `json:"network,omitempty"`
	// Processes statistics
	Processes *v1.ProcessStats `json:"processes,omitempty"`
	// Filesystem statistics
	Filesystem *FilesystemStats `json:"filesystem,omitempty"`
	// Task load statistics
	Load *v1.LoadStats `json:"load_stats,omitempty"`
	// Metrics for Accelerators. Each Accelerator corresponds to one element in the array.
	Accelerators []v1.AcceleratorStats `json:"accelerators,omitempty"`
	// Custom Metrics
	CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"`
	// Perf events counters
	PerfStats []v1.PerfStat `json:"perf_stats,omitempty"`
	// Statistics originating from perf uncore events.
	// Applies only for root container.
	PerfUncoreStats []v1.PerfUncoreStat `json:"perf_uncore_stats,omitempty"`
	// Referenced memory
	ReferencedMemory uint64 `json:"referenced_memory,omitempty"`
	// Resource Control (resctrl) statistics
	Resctrl v1.ResctrlStats `json:"resctrl,omitempty"`
}
|
||||
|
||||
// Percentiles summarizes a sampled metric (mean, max, and selected
// percentiles) over a collection window.
type Percentiles struct {
	// Indicates whether the stats are present or not.
	// If true, values below do not have any data.
	Present bool `json:"present"`
	// Average over the collected sample.
	Mean uint64 `json:"mean"`
	// Max seen over the collected sample.
	Max uint64 `json:"max"`
	// 50th percentile over the collected sample.
	Fifty uint64 `json:"fifty"`
	// 90th percentile over the collected sample.
	Ninety uint64 `json:"ninety"`
	// 95th percentile over the collected sample.
	NinetyFive uint64 `json:"ninetyfive"`
}

// Usage summarizes CPU and memory usage percentiles for one time window.
type Usage struct {
	// Indicates amount of data available [0-100].
	// If we have data for half a day, we'll still process DayUsage,
	// but set PercentComplete to 50.
	PercentComplete int32 `json:"percent_complete"`
	// Mean, Max, and 90p cpu rate value in milliCpus/seconds. Converted to milliCpus to avoid floats.
	Cpu Percentiles `json:"cpu"`
	// Mean, Max, and 90p memory size in bytes.
	Memory Percentiles `json:"memory"`
}

// latest sample collected for a container.
type InstantUsage struct {
	// cpu rate in cpu milliseconds/second.
	Cpu uint64 `json:"cpu"`
	// Memory usage in bytes.
	Memory uint64 `json:"memory"`
}

// DerivedStats aggregates a container's usage over minute/hour/day windows
// together with the latest instantaneous sample.
type DerivedStats struct {
	// Time of generation of these stats.
	Timestamp time.Time `json:"timestamp"`
	// Latest instantaneous sample.
	LatestUsage InstantUsage `json:"latest_usage"`
	// Percentiles in last observed minute.
	MinuteUsage Usage `json:"minute_usage"`
	// Percentile in last hour.
	HourUsage Usage `json:"hour_usage"`
	// Percentile in last day.
	DayUsage Usage `json:"day_usage"`
}
|
||||
|
||||
// FsInfo is a point-in-time snapshot of one mounted filesystem's capacity
// and usage (v2 API shape).
type FsInfo struct {
	// Time of generation of these stats.
	Timestamp time.Time `json:"timestamp"`

	// The block device name associated with the filesystem.
	Device string `json:"device"`

	// Path where the filesystem is mounted.
	Mountpoint string `json:"mountpoint"`

	// Filesystem usage in bytes.
	Capacity uint64 `json:"capacity"`

	// Bytes available for non-root use.
	Available uint64 `json:"available"`

	// Number of bytes used on this filesystem.
	Usage uint64 `json:"usage"`

	// Labels associated with this filesystem.
	Labels []string `json:"labels"`

	// Number of Inodes.
	Inodes *uint64 `json:"inodes,omitempty"`

	// Number of available Inodes (if known)
	InodesFree *uint64 `json:"inodes_free,omitempty"`
}

// RequestOptions controls how container stats requests are resolved.
type RequestOptions struct {
	// Type of container identifier specified - TypeName (default) or TypeDocker
	IdType string `json:"type"`
	// Number of stats to return, -1 means no limit.
	Count int `json:"count"`
	// Whether to include stats for child subcontainers.
	Recursive bool `json:"recursive"`
	// Update stats if they are older than MaxAge
	// nil indicates no update, and 0 will always trigger an update.
	MaxAge *time.Duration `json:"max_age"`
}

// ProcessInfo describes a single process running inside a container,
// in the style of `ps` output.
type ProcessInfo struct {
	User          string  `json:"user"`
	Pid           int     `json:"pid"`
	Ppid          int     `json:"parent_pid"`
	StartTime     string  `json:"start_time"`
	PercentCpu    float32 `json:"percent_cpu"`
	PercentMemory float32 `json:"percent_mem"`
	RSS           uint64  `json:"rss"`
	VirtualSize   uint64  `json:"virtual_size"`
	Status        string  `json:"status"`
	RunningTime   string  `json:"running_time"`
	CgroupPath    string  `json:"cgroup_path"`
	Cmd           string  `json:"cmd"`
	FdCount       int     `json:"fd_count"`
	Psr           int     `json:"psr"`
}
|
||||
|
||||
// TcpStat counts TCP connections by state (as in /proc/net/tcp).
type TcpStat struct {
	Established uint64
	SynSent     uint64
	SynRecv     uint64
	FinWait1    uint64
	FinWait2    uint64
	TimeWait    uint64
	Close       uint64
	CloseWait   uint64
	LastAck     uint64
	Listen      uint64
	Closing     uint64
}

// NetworkStats aggregates per-interface counters and TCP/UDP connection
// state statistics for a container.
type NetworkStats struct {
	// Network stats by interface.
	Interfaces []v1.InterfaceStats `json:"interfaces,omitempty"`
	// TCP connection stats (Established, Listen...)
	Tcp TcpStat `json:"tcp"`
	// TCP6 connection stats (Established, Listen...)
	Tcp6 TcpStat `json:"tcp6"`
	// UDP connection stats
	Udp v1.UdpStat `json:"udp"`
	// UDP6 connection stats
	Udp6 v1.UdpStat `json:"udp6"`
	// TCP advanced stats
	TcpAdvanced v1.TcpAdvancedStat `json:"tcp_advanced"`
}
|
||||
|
||||
// Instantaneous CPU stats
type CpuInstStats struct {
	Usage CpuInstUsage `json:"usage"`
}

// CPU usage time statistics.
type CpuInstUsage struct {
	// Total CPU usage.
	// Units: nanocores per second
	Total uint64 `json:"total"`

	// Per CPU/core usage of the container.
	// Unit: nanocores per second
	PerCpu []uint64 `json:"per_cpu_usage,omitempty"`

	// Time spent in user space.
	// Unit: nanocores per second
	User uint64 `json:"user"`

	// Time spent in kernel space.
	// Unit: nanocores per second
	System uint64 `json:"system"`
}

// Filesystem usage statistics.
type FilesystemStats struct {
	// Total Number of bytes consumed by container.
	TotalUsageBytes *uint64 `json:"totalUsageBytes,omitempty"`
	// Number of bytes consumed by a container through its root filesystem.
	BaseUsageBytes *uint64 `json:"baseUsageBytes,omitempty"`
	// Number of inodes used within the container's root filesystem.
	// This only accounts for inodes that are shared across containers,
	// and does not include inodes used in mounted directories.
	InodeUsage *uint64 `json:"containter_inode_usage,omitempty"`
}
|
316
vendor/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
Normal file
316
vendor/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
Normal file
@ -0,0 +1,316 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
|
||||
var result []MachineFsStats
|
||||
for i := range fsStats {
|
||||
stat := fsStats[i]
|
||||
readDuration := time.Millisecond * time.Duration(stat.ReadTime)
|
||||
writeDuration := time.Millisecond * time.Duration(stat.WriteTime)
|
||||
ioDuration := time.Millisecond * time.Duration(stat.IoTime)
|
||||
weightedDuration := time.Millisecond * time.Duration(stat.WeightedIoTime)
|
||||
machineFsStat := MachineFsStats{
|
||||
Device: stat.Device,
|
||||
Type: stat.Type,
|
||||
Capacity: &stat.Limit,
|
||||
Usage: &stat.Usage,
|
||||
Available: &stat.Available,
|
||||
DiskStats: DiskStats{
|
||||
ReadsCompleted: &stat.ReadsCompleted,
|
||||
ReadsMerged: &stat.ReadsMerged,
|
||||
SectorsRead: &stat.SectorsRead,
|
||||
ReadDuration: &readDuration,
|
||||
WritesCompleted: &stat.WritesCompleted,
|
||||
WritesMerged: &stat.WritesMerged,
|
||||
SectorsWritten: &stat.SectorsWritten,
|
||||
WriteDuration: &writeDuration,
|
||||
IoInProgress: &stat.IoInProgress,
|
||||
IoDuration: &ioDuration,
|
||||
WeightedIoDuration: &weightedDuration,
|
||||
},
|
||||
}
|
||||
if stat.HasInodes {
|
||||
machineFsStat.InodesFree = &stat.InodesFree
|
||||
}
|
||||
result = append(result, machineFsStat)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// MachineStatsFromV1 converts a v1 container's stat samples into v2
// MachineStats, deriving instantaneous CPU rates from consecutive samples.
func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats {
	var stats []MachineStats
	// last holds the previous CPU-bearing sample; it is only advanced inside
	// the HasCpu branch, so instantaneous CPU is computed between successive
	// samples that actually carried CPU data.
	var last *v1.ContainerStats
	for i := range cont.Stats {
		val := cont.Stats[i]
		stat := MachineStats{
			Timestamp: val.Timestamp,
		}
		if cont.Spec.HasCpu {
			stat.Cpu = &val.Cpu
			// Instantaneous rate needs two samples; on the first sample
			// (last == nil) InstCpuStats errors and CpuInst stays nil.
			cpuInst, err := InstCpuStats(last, val)
			if err != nil {
				klog.Warningf("Could not get instant cpu stats: %v", err)
			} else {
				stat.CpuInst = cpuInst
			}
			last = val
		}
		if cont.Spec.HasMemory {
			stat.Memory = &val.Memory
		}
		if cont.Spec.HasNetwork {
			stat.Network = &NetworkStats{
				// FIXME: Use reflection instead.
				Tcp:        TcpStat(val.Network.Tcp),
				Tcp6:       TcpStat(val.Network.Tcp6),
				Interfaces: val.Network.Interfaces,
			}
		}
		if cont.Spec.HasFilesystem {
			stat.Filesystem = machineFsStatsFromV1(val.Filesystem)
		}
		// TODO(rjnagal): Handle load stats.
		stats = append(stats, stat)
	}
	return stats
}
|
||||
|
||||
// ContainerStatsFromV1 converts v1 ContainerStats samples into v2
// ContainerStats. Sections guarded by a Has* flag are copied only when set on
// the spec; accelerator/perf/resctrl data is copied whenever the sample
// carries it. containerName is used only for the multi-device filesystem
// warning below.
func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []*v1.ContainerStats) []*ContainerStats {
	newStats := make([]*ContainerStats, 0, len(stats))
	// Previous sample; baseline for instantaneous CPU rates.
	var last *v1.ContainerStats
	for _, val := range stats {
		stat := &ContainerStats{
			Timestamp:        val.Timestamp,
			ReferencedMemory: val.ReferencedMemory,
		}
		if spec.HasCpu {
			stat.Cpu = &val.Cpu
			// last is nil on the first CPU-bearing sample; InstCpuStats
			// returns (nil, nil) in that case.
			cpuInst, err := InstCpuStats(last, val)
			if err != nil {
				klog.Warningf("Could not get instant cpu stats: %v", err)
			} else {
				stat.CpuInst = cpuInst
			}
			last = val
		}
		if spec.HasMemory {
			stat.Memory = &val.Memory
		}
		if spec.HasHugetlb {
			stat.Hugetlb = &val.Hugetlb
		}
		if spec.HasNetwork {
			// TODO: Handle TcpStats
			stat.Network = &NetworkStats{
				Tcp:        TcpStat(val.Network.Tcp),
				Tcp6:       TcpStat(val.Network.Tcp6),
				Interfaces: val.Network.Interfaces,
			}
		}
		if spec.HasProcesses {
			stat.Processes = &val.Processes
		}
		if spec.HasFilesystem {
			if len(val.Filesystem) == 1 {
				stat.Filesystem = &FilesystemStats{
					TotalUsageBytes: &val.Filesystem[0].Usage,
					BaseUsageBytes:  &val.Filesystem[0].BaseUsage,
					InodeUsage:      &val.Filesystem[0].Inodes,
				}
			} else if len(val.Filesystem) > 1 && containerName != "/" {
				// Cannot handle multiple devices per container.
				klog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
			}
		}
		if spec.HasDiskIo {
			stat.DiskIo = &val.DiskIo
		}
		if spec.HasCustomMetrics {
			stat.CustomMetrics = val.CustomMetrics
		}
		// The sections below have no spec flag; copy when the sample has data.
		if len(val.Accelerators) > 0 {
			stat.Accelerators = val.Accelerators
		}
		if len(val.PerfStats) > 0 {
			stat.PerfStats = val.PerfStats
		}
		if len(val.PerfUncoreStats) > 0 {
			stat.PerfUncoreStats = val.PerfUncoreStats
		}
		if len(val.Resctrl.MemoryBandwidth) > 0 || len(val.Resctrl.Cache) > 0 {
			stat.Resctrl = val.Resctrl
		}
		// TODO(rjnagal): Handle load stats.
		newStats = append(newStats, stat)
	}
	return newStats
}
|
||||
|
||||
func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats {
|
||||
stats := make([]DeprecatedContainerStats, 0, len(cont.Stats))
|
||||
var last *v1.ContainerStats
|
||||
for _, val := range cont.Stats {
|
||||
stat := DeprecatedContainerStats{
|
||||
Timestamp: val.Timestamp,
|
||||
HasCpu: cont.Spec.HasCpu,
|
||||
HasMemory: cont.Spec.HasMemory,
|
||||
HasHugetlb: cont.Spec.HasHugetlb,
|
||||
HasNetwork: cont.Spec.HasNetwork,
|
||||
HasFilesystem: cont.Spec.HasFilesystem,
|
||||
HasDiskIo: cont.Spec.HasDiskIo,
|
||||
HasCustomMetrics: cont.Spec.HasCustomMetrics,
|
||||
ReferencedMemory: val.ReferencedMemory,
|
||||
}
|
||||
if stat.HasCpu {
|
||||
stat.Cpu = val.Cpu
|
||||
cpuInst, err := InstCpuStats(last, val)
|
||||
if err != nil {
|
||||
klog.Warningf("Could not get instant cpu stats: %v", err)
|
||||
} else {
|
||||
stat.CpuInst = cpuInst
|
||||
}
|
||||
last = val
|
||||
}
|
||||
if stat.HasMemory {
|
||||
stat.Memory = val.Memory
|
||||
}
|
||||
if stat.HasHugetlb {
|
||||
stat.Hugetlb = val.Hugetlb
|
||||
}
|
||||
if stat.HasNetwork {
|
||||
stat.Network.Interfaces = val.Network.Interfaces
|
||||
}
|
||||
if stat.HasProcesses {
|
||||
stat.Processes = val.Processes
|
||||
}
|
||||
if stat.HasFilesystem {
|
||||
stat.Filesystem = val.Filesystem
|
||||
}
|
||||
if stat.HasDiskIo {
|
||||
stat.DiskIo = val.DiskIo
|
||||
}
|
||||
if stat.HasCustomMetrics {
|
||||
stat.CustomMetrics = val.CustomMetrics
|
||||
}
|
||||
if len(val.PerfStats) > 0 {
|
||||
stat.PerfStats = val.PerfStats
|
||||
}
|
||||
if len(val.PerfUncoreStats) > 0 {
|
||||
stat.PerfUncoreStats = val.PerfUncoreStats
|
||||
}
|
||||
if len(val.Resctrl.MemoryBandwidth) > 0 || len(val.Resctrl.Cache) > 0 {
|
||||
stat.Resctrl = val.Resctrl
|
||||
}
|
||||
// TODO(rjnagal): Handle load stats.
|
||||
stats = append(stats, stat)
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func InstCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) {
|
||||
if last == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !cur.Timestamp.After(last.Timestamp) {
|
||||
return nil, fmt.Errorf("container stats move backwards in time")
|
||||
}
|
||||
if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
|
||||
return nil, fmt.Errorf("different number of cpus")
|
||||
}
|
||||
timeDelta := cur.Timestamp.Sub(last.Timestamp)
|
||||
// Nanoseconds to gain precision and avoid having zero seconds if the
|
||||
// difference between the timestamps is just under a second
|
||||
timeDeltaNs := uint64(timeDelta.Nanoseconds())
|
||||
convertToRate := func(lastValue, curValue uint64) (uint64, error) {
|
||||
if curValue < lastValue {
|
||||
return 0, fmt.Errorf("cumulative stats decrease")
|
||||
}
|
||||
valueDelta := curValue - lastValue
|
||||
// Use float64 to keep precision
|
||||
return uint64(float64(valueDelta) / float64(timeDeltaNs) * 1e9), nil
|
||||
}
|
||||
total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
|
||||
for i := range percpu {
|
||||
var err error
|
||||
percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &CpuInstStats{
|
||||
Usage: CpuInstUsage{
|
||||
Total: total,
|
||||
PerCpu: percpu,
|
||||
User: user,
|
||||
System: system,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ContainerSpecFromV1 gets a V2 container spec from v1 container info,
// attaching the given aliases and namespace. Resource sub-specs (cpu, memory,
// custom metrics) are copied only when the matching Has* flag is set.
func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace string) ContainerSpec {
	specV2 := ContainerSpec{
		CreationTime:     specV1.CreationTime,
		HasCpu:           specV1.HasCpu,
		HasMemory:        specV1.HasMemory,
		HasHugetlb:       specV1.HasHugetlb,
		HasFilesystem:    specV1.HasFilesystem,
		HasNetwork:       specV1.HasNetwork,
		HasProcesses:     specV1.HasProcesses,
		HasDiskIo:        specV1.HasDiskIo,
		HasCustomMetrics: specV1.HasCustomMetrics,
		Image:            specV1.Image,
		Labels:           specV1.Labels,
		Envs:             specV1.Envs,
	}
	if specV1.HasCpu {
		specV2.Cpu.Limit = specV1.Cpu.Limit
		specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit
		specV2.Cpu.Mask = specV1.Cpu.Mask
	}
	if specV1.HasMemory {
		specV2.Memory.Limit = specV1.Memory.Limit
		specV2.Memory.Reservation = specV1.Memory.Reservation
		specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
	}
	if specV1.HasCustomMetrics {
		specV2.CustomMetrics = specV1.CustomMetrics
	}
	specV2.Aliases = aliases
	specV2.Namespace = namespace
	return specV2
}
|
198
vendor/github.com/google/cadvisor/info/v2/machine.go
generated
vendored
Normal file
198
vendor/github.com/google/cadvisor/info/v2/machine.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
// TODO(rjnagal): Move structs from v1.
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// Attributes describes the static properties of the machine and of the
// software stack (kernel, Docker, cAdvisor) reported alongside it.
type Attributes struct {
	// Kernel version.
	KernelVersion string `json:"kernel_version"`

	// OS image being used for cadvisor container, or host image if running on host directly.
	ContainerOsVersion string `json:"container_os_version"`

	// Docker version.
	DockerVersion string `json:"docker_version"`

	// Docker API version.
	DockerAPIVersion string `json:"docker_api_version"`

	// cAdvisor version.
	CadvisorVersion string `json:"cadvisor_version"`

	// The number of cores in this machine.
	NumCores int `json:"num_cores"`

	// Maximum clock speed for the cores, in KHz.
	CpuFrequency uint64 `json:"cpu_frequency_khz"`

	// The amount of memory (in bytes) in this machine.
	MemoryCapacity uint64 `json:"memory_capacity"`

	// The machine id.
	MachineID string `json:"machine_id"`

	// The system uuid.
	SystemUUID string `json:"system_uuid"`

	// HugePages on this machine.
	HugePages []v1.HugePagesInfo `json:"hugepages"`

	// Filesystems on this machine.
	Filesystems []v1.FsInfo `json:"filesystems"`

	// Disk map.
	DiskMap map[string]v1.DiskInfo `json:"disk_map"`

	// Network devices.
	NetworkDevices []v1.NetInfo `json:"network_devices"`

	// Machine Topology.
	// Describes cpu/memory layout and hierarchy.
	Topology []v1.Node `json:"topology"`

	// Cloud provider the machine belongs to.
	CloudProvider v1.CloudProvider `json:"cloud_provider"`

	// Type of cloud instance (e.g. GCE standard) the machine is.
	InstanceType v1.InstanceType `json:"instance_type"`
}
|
||||
|
||||
// GetAttributes assembles an Attributes view from machine info (mi) and
// version info (vi); every field is copied straight through.
func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes {
	return Attributes{
		KernelVersion:      vi.KernelVersion,
		ContainerOsVersion: vi.ContainerOsVersion,
		DockerVersion:      vi.DockerVersion,
		DockerAPIVersion:   vi.DockerAPIVersion,
		CadvisorVersion:    vi.CadvisorVersion,
		NumCores:           mi.NumCores,
		CpuFrequency:       mi.CpuFrequency,
		MemoryCapacity:     mi.MemoryCapacity,
		MachineID:          mi.MachineID,
		SystemUUID:         mi.SystemUUID,
		HugePages:          mi.HugePages,
		Filesystems:        mi.Filesystems,
		DiskMap:            mi.DiskMap,
		NetworkDevices:     mi.NetworkDevices,
		Topology:           mi.Topology,
		CloudProvider:      mi.CloudProvider,
		InstanceType:       mi.InstanceType,
	}
}
|
||||
|
||||
// MachineStats contains usage statistics for the entire machine.
type MachineStats struct {
	// The time of this stat point.
	Timestamp time.Time `json:"timestamp"`
	// Cumulative CPU usage, in nanoseconds (aggregated).
	Cpu *v1.CpuStats `json:"cpu,omitempty"`
	// Instantaneous CPU usage, in nanocores per second.
	CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
	// Memory statistics.
	Memory *v1.MemoryStats `json:"memory,omitempty"`
	// Network statistics.
	Network *NetworkStats `json:"network,omitempty"`
	// Filesystem statistics, one entry per device.
	Filesystem []MachineFsStats `json:"filesystem,omitempty"`
	// Task load statistics.
	Load *v1.LoadStats `json:"load_stats,omitempty"`
}
|
||||
|
||||
// MachineFsStats contains per filesystem capacity and usage information.
type MachineFsStats struct {
	// The block device name associated with the filesystem.
	Device string `json:"device"`

	// Type of filesystem.
	Type string `json:"type"`

	// Number of bytes that can be consumed on this filesystem.
	Capacity *uint64 `json:"capacity,omitempty"`

	// Number of bytes that is currently consumed on this filesystem.
	Usage *uint64 `json:"usage,omitempty"`

	// Number of bytes available for non-root user on this filesystem.
	Available *uint64 `json:"available,omitempty"`

	// Number of inodes that are available on this filesystem.
	// Nil when the source filesystem does not report inode counts.
	InodesFree *uint64 `json:"inodes_free,omitempty"`

	// DiskStats for this device, embedded so its fields serialize inline.
	DiskStats `json:"inline"`
}
|
||||
|
||||
// DiskStats contains per partition usage information.
// This information is only available at the machine level.
// NOTE(review): field descriptions appear to mirror the Linux per-device I/O
// counters (Documentation/iostats in the kernel tree) — confirm before
// relying on exact kernel semantics.
type DiskStats struct {
	// Number of reads completed.
	// This is the total number of reads completed successfully.
	ReadsCompleted *uint64 `json:"reads_completed,omitempty"`

	// Number of reads merged.
	// Reads and writes which are adjacent to each other may be merged for
	// efficiency. Thus two 4K reads may become one 8K read before it is
	// ultimately handed to the disk, and so it will be counted (and queued)
	// as only one I/O. This field lets you know how often this was done.
	ReadsMerged *uint64 `json:"reads_merged,omitempty"`

	// Number of sectors read.
	// This is the total number of sectors read successfully.
	SectorsRead *uint64 `json:"sectors_read,omitempty"`

	// Time spent reading.
	// This is the total number of milliseconds spent by all reads (as
	// measured from __make_request() to end_that_request_last()).
	ReadDuration *time.Duration `json:"read_duration,omitempty"`

	// Number of writes completed.
	// This is the total number of writes completed successfully.
	WritesCompleted *uint64 `json:"writes_completed,omitempty"`

	// Number of writes merged.
	// See the description of reads merged.
	WritesMerged *uint64 `json:"writes_merged,omitempty"`

	// Number of sectors written.
	// This is the total number of sectors written successfully.
	SectorsWritten *uint64 `json:"sectors_written,omitempty"`

	// Time spent writing.
	// This is the total number of milliseconds spent by all writes (as
	// measured from __make_request() to end_that_request_last()).
	WriteDuration *time.Duration `json:"write_duration,omitempty"`

	// Number of I/Os currently in progress.
	// The only field that should go to zero. Incremented as requests are
	// given to appropriate struct request_queue and decremented as they finish.
	IoInProgress *uint64 `json:"io_in_progress,omitempty"`

	// Time spent doing I/Os.
	// This field increases so long as IoInProgress (above) is nonzero.
	IoDuration *time.Duration `json:"io_duration,omitempty"`

	// Weighted time spent doing I/Os.
	// This field is incremented at each I/O start, I/O completion, I/O
	// merge, or read of these stats by the number of I/Os in progress
	// (IoInProgress) times the number of milliseconds spent doing I/O since
	// the last update of this field. This can provide an easy measure of both
	// I/O completion time and the backlog that may be accumulating.
	WeightedIoDuration *time.Duration `json:"weighted_io_duration,omitempty"`
}
|
178
vendor/github.com/google/cadvisor/machine/info.go
generated
vendored
Normal file
178
vendor/github.com/google/cadvisor/machine/info.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package machine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/google/cadvisor/fs"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/nvm"
|
||||
"github.com/google/cadvisor/utils/cloudinfo"
|
||||
"github.com/google/cadvisor/utils/sysfs"
|
||||
"github.com/google/cadvisor/utils/sysinfo"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const hugepagesDirectory = "/sys/kernel/mm/hugepages/"
|
||||
const memoryControllerPath = "/sys/devices/system/edac/mc/"
|
||||
|
||||
var machineIDFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.")
|
||||
var bootIDFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.")
|
||||
|
||||
func getInfoFromFiles(filePaths string) string {
|
||||
if len(filePaths) == 0 {
|
||||
return ""
|
||||
}
|
||||
for _, file := range strings.Split(filePaths, ",") {
|
||||
id, err := os.ReadFile(file)
|
||||
if err == nil {
|
||||
return strings.TrimSpace(string(id))
|
||||
}
|
||||
}
|
||||
klog.Warningf("Couldn't collect info from any of the files in %q", filePaths)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Info collects a v1 MachineInfo snapshot for this machine: CPU, memory
// (including per-type EDAC data), swap, NVM, huge pages, filesystems, disks,
// network devices, topology, machine/boot identifiers and cloud metadata.
//
// Failures of the probes up to huge pages abort with an error; the later
// probes (filesystems, disks, network, topology, system UUID) only log and
// leave the corresponding field empty.
func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.MachineInfo, error) {
	rootFs := "/"
	if !inHostNamespace {
		// Not in the host namespace: host paths are read under /rootfs.
		rootFs = "/rootfs"
	}

	cpuinfo, err := os.ReadFile(filepath.Join(rootFs, "/proc/cpuinfo"))
	if err != nil {
		return nil, err
	}
	clockSpeed, err := GetClockSpeed(cpuinfo)
	if err != nil {
		return nil, err
	}

	memoryCapacity, err := GetMachineMemoryCapacity()
	if err != nil {
		return nil, err
	}

	memoryByType, err := GetMachineMemoryByType(memoryControllerPath)
	if err != nil {
		return nil, err
	}

	swapCapacity, err := GetMachineSwapCapacity()
	if err != nil {
		return nil, err
	}

	nvmInfo, err := nvm.GetInfo()
	if err != nil {
		return nil, err
	}

	hugePagesInfo, err := sysinfo.GetHugePagesInfo(sysFs, hugepagesDirectory)
	if err != nil {
		return nil, err
	}

	// Probes from here on are best-effort: log and continue on failure.
	filesystems, err := fsInfo.GetGlobalFsInfo()
	if err != nil {
		klog.Errorf("Failed to get global filesystem information: %v", err)
	}

	diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs)
	if err != nil {
		klog.Errorf("Failed to get disk map: %v", err)
	}

	netDevices, err := sysinfo.GetNetworkDevices(sysFs)
	if err != nil {
		klog.Errorf("Failed to get network devices: %v", err)
	}

	topology, numCores, err := GetTopology(sysFs)
	if err != nil {
		klog.Errorf("Failed to get topology information: %v", err)
	}

	systemUUID, err := sysinfo.GetSystemUUID(sysFs)
	if err != nil {
		klog.Errorf("Failed to get system UUID: %v", err)
	}

	realCloudInfo := cloudinfo.NewRealCloudInfo()
	cloudProvider := realCloudInfo.GetCloudProvider()
	instanceType := realCloudInfo.GetInstanceType()
	instanceID := realCloudInfo.GetInstanceID()

	// NOTE(review): machineIDFilePath/bootIDFilePath may hold comma-separated
	// lists; filepath.Join prefixes only the first entry with rootFs —
	// confirm that is intended for the non-host-namespace case.
	machineInfo := &info.MachineInfo{
		Timestamp:        time.Now(),
		CPUVendorID:      GetCPUVendorID(cpuinfo),
		NumCores:         numCores,
		NumPhysicalCores: GetPhysicalCores(cpuinfo),
		NumSockets:       GetSockets(cpuinfo),
		CpuFrequency:     clockSpeed,
		MemoryCapacity:   memoryCapacity,
		MemoryByType:     memoryByType,
		SwapCapacity:     swapCapacity,
		NVMInfo:          nvmInfo,
		HugePages:        hugePagesInfo,
		DiskMap:          diskMap,
		NetworkDevices:   netDevices,
		Topology:         topology,
		MachineID:        getInfoFromFiles(filepath.Join(rootFs, *machineIDFilePath)),
		SystemUUID:       systemUUID,
		BootID:           getInfoFromFiles(filepath.Join(rootFs, *bootIDFilePath)),
		CloudProvider:    cloudProvider,
		InstanceType:     instanceType,
		InstanceID:       instanceID,
	}

	// Flatten filesystem info into the v1 FsInfo shape. Inode counts may be
	// absent; then HasInodes is false and Inodes stays 0.
	for i := range filesystems {
		fs := filesystems[i]
		inodes := uint64(0)
		if fs.Inodes != nil {
			inodes = *fs.Inodes
		}
		machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, DeviceMajor: uint64(fs.Major), DeviceMinor: uint64(fs.Minor), Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: inodes, HasInodes: fs.Inodes != nil})
	}

	return machineInfo, nil
}
|
||||
|
||||
func ContainerOsVersion() string {
|
||||
os, err := getOperatingSystem()
|
||||
if err != nil {
|
||||
os = "Unknown"
|
||||
}
|
||||
return os
|
||||
}
|
||||
|
||||
func KernelVersion() string {
|
||||
uname := &unix.Utsname{}
|
||||
|
||||
if err := unix.Uname(uname); err != nil {
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
return string(uname.Release[:bytes.IndexByte(uname.Release[:], 0)])
|
||||
}
|
286
vendor/github.com/google/cadvisor/machine/machine.go
generated
vendored
Normal file
286
vendor/github.com/google/cadvisor/machine/machine.go
generated
vendored
Normal file
@ -0,0 +1,286 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The machine package contains functions that extract machine-level specs.
|
||||
package machine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/utils"
|
||||
"github.com/google/cadvisor/utils/sysfs"
|
||||
"github.com/google/cadvisor/utils/sysinfo"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
coreRegExp = regexp.MustCompile(`(?m)^core id\s*:\s*([0-9]+)$`)
|
||||
nodeRegExp = regexp.MustCompile(`(?m)^physical id\s*:\s*([0-9]+)$`)
|
||||
// Power systems have a different format so cater for both
|
||||
cpuClockSpeedMHz = regexp.MustCompile(`(?:cpu MHz|CPU MHz|clock)\s*:\s*([0-9]+\.[0-9]+)(?:MHz)?`)
|
||||
memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
|
||||
swapCapacityRegexp = regexp.MustCompile(`SwapTotal:\s*([0-9]+) kB`)
|
||||
vendorIDRegexp = regexp.MustCompile(`vendor_id\s*:\s*(\w+)`)
|
||||
|
||||
cpuAttributesPath = "/sys/devices/system/cpu/"
|
||||
isMemoryController = regexp.MustCompile("mc[0-9]+")
|
||||
isDimm = regexp.MustCompile("dimm[0-9]+")
|
||||
machineArch = getMachineArch()
|
||||
maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
|
||||
)
|
||||
|
||||
const memTypeFileName = "dimm_mem_type"
|
||||
const sizeFileName = "size"
|
||||
|
||||
// GetCPUVendorID returns "vendor_id" reading /proc/cpuinfo file.
|
||||
func GetCPUVendorID(procInfo []byte) string {
|
||||
vendorID := ""
|
||||
|
||||
matches := vendorIDRegexp.FindSubmatch(procInfo)
|
||||
if len(matches) != 2 {
|
||||
klog.V(4).Info("Cannot read vendor id correctly, set empty.")
|
||||
return vendorID
|
||||
}
|
||||
|
||||
vendorID = string(matches[1])
|
||||
|
||||
return vendorID
|
||||
}
|
||||
|
||||
// GetPhysicalCores returns number of CPU cores reading /proc/cpuinfo file or if needed information from sysfs cpu path
|
||||
func GetPhysicalCores(procInfo []byte) int {
|
||||
numCores := getUniqueMatchesCount(string(procInfo), coreRegExp)
|
||||
if numCores == 0 {
|
||||
// read number of cores from /sys/bus/cpu/devices/cpu*/topology/core_id to deal with processors
|
||||
// for which 'core id' is not available in /proc/cpuinfo
|
||||
numCores = sysfs.GetUniqueCPUPropertyCount(cpuAttributesPath, sysfs.CPUCoreID)
|
||||
}
|
||||
if numCores == 0 {
|
||||
klog.Errorf("Cannot read number of physical cores correctly, number of cores set to %d", numCores)
|
||||
}
|
||||
return numCores
|
||||
}
|
||||
|
||||
// GetSockets returns number of CPU sockets reading /proc/cpuinfo file or if needed information from sysfs cpu path
|
||||
func GetSockets(procInfo []byte) int {
|
||||
numSocket := getUniqueMatchesCount(string(procInfo), nodeRegExp)
|
||||
if numSocket == 0 {
|
||||
// read number of sockets from /sys/bus/cpu/devices/cpu*/topology/physical_package_id to deal with processors
|
||||
// for which 'physical id' is not available in /proc/cpuinfo
|
||||
numSocket = sysfs.GetUniqueCPUPropertyCount(cpuAttributesPath, sysfs.CPUPhysicalPackageID)
|
||||
}
|
||||
if numSocket == 0 {
|
||||
klog.Errorf("Cannot read number of sockets correctly, number of sockets set to %d", numSocket)
|
||||
}
|
||||
return numSocket
|
||||
}
|
||||
|
||||
// GetClockSpeed returns the CPU clock speed in kHz, given a []byte formatted
// as the /proc/cpuinfo file. It prefers the sysfs max-frequency file when
// present, returns 0 on architectures with no parseable speed, and otherwise
// falls back to parsing the cpuinfo contents.
func GetClockSpeed(procInfo []byte) (uint64, error) {
	// First look through sys to find a max supported cpu frequency.
	if utils.FileExists(maxFreqFile) {
		val, err := os.ReadFile(maxFreqFile)
		if err != nil {
			return 0, err
		}
		var maxFreq uint64
		n, err := fmt.Sscanf(string(val), "%d", &maxFreq)
		if err != nil || n != 1 {
			return 0, fmt.Errorf("could not parse frequency %q", val)
		}
		// Returned as-is: the value is used as kHz by callers (see
		// MachineInfo.CpuFrequency / cpu_frequency_khz).
		return maxFreq, nil
	}
	// s390/s390x, mips64, riscv64, aarch64 and arm32 changes:
	// these architectures do not expose a parseable clock speed; report 0.
	if isMips64() || isSystemZ() || isAArch64() || isArm32() || isRiscv64() {
		return 0, nil
	}

	// Fall back to /proc/cpuinfo
	matches := cpuClockSpeedMHz.FindSubmatch(procInfo)
	if len(matches) != 2 {
		return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo))
	}

	speed, err := strconv.ParseFloat(string(matches[1]), 64)
	if err != nil {
		return 0, err
	}
	// Convert MHz to kHz.
	return uint64(speed * 1000), nil
}
|
||||
|
||||
// GetMachineMemoryCapacity returns the machine's total memory from /proc/meminfo.
|
||||
// Returns the total memory capacity as an uint64 (number of bytes).
|
||||
func GetMachineMemoryCapacity() (uint64, error) {
|
||||
out, err := os.ReadFile("/proc/meminfo")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
memoryCapacity, err := parseCapacity(out, memoryCapacityRegexp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return memoryCapacity, err
|
||||
}
|
||||
|
||||
// GetMachineMemoryByType returns information about memory capacity and number of DIMMs.
|
||||
// Information is retrieved from sysfs edac per-DIMM API (/sys/devices/system/edac/mc/)
|
||||
// introduced in kernel 3.6. Documentation can be found at
|
||||
// https://www.kernel.org/doc/Documentation/admin-guide/ras.rst.
|
||||
// Full list of memory types can be found in edac_mc.c
|
||||
// (https://github.com/torvalds/linux/blob/v5.5/drivers/edac/edac_mc.c#L198)
|
||||
func GetMachineMemoryByType(edacPath string) (map[string]*info.MemoryInfo, error) {
|
||||
memory := map[string]*info.MemoryInfo{}
|
||||
names, err := os.ReadDir(edacPath)
|
||||
// On some architectures (such as ARM) memory controller device may not exist.
|
||||
// If this is the case then we ignore error and return empty slice.
|
||||
_, ok := err.(*os.PathError)
|
||||
if err != nil && ok {
|
||||
return memory, nil
|
||||
} else if err != nil {
|
||||
return memory, err
|
||||
}
|
||||
for _, controllerDir := range names {
|
||||
controller := controllerDir.Name()
|
||||
if !isMemoryController.MatchString(controller) {
|
||||
continue
|
||||
}
|
||||
dimms, err := os.ReadDir(path.Join(edacPath, controllerDir.Name()))
|
||||
if err != nil {
|
||||
return map[string]*info.MemoryInfo{}, err
|
||||
}
|
||||
for _, dimmDir := range dimms {
|
||||
dimm := dimmDir.Name()
|
||||
if !isDimm.MatchString(dimm) {
|
||||
continue
|
||||
}
|
||||
memType, err := os.ReadFile(path.Join(edacPath, controller, dimm, memTypeFileName))
|
||||
if err != nil {
|
||||
return map[string]*info.MemoryInfo{}, err
|
||||
}
|
||||
readableMemType := strings.TrimSpace(string(memType))
|
||||
if _, exists := memory[readableMemType]; !exists {
|
||||
memory[readableMemType] = &info.MemoryInfo{}
|
||||
}
|
||||
size, err := os.ReadFile(path.Join(edacPath, controller, dimm, sizeFileName))
|
||||
if err != nil {
|
||||
return map[string]*info.MemoryInfo{}, err
|
||||
}
|
||||
capacity, err := strconv.Atoi(strings.TrimSpace(string(size)))
|
||||
if err != nil {
|
||||
return map[string]*info.MemoryInfo{}, err
|
||||
}
|
||||
memory[readableMemType].Capacity += uint64(mbToBytes(capacity))
|
||||
memory[readableMemType].DimmCount++
|
||||
}
|
||||
}
|
||||
|
||||
return memory, nil
|
||||
}
|
||||
|
||||
// mbToBytes converts a size in megabytes to bytes.
func mbToBytes(megabytes int) int {
	const bytesPerMB = 1 << 20
	return megabytes * bytesPerMB
}
|
||||
|
||||
// GetMachineSwapCapacity returns the machine's total swap from /proc/meminfo.
|
||||
// Returns the total swap capacity as an uint64 (number of bytes).
|
||||
func GetMachineSwapCapacity() (uint64, error) {
|
||||
out, err := os.ReadFile("/proc/meminfo")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
swapCapacity, err := parseCapacity(out, swapCapacityRegexp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return swapCapacity, err
|
||||
}
|
||||
|
||||
// GetTopology returns CPU topology reading information from sysfs:
// the topology nodes and the total number of cores.
func GetTopology(sysFs sysfs.SysFs) ([]info.Node, int, error) {
	return sysinfo.GetNodesInfo(sysFs)
}
|
||||
|
||||
// parseCapacity matches a Regexp in a []byte, returning the resulting value in bytes.
|
||||
// Assumes that the value matched by the Regexp is in KB.
|
||||
func parseCapacity(b []byte, r *regexp.Regexp) (uint64, error) {
|
||||
matches := r.FindSubmatch(b)
|
||||
if len(matches) != 2 {
|
||||
return 0, fmt.Errorf("failed to match regexp in output: %q", string(b))
|
||||
}
|
||||
m, err := strconv.ParseUint(string(matches[1]), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Convert to bytes.
|
||||
return m * 1024, err
|
||||
}
|
||||
|
||||
// getUniqueMatchesCount returns number of unique matches in given argument using provided regular expression
|
||||
func getUniqueMatchesCount(s string, r *regexp.Regexp) int {
|
||||
matches := r.FindAllString(s, -1)
|
||||
uniques := make(map[string]bool)
|
||||
for _, match := range matches {
|
||||
uniques[match] = true
|
||||
}
|
||||
return len(uniques)
|
||||
}
|
||||
|
||||
func getMachineArch() string {
|
||||
uname := unix.Utsname{}
|
||||
err := unix.Uname(&uname)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot get machine architecture, err: %v", err)
|
||||
return ""
|
||||
}
|
||||
return string(uname.Machine[:])
|
||||
}
|
||||
|
||||
// isArm32 reports whether the machine architecture string contains "arm"
// (32-bit ARM).
func isArm32() bool {
	return strings.Contains(machineArch, "arm")
}

// isAArch64 reports whether the machine architecture string contains
// "aarch64" (64-bit ARM).
func isAArch64() bool {
	return strings.Contains(machineArch, "aarch64")
}

// isSystemZ reports whether the machine architecture string contains "390"
// (s390/s390x).
func isSystemZ() bool {
	return strings.Contains(machineArch, "390")
}

// isRiscv64 reports whether the machine architecture string contains
// "riscv64".
func isRiscv64() bool {
	return strings.Contains(machineArch, "riscv64")
}

// isMips64 reports whether the machine architecture string contains "mips64".
func isMips64() bool {
	return strings.Contains(machineArch, "mips64")
}
|
54
vendor/github.com/google/cadvisor/machine/operatingsystem_unix.go
generated
vendored
Normal file
54
vendor/github.com/google/cadvisor/machine/operatingsystem_unix.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build freebsd || darwin || linux
|
||||
// +build freebsd darwin linux
|
||||
|
||||
package machine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// rex extracts the PRETTY_NAME entry from an os-release file.
var rex = regexp.MustCompile("(PRETTY_NAME)=(.*)")

// getOperatingSystem gets the name of the current operating system.
// On darwin/freebsd it shells out to `uname -s`; on Linux it reads the
// PRETTY_NAME from os-release, defaulting to "Linux" when absent.
func getOperatingSystem() (string, error) {
	if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" {
		osName, err := exec.Command("uname", "-s").Output()
		if err != nil {
			return "", err
		}
		return string(osName), nil
	}

	data, err := os.ReadFile("/etc/os-release")
	if err != nil && os.IsNotExist(err) {
		// /usr/lib/os-release in stateless systems like Clear Linux
		data, err = os.ReadFile("/usr/lib/os-release")
	}
	if err != nil {
		return "", fmt.Errorf("error opening file : %v", err)
	}

	if matches := rex.FindAllStringSubmatch(string(data), -1); len(matches) > 0 {
		// Strip the optional surrounding quotes from the PRETTY_NAME value.
		return strings.Trim(matches[0][2], "\""), nil
	}
	return "Linux", nil
}
|
54
vendor/github.com/google/cadvisor/machine/operatingsystem_windows.go
generated
vendored
Normal file
54
vendor/github.com/google/cadvisor/machine/operatingsystem_windows.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package machine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/sys/windows/registry"
|
||||
)
|
||||
|
||||
// getOperatingSystem returns a human-readable description of the Windows
// version (product name, release id, build number and revision) assembled
// from the registry key SOFTWARE\Microsoft\Windows NT\CurrentVersion.
// On any registry failure it falls back to the bare string "Windows".
func getOperatingSystem() (string, error) {
	system := "Windows"
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		return system, err
	}
	defer k.Close()

	productName, _, err := k.GetStringValue("ProductName")
	if err != nil {
		// NOTE(review): unlike the reads below, this branch swallows the
		// error and returns nil — TODO confirm whether that asymmetry is
		// intentional.
		return system, nil
	}

	releaseId, _, err := k.GetStringValue("ReleaseId")
	if err != nil {
		return system, err
	}

	currentBuildNumber, _, err := k.GetStringValue("CurrentBuildNumber")
	if err != nil {
		return system, err
	}
	// UBR = "Update Build Revision", the fourth component of the build.
	revision, _, err := k.GetIntegerValue("UBR")
	if err != nil {
		return system, err
	}

	system = fmt.Sprintf("%s Version %s (OS Build %s.%d)",
		productName, releaseId, currentBuildNumber, revision)

	return system, nil
}
|
763
vendor/github.com/google/cadvisor/manager/container.go
generated
vendored
Normal file
763
vendor/github.com/google/cadvisor/manager/container.go
generated
vendored
Normal file
@ -0,0 +1,763 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package manager
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/cache/memory"
|
||||
"github.com/google/cadvisor/collector"
|
||||
"github.com/google/cadvisor/container"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
v2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/google/cadvisor/stats"
|
||||
"github.com/google/cadvisor/summary"
|
||||
"github.com/google/cadvisor/utils/cpuload"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/clock"
|
||||
)
|
||||
|
||||
// enableLoadReader toggles the optional cpu load reader.
var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader")

// HousekeepingInterval is the base interval between container housekeepings.
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")

// TODO: replace regular expressions with something simpler, such as strings.Split().
//
// Cgroup type chosen to fetch the cgroup path of a process: memory is tried
// first, as it is one of the default cgroups enabled for most containers,
// but some systems (e.g. Raspberry Pi 4) disable the memory controller by
// default, so the cpu cgroup is used as a fallback.
var (
	cgroupMemoryPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)
	cgroupCPUPathRegExp    = regexp.MustCompile(`cpu[^:]*:(.*?)[,;$]`)
)
|
||||
|
||||
// containerInfo bundles a container's identity with its subcontainers and
// resource spec; it is the snapshot returned by containerData.GetInfo.
type containerInfo struct {
	info.ContainerReference
	// Subcontainers lists the direct children, refreshed by updateSubcontainers.
	Subcontainers []info.ContainerReference
	// Spec is the container's resource spec, refreshed by updateSpec.
	Spec info.ContainerSpec
}
|
||||
|
||||
// containerData tracks a single monitored container: its handler, cached
// metadata, and the bookkeeping used by the periodic housekeeping goroutine.
type containerData struct {
	// oomEvents is read with atomic.LoadUint64 in updateStats; writers are
	// outside this view.
	oomEvents   uint64
	handler     container.ContainerHandler
	// info caches the reference, spec and subcontainers; mutated under lock.
	info        containerInfo
	memoryCache *memory.InMemoryCache
	// lock guards info and statsLastUpdatedTime (see GetInfo, updateSpec,
	// housekeepingTick).
	lock                     sync.Mutex
	loadReader               cpuload.CpuLoadReader
	summaryReader            *summary.StatsSummary
	loadAvg                  float64 // smoothed load average seen so far.
	loadDAvg                 float64 // smoothed load.d average seen so far.
	housekeepingInterval     time.Duration
	maxHousekeepingInterval  time.Duration
	allowDynamicHousekeeping bool
	infoLastUpdatedTime      time.Time
	statsLastUpdatedTime     time.Time
	// lastErrorTime rate-limits error logging (see allowErrorLogging).
	lastErrorTime time.Time
	// used to track time
	clock clock.Clock

	// Decay value used for load average smoothing. Interval length of 10 seconds is used.
	loadDecay float64

	// Whether to log the usage of this container when it is updated.
	logUsage bool

	// Tells the container to stop.
	stop chan struct{}

	// Tells the container to immediately collect stats
	onDemandChan chan chan struct{}

	// Runs custom metric collectors.
	collectorManager collector.CollectorManager

	// perfCollector updates stats for perf_event cgroup controller.
	perfCollector stats.Collector

	// resctrlCollector updates stats for resctrl controller.
	resctrlCollector stats.Collector
}
|
||||
|
||||
// jitter returns a time.Duration between duration and duration + maxFactor * duration,
|
||||
// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
|
||||
// suggested default value will be chosen.
|
||||
func jitter(duration time.Duration, maxFactor float64) time.Duration {
|
||||
if maxFactor <= 0.0 {
|
||||
maxFactor = 1.0
|
||||
}
|
||||
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
|
||||
return wait
|
||||
}
|
||||
|
||||
// Start launches the background housekeeping goroutine for this container.
// It always returns nil; the goroutine runs until Stop closes cd.stop.
func (cd *containerData) Start() error {
	go cd.housekeeping()
	return nil
}
|
||||
|
||||
// Stop evicts the container from the in-memory cache, signals the
// housekeeping goroutine to exit, and tears down the perf/resctrl
// collectors. If eviction fails, the goroutine is left running and the
// error is returned.
func (cd *containerData) Stop() error {
	err := cd.memoryCache.RemoveContainer(cd.info.Name)
	if err != nil {
		return err
	}
	// Closing stop unblocks housekeepingTick and OnDemandHousekeeping.
	close(cd.stop)
	cd.perfCollector.Destroy()
	cd.resctrlCollector.Destroy()
	return nil
}
|
||||
|
||||
func (cd *containerData) allowErrorLogging() bool {
|
||||
if cd.clock.Since(cd.lastErrorTime) > time.Minute {
|
||||
cd.lastErrorTime = cd.clock.Now()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// OnDemandHousekeeping performs housekeeping on the container and blocks until it has completed.
// It is designed to be used in conjunction with periodic housekeeping, and will cause the timer for
// periodic housekeeping to reset. This should be used sparingly, as calling OnDemandHousekeeping frequently
// can have serious performance costs.
func (cd *containerData) OnDemandHousekeeping(maxAge time.Duration) {
	cd.lock.Lock()
	timeSinceStatsLastUpdate := cd.clock.Since(cd.statsLastUpdatedTime)
	cd.lock.Unlock()
	// Only trigger a collection when the cached stats are older than maxAge.
	if timeSinceStatsLastUpdate > maxAge {
		housekeepingFinishedChan := make(chan struct{})
		cd.onDemandChan <- housekeepingFinishedChan
		select {
		case <-cd.stop:
			// Container is stopping; don't wait for a run that may never come.
		case <-housekeepingFinishedChan:
			// Closed by notifyOnDemand once the housekeeping pass finishes.
		}
	}
}
|
||||
|
||||
// notifyOnDemand notifies all calls to OnDemandHousekeeping that housekeeping is finished
// by closing every channel currently queued on onDemandChan. The non-blocking
// default case ends the drain as soon as the queue is empty.
func (cd *containerData) notifyOnDemand() {
	for {
		select {
		case finishedChan := <-cd.onDemandChan:
			close(finishedChan)
		default:
			return
		}
	}
}
|
||||
|
||||
// GetInfo returns a snapshot of the container's reference, spec and
// subcontainers. The spec is refreshed when it is more than 5 seconds old or
// when shouldUpdateSubcontainers is set; subcontainers are refreshed only on
// request. The returned struct is a copy assembled under cd.lock.
func (cd *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo, error) {
	// Get spec and subcontainers.
	if cd.clock.Since(cd.infoLastUpdatedTime) > 5*time.Second || shouldUpdateSubcontainers {
		err := cd.updateSpec()
		if err != nil {
			return nil, err
		}
		if shouldUpdateSubcontainers {
			err = cd.updateSubcontainers()
			if err != nil {
				return nil, err
			}
		}
		cd.infoLastUpdatedTime = cd.clock.Now()
	}
	cd.lock.Lock()
	defer cd.lock.Unlock()
	// Build the snapshot field by field so the caller does not alias cd.info.
	cInfo := containerInfo{
		Subcontainers: cd.info.Subcontainers,
		Spec:          cd.info.Spec,
	}
	cInfo.Id = cd.info.Id
	cInfo.Name = cd.info.Name
	cInfo.Aliases = cd.info.Aliases
	cInfo.Namespace = cd.info.Namespace
	return &cInfo, nil
}
|
||||
|
||||
func (cd *containerData) DerivedStats() (v2.DerivedStats, error) {
|
||||
if cd.summaryReader == nil {
|
||||
return v2.DerivedStats{}, fmt.Errorf("derived stats not enabled for container %q", cd.info.Name)
|
||||
}
|
||||
return cd.summaryReader.DerivedStats()
|
||||
}
|
||||
|
||||
func (cd *containerData) getCgroupPath(cgroups string) string {
|
||||
if cgroups == "-" {
|
||||
return "/"
|
||||
}
|
||||
if strings.HasPrefix(cgroups, "0::") {
|
||||
return cgroups[3:]
|
||||
}
|
||||
matches := cgroupMemoryPathRegExp.FindSubmatch([]byte(cgroups))
|
||||
if len(matches) != 2 {
|
||||
klog.V(3).Infof(
|
||||
"failed to get memory cgroup path from %q, will try to get cpu cgroup path",
|
||||
cgroups,
|
||||
)
|
||||
// On some systems (e.g. Raspberry PI 4) cgroup memory controlled is disabled by default.
|
||||
matches = cgroupCPUPathRegExp.FindSubmatch([]byte(cgroups))
|
||||
if len(matches) != 2 {
|
||||
klog.V(3).Infof("failed to get cpu cgroup path from %q; assuming root cgroup", cgroups)
|
||||
// return root in case of failures - memory hierarchy might not be enabled.
|
||||
return "/"
|
||||
}
|
||||
}
|
||||
return string(matches[1])
|
||||
}
|
||||
|
||||
// Returns contents of a file inside the container root.
// Takes in a path relative to container root.
// The file is read through each container process's /proc/<pid>/root view;
// the first successful read wins.
func (cd *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, error) {
	pids, err := cd.getContainerPids(inHostNamespace)
	if err != nil {
		return nil, err
	}
	// TODO(rjnagal): Optimize by just reading container's cgroup.proc file when in host namespace.
	rootfs := "/"
	if !inHostNamespace {
		rootfs = "/rootfs"
	}
	for _, pid := range pids {
		filePath := path.Join(rootfs, "/proc", pid, "/root", filepath)
		klog.V(3).Infof("Trying path %q", filePath)
		data, err := os.ReadFile(filePath)
		if err == nil {
			return data, err
		}
	}
	// No process paths could be found. Declare config non-existent.
	return nil, fmt.Errorf("file %q does not exist", filepath)
}
|
||||
|
||||
// Return output for ps command in host /proc with specified format
|
||||
func (cd *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte, error) {
|
||||
args := []string{}
|
||||
command := "ps"
|
||||
if !inHostNamespace {
|
||||
command = "/usr/sbin/chroot"
|
||||
args = append(args, "/rootfs", "ps")
|
||||
}
|
||||
args = append(args, "-e", "-o", format)
|
||||
out, err := exec.Command(command, args...).Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute %q command: %v", command, err)
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
// Get pids of processes in this container.
// A slightly lighterweight call than GetProcessList if other details are not required.
func (cd *containerData) getContainerPids(inHostNamespace bool) ([]string, error) {
	format := "pid,cgroup"
	out, err := cd.getPsOutput(inHostNamespace, format)
	if err != nil {
		return nil, err
	}
	expectedFields := 2
	lines := strings.Split(string(out), "\n")
	pids := []string{}
	// lines[0] is the ps header; skip it.
	for _, line := range lines[1:] {
		if len(line) == 0 {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) < expectedFields {
			return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
		}
		pid := fields[0]
		cgroup := cd.getCgroupPath(fields[1])
		// Keep only processes whose cgroup is exactly this container's.
		if cd.info.Name == cgroup {
			pids = append(pids, pid)
		}
	}
	return pids, nil
}
|
||||
|
||||
func (cd *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
|
||||
format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,psr,cgroup"
|
||||
out, err := cd.getPsOutput(inHostNamespace, format)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cd.parseProcessList(cadvisorContainer, inHostNamespace, out)
|
||||
}
|
||||
|
||||
// parseProcessList converts raw ps output into ProcessInfo records,
// augmenting each with an fd count read from /proc/<pid>/fd. Processes that
// disappear before their fd directory can be listed are skipped.
func (cd *containerData) parseProcessList(cadvisorContainer string, inHostNamespace bool, out []byte) ([]v2.ProcessInfo, error) {
	rootfs := "/"
	if !inHostNamespace {
		rootfs = "/rootfs"
	}
	processes := []v2.ProcessInfo{}
	lines := strings.Split(string(out), "\n")
	// lines[0] is the ps header; skip it.
	for _, line := range lines[1:] {
		processInfo, err := cd.parsePsLine(line, cadvisorContainer, inHostNamespace)
		if err != nil {
			return nil, fmt.Errorf("could not parse line %s: %v", line, err)
		}
		// parsePsLine returns (nil, nil) for lines that should be filtered out.
		if processInfo == nil {
			continue
		}

		var fdCount int
		dirPath := path.Join(rootfs, "/proc", strconv.Itoa(processInfo.Pid), "fd")
		fds, err := os.ReadDir(dirPath)
		if err != nil {
			// Process likely exited between ps and now; drop it.
			klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
			continue
		}
		fdCount = len(fds)
		processInfo.FdCount = fdCount

		processes = append(processes, *processInfo)
	}
	return processes, nil
}
|
||||
|
||||
// isRoot reports whether this containerData represents the root cgroup ("/").
func (cd *containerData) isRoot() bool {
	return cd.info.Name == "/"
}
|
||||
|
||||
// parsePsLine parses one line of ps output in the column order produced by
// GetProcessList (user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,psr,cgroup).
// It returns (nil, nil) for empty lines and for processes that should be
// filtered out (the ps invocation itself, or processes outside this
// container when a non-root container is requested).
func (cd *containerData) parsePsLine(line, cadvisorContainer string, inHostNamespace bool) (*v2.ProcessInfo, error) {
	const expectedFields = 13
	if len(line) == 0 {
		return nil, nil
	}

	info := v2.ProcessInfo{}
	var err error

	fields := strings.Fields(line)
	if len(fields) < expectedFields {
		return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
	}
	info.User = fields[0]
	info.StartTime = fields[3]
	info.Status = fields[8]
	info.RunningTime = fields[9]

	info.Pid, err = strconv.Atoi(fields[1])
	if err != nil {
		return nil, fmt.Errorf("invalid pid %q: %v", fields[1], err)
	}
	info.Ppid, err = strconv.Atoi(fields[2])
	if err != nil {
		return nil, fmt.Errorf("invalid ppid %q: %v", fields[2], err)
	}

	percentCPU, err := strconv.ParseFloat(fields[4], 32)
	if err != nil {
		return nil, fmt.Errorf("invalid cpu percent %q: %v", fields[4], err)
	}
	info.PercentCpu = float32(percentCPU)
	percentMem, err := strconv.ParseFloat(fields[5], 32)
	if err != nil {
		return nil, fmt.Errorf("invalid memory percent %q: %v", fields[5], err)
	}
	info.PercentMemory = float32(percentMem)

	info.RSS, err = strconv.ParseUint(fields[6], 0, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err)
	}
	info.VirtualSize, err = strconv.ParseUint(fields[7], 0, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err)
	}
	// convert to bytes (ps reports rss/vsz in KiB)
	info.RSS *= 1024
	info.VirtualSize *= 1024

	// According to `man ps`: The following user-defined format specifiers may contain spaces: args, cmd, comm, command,
	// fname, ucmd, ucomm, lstart, bsdstart, start.
	// Therefore we need to be able to parse comm that consists of multiple space-separated parts.
	info.Cmd = strings.Join(fields[10:len(fields)-2], " ")

	// These are last two parts of the line. We create a subslice of `fields` to handle comm that includes spaces.
	lastTwoFields := fields[len(fields)-2:]
	info.Psr, err = strconv.Atoi(lastTwoFields[0])
	if err != nil {
		return nil, fmt.Errorf("invalid psr %q: %v", lastTwoFields[0], err)
	}
	info.CgroupPath = cd.getCgroupPath(lastTwoFields[1])

	// Remove the ps command we just ran from cadvisor container.
	// Not necessary, but makes the cadvisor page look cleaner.
	if !inHostNamespace && cadvisorContainer == info.CgroupPath && info.Cmd == "ps" {
		return nil, nil
	}

	// Do not report processes from other containers when non-root container requested.
	if !cd.isRoot() && info.CgroupPath != cd.info.Name {
		return nil, nil
	}

	// Remove cgroup information when non-root container requested.
	if !cd.isRoot() {
		info.CgroupPath = ""
	}
	return &info, nil
}
|
||||
|
||||
// newContainerData builds the containerData for one container: it wires the
// handler, cache and collectors, fetches the initial spec, and optionally
// creates a cpu load reader and a summary reader. The housekeeping goroutine
// is NOT started here — callers do that via Start().
func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, clock clock.Clock) (*containerData, error) {
	if memoryCache == nil {
		return nil, fmt.Errorf("nil memory storage")
	}
	if handler == nil {
		return nil, fmt.Errorf("nil container handler")
	}
	ref, err := handler.ContainerReference()
	if err != nil {
		return nil, err
	}

	cont := &containerData{
		handler:                  handler,
		memoryCache:              memoryCache,
		housekeepingInterval:     *HousekeepingInterval,
		maxHousekeepingInterval:  maxHousekeepingInterval,
		allowDynamicHousekeeping: allowDynamicHousekeeping,
		logUsage:                 logUsage,
		loadAvg:                  -1.0, // negative value indicates uninitialized.
		loadDAvg:                 -1.0, // negative value indicates uninitialized.
		stop:                     make(chan struct{}),
		collectorManager:         collectorManager,
		onDemandChan:             make(chan chan struct{}, 100),
		clock:                    clock,
		perfCollector:            &stats.NoopCollector{},
		resctrlCollector:         &stats.NoopCollector{},
	}
	cont.info.ContainerReference = ref

	// Decay chosen so the smoothed load stabilizes within ~10s (see loadDecay).
	cont.loadDecay = math.Exp(float64(-cont.housekeepingInterval.Seconds() / 10))

	if *enableLoadReader {
		// Create cpu load reader.
		loadReader, err := cpuload.New()
		if err != nil {
			// Non-fatal: continue without load stats.
			klog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err)
		} else {
			cont.loadReader = loadReader
		}
	}

	err = cont.updateSpec()
	if err != nil {
		return nil, err
	}
	cont.summaryReader, err = summary.New(cont.info.Spec)
	if err != nil {
		// Non-fatal: derived stats will simply be unavailable.
		cont.summaryReader = nil
		klog.V(5).Infof("Failed to create summary reader for %q: %v", ref.Name, err)
	}

	return cont, nil
}
|
||||
|
||||
// Determine when the next housekeeping should occur.
// With dynamic housekeeping enabled, the interval doubles (up to the
// configured maximum) while the two most recent stats samples are equal, and
// snaps back to the baseline once they differ. The result is jittered by up
// to 100% to avoid synchronized housekeeping across containers.
func (cd *containerData) nextHousekeepingInterval() time.Duration {
	if cd.allowDynamicHousekeeping {
		var empty time.Time
		stats, err := cd.memoryCache.RecentStats(cd.info.Name, empty, empty, 2)
		if err != nil {
			if cd.allowErrorLogging() {
				klog.V(4).Infof("Failed to get RecentStats(%q) while determining the next housekeeping: %v", cd.info.Name, err)
			}
		} else if len(stats) == 2 {
			// TODO(vishnuk): Use no processes as a signal.
			// Raise the interval if usage hasn't changed in the last housekeeping.
			if stats[0].StatsEq(stats[1]) && (cd.housekeepingInterval < cd.maxHousekeepingInterval) {
				cd.housekeepingInterval *= 2
				if cd.housekeepingInterval > cd.maxHousekeepingInterval {
					cd.housekeepingInterval = cd.maxHousekeepingInterval
				}
			} else if cd.housekeepingInterval != *HousekeepingInterval {
				// Lower interval back to the baseline.
				cd.housekeepingInterval = *HousekeepingInterval
			}
		}
	}

	return jitter(cd.housekeepingInterval, 1.0)
}
|
||||
|
||||
// TODO(vmarmol): Implement stats collecting as a custom collector.
// housekeeping is the container's main loop: it repeatedly runs a
// housekeeping tick, optionally logs recent usage, and resets its timer to
// the next (dynamic, jittered) interval. It exits when housekeepingTick
// observes cd.stop.
func (cd *containerData) housekeeping() {
	// Start any background goroutines - must be cleaned up in cd.handler.Cleanup().
	cd.handler.Start()
	defer cd.handler.Cleanup()

	// Initialize cpuload reader - must be cleaned up in cd.loadReader.Stop()
	if cd.loadReader != nil {
		err := cd.loadReader.Start()
		if err != nil {
			klog.Warningf("Could not start cpu load stat collector for %q: %s", cd.info.Name, err)
		}
		defer cd.loadReader.Stop()
	}

	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if *HousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = *HousekeepingInterval / 2
	}

	// Housekeep every second.
	klog.V(3).Infof("Start housekeeping for container %q\n", cd.info.Name)
	// Zero initial delay so the first tick runs immediately.
	houseKeepingTimer := cd.clock.NewTimer(0 * time.Second)
	defer houseKeepingTimer.Stop()
	for {
		if !cd.housekeepingTick(houseKeepingTimer.C(), longHousekeeping) {
			return
		}
		// Stop and drain the timer so that it is safe to reset it
		if !houseKeepingTimer.Stop() {
			select {
			case <-houseKeepingTimer.C():
			default:
			}
		}
		// Log usage if asked to do so.
		if cd.logUsage {
			const numSamples = 60
			var empty time.Time
			stats, err := cd.memoryCache.RecentStats(cd.info.Name, empty, empty, numSamples)
			if err != nil {
				if cd.allowErrorLogging() {
					klog.Warningf("[%s] Failed to get recent stats for logging usage: %v", cd.info.Name, err)
				}
			} else if len(stats) < numSamples {
				// Ignore, not enough stats yet.
			} else {
				// Sum cpu usage deltas over the sample window.
				usageCPUNs := uint64(0)
				for i := range stats {
					if i > 0 {
						usageCPUNs += stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total
					}
				}
				usageMemory := stats[numSamples-1].Memory.Usage

				instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds())
				usageInCores := float64(usageCPUNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
				usageInHuman := units.HumanSize(float64(usageMemory))
				// Don't set verbosity since this is already protected by the logUsage flag.
				klog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", cd.info.Name, instantUsageInCores, usageInCores, usageInHuman)
			}
		}
		houseKeepingTimer.Reset(cd.nextHousekeepingInterval())
	}
}
|
||||
|
||||
// housekeepingTick waits for a stop signal, an on-demand request, or the
// timer, then performs one stats update. It returns false when the container
// was told to stop, true otherwise. Housekeeping passes slower than
// longHousekeeping are logged.
func (cd *containerData) housekeepingTick(timer <-chan time.Time, longHousekeeping time.Duration) bool {
	select {
	case <-cd.stop:
		// Stop housekeeping when signaled.
		return false
	case finishedChan := <-cd.onDemandChan:
		// notify the calling function once housekeeping has completed
		defer close(finishedChan)
	case <-timer:
	}
	start := cd.clock.Now()
	err := cd.updateStats()
	if err != nil {
		if cd.allowErrorLogging() {
			klog.Warningf("Failed to update stats for container \"%s\": %s", cd.info.Name, err)
		}
	}
	// Log if housekeeping took too long.
	duration := cd.clock.Since(start)
	if duration >= longHousekeeping {
		klog.V(3).Infof("[%s] Housekeeping took %s", cd.info.Name, duration)
	}
	// Wake any other waiters queued on onDemandChan for this pass.
	cd.notifyOnDemand()
	cd.lock.Lock()
	defer cd.lock.Unlock()
	cd.statsLastUpdatedTime = cd.clock.Now()
	return true
}
|
||||
|
||||
// updateSpec refreshes cd.info.Spec from the handler, merging in any custom
// metric specs from the collector manager. Errors are ignored when the
// container no longer exists.
func (cd *containerData) updateSpec() error {
	spec, err := cd.handler.GetSpec()
	if err != nil {
		// Ignore errors if the container is dead.
		if !cd.handler.Exists() {
			return nil
		}
		return err
	}

	customMetrics, err := cd.collectorManager.GetSpec()
	if err != nil {
		return err
	}
	if len(customMetrics) > 0 {
		spec.HasCustomMetrics = true
		spec.CustomMetrics = customMetrics
	}
	// Publish the new spec under the lock so readers (GetInfo) see a
	// consistent value.
	cd.lock.Lock()
	defer cd.lock.Unlock()
	cd.info.Spec = spec
	return nil
}
|
||||
|
||||
// Calculate new smoothed load average using the new sample of runnable threads.
|
||||
// The decay used ensures that the load will stabilize on a new constant value within
|
||||
// 10 seconds.
|
||||
func (cd *containerData) updateLoad(newLoad uint64) {
|
||||
if cd.loadAvg < 0 {
|
||||
cd.loadAvg = float64(newLoad) // initialize to the first seen sample for faster stabilization.
|
||||
} else {
|
||||
cd.loadAvg = cd.loadAvg*cd.loadDecay + float64(newLoad)*(1.0-cd.loadDecay)
|
||||
}
|
||||
}
|
||||
|
||||
func (cd *containerData) updateLoadD(newLoad uint64) {
|
||||
if cd.loadDAvg < 0 {
|
||||
cd.loadDAvg = float64(newLoad) // initialize to the first seen sample for faster stabilization.
|
||||
} else {
|
||||
cd.loadDAvg = cd.loadDAvg*cd.loadDecay + float64(newLoad)*(1.0-cd.loadDecay)
|
||||
}
|
||||
}
|
||||
|
||||
func (cd *containerData) updateStats() error {
|
||||
stats, statsErr := cd.handler.GetStats()
|
||||
if statsErr != nil {
|
||||
// Ignore errors if the container is dead.
|
||||
if !cd.handler.Exists() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stats may be partially populated, push those before we return an error.
|
||||
statsErr = fmt.Errorf("%v, continuing to push stats", statsErr)
|
||||
}
|
||||
if stats == nil {
|
||||
return statsErr
|
||||
}
|
||||
if cd.loadReader != nil {
|
||||
// TODO(vmarmol): Cache this path.
|
||||
path, err := cd.handler.GetCgroupPath("cpu")
|
||||
if err == nil {
|
||||
loadStats, err := cd.loadReader.GetCpuLoad(cd.info.Name, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get load stat for %q - path %q, error %s", cd.info.Name, path, err)
|
||||
}
|
||||
stats.TaskStats = loadStats
|
||||
cd.updateLoad(loadStats.NrRunning)
|
||||
// convert to 'milliLoad' to avoid floats and preserve precision.
|
||||
stats.Cpu.LoadAverage = int32(cd.loadAvg * 1000)
|
||||
|
||||
cd.updateLoadD(loadStats.NrUninterruptible)
|
||||
// convert to 'milliLoad' to avoid floats and preserve precision.
|
||||
stats.Cpu.LoadDAverage = int32(cd.loadDAvg * 1000)
|
||||
}
|
||||
}
|
||||
if cd.summaryReader != nil {
|
||||
err := cd.summaryReader.AddSample(*stats)
|
||||
if err != nil {
|
||||
// Ignore summary errors for now.
|
||||
klog.V(2).Infof("Failed to add summary stats for %q: %v", cd.info.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
stats.OOMEvents = atomic.LoadUint64(&cd.oomEvents)
|
||||
|
||||
var customStatsErr error
|
||||
cm := cd.collectorManager.(*collector.GenericCollectorManager)
|
||||
if len(cm.Collectors) > 0 {
|
||||
if cm.NextCollectionTime.Before(cd.clock.Now()) {
|
||||
customStats, err := cd.updateCustomStats()
|
||||
if customStats != nil {
|
||||
stats.CustomMetrics = customStats
|
||||
}
|
||||
if err != nil {
|
||||
customStatsErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
perfStatsErr := cd.perfCollector.UpdateStats(stats)
|
||||
|
||||
resctrlStatsErr := cd.resctrlCollector.UpdateStats(stats)
|
||||
|
||||
ref, err := cd.handler.ContainerReference()
|
||||
if err != nil {
|
||||
// Ignore errors if the container is dead.
|
||||
if !cd.handler.Exists() {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
cInfo := info.ContainerInfo{
|
||||
ContainerReference: ref,
|
||||
}
|
||||
|
||||
err = cd.memoryCache.AddStats(&cInfo, stats)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if statsErr != nil {
|
||||
return statsErr
|
||||
}
|
||||
if perfStatsErr != nil {
|
||||
klog.Errorf("error occurred while collecting perf stats for container %s: %s", cInfo.Name, err)
|
||||
return perfStatsErr
|
||||
}
|
||||
if resctrlStatsErr != nil {
|
||||
klog.Errorf("error occurred while collecting resctrl stats for container %s: %s", cInfo.Name, resctrlStatsErr)
|
||||
return resctrlStatsErr
|
||||
}
|
||||
return customStatsErr
|
||||
}
|
||||
|
||||
func (cd *containerData) updateCustomStats() (map[string][]info.MetricVal, error) {
|
||||
_, customStats, customStatsErr := cd.collectorManager.Collect()
|
||||
if customStatsErr != nil {
|
||||
if !cd.handler.Exists() {
|
||||
return customStats, nil
|
||||
}
|
||||
customStatsErr = fmt.Errorf("%v, continuing to push custom stats", customStatsErr)
|
||||
}
|
||||
return customStats, customStatsErr
|
||||
}
|
||||
|
||||
func (cd *containerData) updateSubcontainers() error {
|
||||
var subcontainers info.ContainerReferenceSlice
|
||||
subcontainers, err := cd.handler.ListContainers(container.ListSelf)
|
||||
if err != nil {
|
||||
// Ignore errors if the container is dead.
|
||||
if !cd.handler.Exists() {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
sort.Sort(subcontainers)
|
||||
cd.lock.Lock()
|
||||
defer cd.lock.Unlock()
|
||||
cd.info.Subcontainers = subcontainers
|
||||
return nil
|
||||
}
|
1421
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
Normal file
1421
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
138
vendor/github.com/google/cadvisor/nvm/machine_libipmctl.go
generated
vendored
Normal file
138
vendor/github.com/google/cadvisor/nvm/machine_libipmctl.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
//go:build libipmctl && cgo
|
||||
// +build libipmctl,cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nvm
|
||||
|
||||
// #cgo pkg-config: libipmctl
|
||||
// #include <nvm_management.h>
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
isNVMLibInitialized = false
|
||||
nvmLibMutex = sync.Mutex{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
nvmLibMutex.Lock()
|
||||
defer nvmLibMutex.Unlock()
|
||||
cErr := C.nvm_init()
|
||||
if cErr != C.NVM_SUCCESS {
|
||||
// Unfortunately klog does not seem to work here. I believe it's better to
|
||||
// output information using fmt rather then let it disappear silently.
|
||||
fmt.Printf("libipmctl initialization failed with status %d", cErr)
|
||||
return
|
||||
}
|
||||
isNVMLibInitialized = true
|
||||
}
|
||||
|
||||
// getAvgPowerBudget retrieves configured power budget
|
||||
// (in watts) for NVM devices. When libipmctl is not available
|
||||
// zero is returned.
|
||||
func getAvgPowerBudget() (uint, error) {
|
||||
// Get number of devices on the platform
|
||||
// see: https://github.com/intel/ipmctl/blob/v01.00.00.3497/src/os/nvm_api/nvm_management.h#L1478
|
||||
count := C.uint(0)
|
||||
err := C.nvm_get_number_of_devices(&count)
|
||||
if err != C.NVM_SUCCESS {
|
||||
klog.Warningf("Unable to get number of NVM devices. Status code: %d", err)
|
||||
return uint(0), fmt.Errorf("Unable to get number of NVM devices. Status code: %d", err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
klog.V(4).Infof("There are no NVM devices.")
|
||||
return uint(0), nil
|
||||
}
|
||||
|
||||
// Load basic device information for all the devices
|
||||
// to obtain UID of the first one.
|
||||
devices := make([]C.struct_device_discovery, count)
|
||||
err = C.nvm_get_devices(&devices[0], C.uchar(count))
|
||||
if err != C.NVM_SUCCESS {
|
||||
klog.Warningf("Unable to get all NVM devices. Status code: %d", err)
|
||||
return uint(0), fmt.Errorf("Unable to get all NVM devices. Status code: %d", err)
|
||||
}
|
||||
|
||||
// Power budget is same for all the devices
|
||||
// so we can rely on any of them.
|
||||
device := C.struct_device_details{}
|
||||
err = C.nvm_get_device_details(&devices[0].uid[0], &device)
|
||||
if err != C.NVM_SUCCESS {
|
||||
uid := C.GoString(&devices[0].uid[0])
|
||||
klog.Warningf("Unable to get details of NVM device %q. Status code: %d", uid, err)
|
||||
return uint(0), fmt.Errorf("Unable to get details of NVM device %q. Status code: %d", uid, err)
|
||||
}
|
||||
|
||||
return uint(device.avg_power_budget / 1000), nil
|
||||
}
|
||||
|
||||
// getCapacities retrieves the total NVM capacity in bytes for memory mode and app direct mode
|
||||
func getCapacities() (uint64, uint64, error) {
|
||||
caps := C.struct_device_capacities{}
|
||||
err := C.nvm_get_nvm_capacities(&caps)
|
||||
if err != C.NVM_SUCCESS {
|
||||
klog.Warningf("Unable to get NVM capacity. Status code: %d", err)
|
||||
return uint64(0), uint64(0), fmt.Errorf("Unable to get NVM capacity. Status code: %d", err)
|
||||
}
|
||||
return uint64(caps.memory_capacity), uint64(caps.app_direct_capacity), nil
|
||||
}
|
||||
|
||||
// GetInfo returns information specific for non-volatile memory modules
|
||||
func GetInfo() (info.NVMInfo, error) {
|
||||
nvmLibMutex.Lock()
|
||||
defer nvmLibMutex.Unlock()
|
||||
|
||||
nvmInfo := info.NVMInfo{}
|
||||
if !isNVMLibInitialized {
|
||||
klog.V(1).Info("libipmctl has not been initialized. NVM information will not be available")
|
||||
return nvmInfo, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
nvmInfo.MemoryModeCapacity, nvmInfo.AppDirectModeCapacity, err = getCapacities()
|
||||
if err != nil {
|
||||
return info.NVMInfo{}, fmt.Errorf("Unable to get NVM capacities, err: %s", err)
|
||||
}
|
||||
|
||||
nvmInfo.AvgPowerBudget, err = getAvgPowerBudget()
|
||||
if err != nil {
|
||||
return info.NVMInfo{}, fmt.Errorf("Unable to get NVM average power budget, err: %s", err)
|
||||
}
|
||||
return nvmInfo, nil
|
||||
}
|
||||
|
||||
// Finalize un-initializes libipmctl. See https://github.com/google/cadvisor/issues/2457.
|
||||
func Finalize() {
|
||||
nvmLibMutex.Lock()
|
||||
defer nvmLibMutex.Unlock()
|
||||
|
||||
klog.V(1).Info("Attempting to un-initialize libipmctl")
|
||||
if !isNVMLibInitialized {
|
||||
klog.V(1).Info("libipmctl has not been initialized; not un-initializing.")
|
||||
return
|
||||
}
|
||||
|
||||
C.nvm_uninit()
|
||||
isNVMLibInitialized = false
|
||||
}
|
36
vendor/github.com/google/cadvisor/nvm/machine_no_libipmctl.go
generated
vendored
Normal file
36
vendor/github.com/google/cadvisor/nvm/machine_no_libipmctl.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
//go:build !libipmctl || !cgo
|
||||
// +build !libipmctl !cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nvm
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// GetInfo returns information specific for non-volatile memory modules.
|
||||
// When libipmctl is not available zero value is returned.
|
||||
func GetInfo() (info.NVMInfo, error) {
|
||||
return info.NVMInfo{}, nil
|
||||
}
|
||||
|
||||
// Finalize un-initializes libipmctl. See https://github.com/google/cadvisor/issues/2457.
|
||||
// When libipmctl is not available it just logs that it's being called.
|
||||
func Finalize() {
|
||||
klog.V(4).Info("libipmctl not available, doing nothing.")
|
||||
}
|
456
vendor/github.com/google/cadvisor/perf/collector_libpfm.go
generated
vendored
Normal file
456
vendor/github.com/google/cadvisor/perf/collector_libpfm.go
generated
vendored
Normal file
@ -0,0 +1,456 @@
|
||||
//go:build libpfm && cgo
|
||||
// +build libpfm,cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Collector of perf events for a container.
|
||||
package perf
|
||||
|
||||
// #cgo CFLAGS: -I/usr/include
|
||||
// #cgo LDFLAGS: -lpfm
|
||||
// #include <perfmon/pfmlib.h>
|
||||
// #include <stdlib.h>
|
||||
// #include <string.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/stats"
|
||||
)
|
||||
|
||||
type collector struct {
|
||||
cgroupPath string
|
||||
events PerfEvents
|
||||
cpuFiles map[int]group
|
||||
cpuFilesLock sync.Mutex
|
||||
onlineCPUs []int
|
||||
eventToCustomEvent map[Event]*CustomEvent
|
||||
uncore stats.Collector
|
||||
|
||||
// Handle for mocking purposes.
|
||||
perfEventOpen func(attr *unix.PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
|
||||
ioctlSetInt func(fd int, req uint, value int) error
|
||||
}
|
||||
|
||||
type group struct {
|
||||
cpuFiles map[string]map[int]readerCloser
|
||||
names []string
|
||||
leaderName string
|
||||
}
|
||||
|
||||
var (
|
||||
isLibpfmInitialized = false
|
||||
libpfmMutex = sync.Mutex{}
|
||||
)
|
||||
|
||||
const (
|
||||
groupLeaderFileDescriptor = -1
|
||||
)
|
||||
|
||||
func init() {
|
||||
libpfmMutex.Lock()
|
||||
defer libpfmMutex.Unlock()
|
||||
pErr := C.pfm_initialize()
|
||||
if pErr != C.PFM_SUCCESS {
|
||||
klog.Errorf("unable to initialize libpfm: %d", int(pErr))
|
||||
return
|
||||
}
|
||||
isLibpfmInitialized = true
|
||||
}
|
||||
|
||||
func newCollector(cgroupPath string, events PerfEvents, onlineCPUs []int, cpuToSocket map[int]int) *collector {
|
||||
collector := &collector{cgroupPath: cgroupPath, events: events, onlineCPUs: onlineCPUs, cpuFiles: map[int]group{}, uncore: NewUncoreCollector(cgroupPath, events, cpuToSocket), perfEventOpen: unix.PerfEventOpen, ioctlSetInt: unix.IoctlSetInt}
|
||||
mapEventsToCustomEvents(collector)
|
||||
return collector
|
||||
}
|
||||
|
||||
func (c *collector) UpdateStats(stats *info.ContainerStats) error {
|
||||
err := c.uncore.UpdateStats(stats)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get uncore perf event stats: %v", err)
|
||||
}
|
||||
|
||||
c.cpuFilesLock.Lock()
|
||||
defer c.cpuFilesLock.Unlock()
|
||||
|
||||
stats.PerfStats = []info.PerfStat{}
|
||||
klog.V(5).Infof("Attempting to update perf_event stats from cgroup %q", c.cgroupPath)
|
||||
|
||||
for _, group := range c.cpuFiles {
|
||||
for cpu, file := range group.cpuFiles[group.leaderName] {
|
||||
stat, err := readGroupPerfStat(file, group, cpu, c.cgroupPath)
|
||||
if err != nil {
|
||||
klog.Warningf("Unable to read from perf_event_file (event: %q, CPU: %d) for %q: %q", group.leaderName, cpu, c.cgroupPath, err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
stats.PerfStats = append(stats.PerfStats, stat...)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readGroupPerfStat(file readerCloser, group group, cpu int, cgroupPath string) ([]info.PerfStat, error) {
|
||||
values, err := getPerfValues(file, group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
perfStats := make([]info.PerfStat, len(values))
|
||||
for i, value := range values {
|
||||
klog.V(5).Infof("Read metric for event %q for cpu %d from cgroup %q: %d", value.Name, cpu, cgroupPath, value.Value)
|
||||
perfStats[i] = info.PerfStat{
|
||||
PerfValue: value,
|
||||
Cpu: cpu,
|
||||
}
|
||||
}
|
||||
|
||||
return perfStats, nil
|
||||
}
|
||||
|
||||
func getPerfValues(file readerCloser, group group) ([]info.PerfValue, error) {
|
||||
// 24 bytes of GroupReadFormat struct.
|
||||
// 16 bytes of Values struct for each element in group.
|
||||
// See https://man7.org/linux/man-pages/man2/perf_event_open.2.html section "Reading results" with PERF_FORMAT_GROUP specified.
|
||||
buf := make([]byte, 24+16*len(group.names))
|
||||
_, err := file.Read(buf)
|
||||
if err != nil {
|
||||
return []info.PerfValue{}, fmt.Errorf("unable to read perf event group ( leader = %s ): %w", group.leaderName, err)
|
||||
}
|
||||
perfData := &GroupReadFormat{}
|
||||
reader := bytes.NewReader(buf[:24])
|
||||
err = binary.Read(reader, binary.LittleEndian, perfData)
|
||||
if err != nil {
|
||||
return []info.PerfValue{}, fmt.Errorf("unable to decode perf event group ( leader = %s ): %w", group.leaderName, err)
|
||||
}
|
||||
values := make([]Values, perfData.Nr)
|
||||
reader = bytes.NewReader(buf[24:])
|
||||
err = binary.Read(reader, binary.LittleEndian, values)
|
||||
if err != nil {
|
||||
return []info.PerfValue{}, fmt.Errorf("unable to decode perf event group values ( leader = %s ): %w", group.leaderName, err)
|
||||
}
|
||||
|
||||
scalingRatio := 1.0
|
||||
if perfData.TimeRunning != 0 && perfData.TimeEnabled != 0 {
|
||||
scalingRatio = float64(perfData.TimeRunning) / float64(perfData.TimeEnabled)
|
||||
}
|
||||
|
||||
perfValues := make([]info.PerfValue, perfData.Nr)
|
||||
if scalingRatio != float64(0) {
|
||||
for i, name := range group.names {
|
||||
perfValues[i] = info.PerfValue{
|
||||
ScalingRatio: scalingRatio,
|
||||
Value: uint64(float64(values[i].Value) / scalingRatio),
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i, name := range group.names {
|
||||
perfValues[i] = info.PerfValue{
|
||||
ScalingRatio: scalingRatio,
|
||||
Value: values[i].Value,
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return perfValues, nil
|
||||
}
|
||||
|
||||
func (c *collector) setup() error {
|
||||
cgroup, err := os.Open(c.cgroupPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open cgroup directory %s: %s", c.cgroupPath, err)
|
||||
}
|
||||
defer cgroup.Close()
|
||||
|
||||
c.cpuFilesLock.Lock()
|
||||
defer c.cpuFilesLock.Unlock()
|
||||
cgroupFd := int(cgroup.Fd())
|
||||
groupIndex := 0
|
||||
for _, group := range c.events.Core.Events {
|
||||
// CPUs file descriptors of group leader needed for perf_event_open.
|
||||
leaderFileDescriptors := make(map[int]int, len(c.onlineCPUs))
|
||||
for _, cpu := range c.onlineCPUs {
|
||||
leaderFileDescriptors[cpu] = groupLeaderFileDescriptor
|
||||
}
|
||||
|
||||
leaderFileDescriptors, err := c.createLeaderFileDescriptors(group.events, cgroupFd, groupIndex, leaderFileDescriptors)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot count perf event group %v: %v", group.events, err)
|
||||
c.deleteGroup(groupIndex)
|
||||
continue
|
||||
} else {
|
||||
groupIndex++
|
||||
}
|
||||
|
||||
// Group is prepared so we should reset and enable counting.
|
||||
for _, fd := range leaderFileDescriptors {
|
||||
err = c.ioctlSetInt(fd, unix.PERF_EVENT_IOC_RESET, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.ioctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) createLeaderFileDescriptors(events []Event, cgroupFd int, groupIndex int, leaderFileDescriptors map[int]int) (map[int]int, error) {
|
||||
for j, event := range events {
|
||||
// First element is group leader.
|
||||
isGroupLeader := j == 0
|
||||
customEvent, ok := c.eventToCustomEvent[event]
|
||||
var err error
|
||||
if ok {
|
||||
config := c.createConfigFromRawEvent(customEvent)
|
||||
leaderFileDescriptors, err = c.registerEvent(eventInfo{string(customEvent.Name), config, cgroupFd, groupIndex, isGroupLeader}, leaderFileDescriptors)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot register perf event: %v", err)
|
||||
}
|
||||
} else {
|
||||
config, err := c.createConfigFromEvent(event)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create config from perf event: %v", err)
|
||||
|
||||
}
|
||||
leaderFileDescriptors, err = c.registerEvent(eventInfo{string(event), config, cgroupFd, groupIndex, isGroupLeader}, leaderFileDescriptors)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot register perf event: %v", err)
|
||||
}
|
||||
// Clean memory allocated by C code.
|
||||
C.free(unsafe.Pointer(config))
|
||||
}
|
||||
}
|
||||
return leaderFileDescriptors, nil
|
||||
}
|
||||
|
||||
func readPerfEventAttr(name string, pfmGetOsEventEncoding func(string, unsafe.Pointer) error) (*unix.PerfEventAttr, error) {
|
||||
perfEventAttrMemory := C.malloc(C.size_t(unsafe.Sizeof(unix.PerfEventAttr{})))
|
||||
// Fill memory with 0 values.
|
||||
C.memset(perfEventAttrMemory, 0, C.size_t(unsafe.Sizeof(unix.PerfEventAttr{})))
|
||||
err := pfmGetOsEventEncoding(name, unsafe.Pointer(perfEventAttrMemory))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return (*unix.PerfEventAttr)(perfEventAttrMemory), nil
|
||||
}
|
||||
|
||||
func pfmGetOsEventEncoding(name string, perfEventAttrMemory unsafe.Pointer) error {
|
||||
event := pfmPerfEncodeArgT{}
|
||||
fstr := C.CString("")
|
||||
defer C.free(unsafe.Pointer(fstr))
|
||||
event.fstr = unsafe.Pointer(fstr)
|
||||
event.attr = perfEventAttrMemory
|
||||
event.size = C.size_t(unsafe.Sizeof(event))
|
||||
cSafeName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cSafeName))
|
||||
pErr := C.pfm_get_os_event_encoding(cSafeName, C.PFM_PLM0|C.PFM_PLM3, C.PFM_OS_PERF_EVENT, unsafe.Pointer(&event))
|
||||
if pErr != C.PFM_SUCCESS {
|
||||
return fmt.Errorf("unable to transform event name %s to perf_event_attr: %d", name, int(pErr))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type eventInfo struct {
|
||||
name string
|
||||
config *unix.PerfEventAttr
|
||||
pid int
|
||||
groupIndex int
|
||||
isGroupLeader bool
|
||||
}
|
||||
|
||||
func (c *collector) registerEvent(event eventInfo, leaderFileDescriptors map[int]int) (map[int]int, error) {
|
||||
newLeaderFileDescriptors := make(map[int]int, len(c.onlineCPUs))
|
||||
var pid, flags int
|
||||
if event.isGroupLeader {
|
||||
pid = event.pid
|
||||
flags = unix.PERF_FLAG_FD_CLOEXEC | unix.PERF_FLAG_PID_CGROUP
|
||||
} else {
|
||||
pid = -1
|
||||
flags = unix.PERF_FLAG_FD_CLOEXEC
|
||||
}
|
||||
|
||||
setAttributes(event.config, event.isGroupLeader)
|
||||
|
||||
for _, cpu := range c.onlineCPUs {
|
||||
fd, err := c.perfEventOpen(event.config, pid, cpu, leaderFileDescriptors[cpu], flags)
|
||||
if err != nil {
|
||||
return leaderFileDescriptors, fmt.Errorf("setting up perf event %#v failed: %q", event.config, err)
|
||||
}
|
||||
perfFile := os.NewFile(uintptr(fd), event.name)
|
||||
if perfFile == nil {
|
||||
return leaderFileDescriptors, fmt.Errorf("unable to create os.File from file descriptor %#v", fd)
|
||||
}
|
||||
|
||||
c.addEventFile(event.groupIndex, event.name, cpu, perfFile)
|
||||
|
||||
// If group leader, save fd for others.
|
||||
if event.isGroupLeader {
|
||||
newLeaderFileDescriptors[cpu] = fd
|
||||
}
|
||||
}
|
||||
|
||||
if event.isGroupLeader {
|
||||
return newLeaderFileDescriptors, nil
|
||||
}
|
||||
return leaderFileDescriptors, nil
|
||||
}
|
||||
|
||||
func (c *collector) addEventFile(index int, name string, cpu int, perfFile *os.File) {
|
||||
_, ok := c.cpuFiles[index]
|
||||
if !ok {
|
||||
c.cpuFiles[index] = group{
|
||||
leaderName: name,
|
||||
cpuFiles: map[string]map[int]readerCloser{},
|
||||
}
|
||||
}
|
||||
|
||||
_, ok = c.cpuFiles[index].cpuFiles[name]
|
||||
if !ok {
|
||||
c.cpuFiles[index].cpuFiles[name] = map[int]readerCloser{}
|
||||
}
|
||||
|
||||
c.cpuFiles[index].cpuFiles[name][cpu] = perfFile
|
||||
|
||||
// Check if name is already stored.
|
||||
for _, have := range c.cpuFiles[index].names {
|
||||
if name == have {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise save it.
|
||||
c.cpuFiles[index] = group{
|
||||
cpuFiles: c.cpuFiles[index].cpuFiles,
|
||||
names: append(c.cpuFiles[index].names, name),
|
||||
leaderName: c.cpuFiles[index].leaderName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *collector) deleteGroup(index int) {
|
||||
for name, files := range c.cpuFiles[index].cpuFiles {
|
||||
for cpu, file := range files {
|
||||
klog.V(5).Infof("Closing perf event file descriptor for cgroup %q, event %q and CPU %d", c.cgroupPath, name, cpu)
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
klog.Warningf("Unable to close perf event file descriptor for cgroup %q, event %q and CPU %d", c.cgroupPath, name, cpu)
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(c.cpuFiles, index)
|
||||
}
|
||||
|
||||
func createPerfEventAttr(event CustomEvent) *unix.PerfEventAttr {
|
||||
length := len(event.Config)
|
||||
|
||||
config := &unix.PerfEventAttr{
|
||||
Type: event.Type,
|
||||
Config: event.Config[0],
|
||||
}
|
||||
if length >= 2 {
|
||||
config.Ext1 = event.Config[1]
|
||||
}
|
||||
if length == 3 {
|
||||
config.Ext2 = event.Config[2]
|
||||
}
|
||||
|
||||
klog.V(5).Infof("perf_event_attr struct prepared: %#v", config)
|
||||
return config
|
||||
}
|
||||
|
||||
func setAttributes(config *unix.PerfEventAttr, leader bool) {
|
||||
config.Sample_type = unix.PERF_SAMPLE_IDENTIFIER
|
||||
config.Read_format = unix.PERF_FORMAT_TOTAL_TIME_ENABLED | unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_GROUP | unix.PERF_FORMAT_ID
|
||||
config.Bits = unix.PerfBitInherit
|
||||
|
||||
// Group leader should have this flag set to disable counting until all group would be prepared.
|
||||
if leader {
|
||||
config.Bits |= unix.PerfBitDisabled
|
||||
}
|
||||
|
||||
config.Size = uint32(unsafe.Sizeof(unix.PerfEventAttr{}))
|
||||
}
|
||||
|
||||
func (c *collector) Destroy() {
|
||||
c.uncore.Destroy()
|
||||
c.cpuFilesLock.Lock()
|
||||
defer c.cpuFilesLock.Unlock()
|
||||
|
||||
for i := range c.cpuFiles {
|
||||
c.deleteGroup(i)
|
||||
}
|
||||
}
|
||||
|
||||
// Finalize terminates libpfm4 to free resources.
|
||||
func Finalize() {
|
||||
libpfmMutex.Lock()
|
||||
defer libpfmMutex.Unlock()
|
||||
|
||||
klog.V(1).Info("Attempting to terminate libpfm4")
|
||||
if !isLibpfmInitialized {
|
||||
klog.V(1).Info("libpfm4 has not been initialized; not terminating.")
|
||||
return
|
||||
}
|
||||
|
||||
C.pfm_terminate()
|
||||
isLibpfmInitialized = false
|
||||
}
|
||||
|
||||
func mapEventsToCustomEvents(collector *collector) {
|
||||
collector.eventToCustomEvent = map[Event]*CustomEvent{}
|
||||
for key, event := range collector.events.Core.CustomEvents {
|
||||
collector.eventToCustomEvent[event.Name] = &collector.events.Core.CustomEvents[key]
|
||||
}
|
||||
}
|
||||
|
||||
func (c *collector) createConfigFromRawEvent(event *CustomEvent) *unix.PerfEventAttr {
|
||||
klog.V(5).Infof("Setting up raw perf event %#v", event)
|
||||
|
||||
config := createPerfEventAttr(*event)
|
||||
|
||||
klog.V(5).Infof("perf_event_attr: %#v", config)
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
func (c *collector) createConfigFromEvent(event Event) (*unix.PerfEventAttr, error) {
|
||||
klog.V(5).Infof("Setting up perf event %s", string(event))
|
||||
|
||||
config, err := readPerfEventAttr(string(event), pfmGetOsEventEncoding)
|
||||
if err != nil {
|
||||
C.free((unsafe.Pointer)(config))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
klog.V(5).Infof("perf_event_attr: %#v", config)
|
||||
|
||||
return config, nil
|
||||
}
|
34
vendor/github.com/google/cadvisor/perf/collector_no_libpfm.go
generated
vendored
Normal file
34
vendor/github.com/google/cadvisor/perf/collector_no_libpfm.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
//go:build !libpfm || !cgo
|
||||
// +build !libpfm !cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Collector of perf events for a container.
|
||||
package perf
|
||||
|
||||
import (
|
||||
"github.com/google/cadvisor/stats"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func NewCollector(cgroupPath string, events Events, numCores int) stats.Collector {
|
||||
return &stats.NoopCollector{}
|
||||
}
|
||||
|
||||
// Finalize terminates libpfm4 to free resources.
|
||||
func Finalize() {
|
||||
klog.V(1).Info("cAdvisor is build without cgo and/or libpfm support. Nothing to be finalized")
|
||||
}
|
127
vendor/github.com/google/cadvisor/perf/config.go
generated
vendored
Normal file
127
vendor/github.com/google/cadvisor/perf/config.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Configuration for perf event manager.
|
||||
package perf
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
type PerfEvents struct {
|
||||
// Core perf events to be measured.
|
||||
Core Events `json:"core,omitempty"`
|
||||
|
||||
// Uncore perf events to be measured.
|
||||
Uncore Events `json:"uncore,omitempty"`
|
||||
}
|
||||
|
||||
type Events struct {
|
||||
// List of perf events' names to be measured.
|
||||
Events []Group `json:"events"`
|
||||
|
||||
// List of custom perf events' to be measured. It is impossible to
|
||||
// specify some events using their names and in such case you have
|
||||
// to provide lower level configuration.
|
||||
CustomEvents []CustomEvent `json:"custom_events"`
|
||||
}
|
||||
|
||||
type Event string
|
||||
|
||||
type CustomEvent struct {
|
||||
// Type of the event. See perf_event_attr documentation
|
||||
// at man perf_event_open.
|
||||
Type uint32 `json:"type,omitempty"`
|
||||
|
||||
// Symbolically formed event like:
|
||||
// pmu/config=PerfEvent.Config[0],config1=PerfEvent.Config[1],config2=PerfEvent.Config[2]
|
||||
// as described in man perf-stat.
|
||||
Config Config `json:"config"`
|
||||
|
||||
// Human readable name of metric that will be created from the event.
|
||||
Name Event `json:"name"`
|
||||
}
|
||||
|
||||
type Config []uint64
|
||||
|
||||
func (c *Config) UnmarshalJSON(b []byte) error {
|
||||
config := []string{}
|
||||
err := json.Unmarshal(b, &config)
|
||||
if err != nil {
|
||||
klog.Errorf("Unmarshalling %s into slice of strings failed: %q", b, err)
|
||||
return fmt.Errorf("unmarshalling %s into slice of strings failed: %q", b, err)
|
||||
}
|
||||
intermediate := []uint64{}
|
||||
for _, v := range config {
|
||||
uintValue, err := strconv.ParseUint(v, 0, 64)
|
||||
if err != nil {
|
||||
klog.Errorf("Parsing %#v into uint64 failed: %q", v, err)
|
||||
return fmt.Errorf("parsing %#v into uint64 failed: %q", v, err)
|
||||
}
|
||||
intermediate = append(intermediate, uintValue)
|
||||
}
|
||||
*c = intermediate
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseConfig(file *os.File) (events PerfEvents, err error) {
|
||||
decoder := json.NewDecoder(file)
|
||||
err = decoder.Decode(&events)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("unable to load perf events configuration from %q: %q", file.Name(), err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
events []Event
|
||||
array bool
|
||||
}
|
||||
|
||||
func (g *Group) UnmarshalJSON(b []byte) error {
|
||||
var jsonObj interface{}
|
||||
err := json.Unmarshal(b, &jsonObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch obj := jsonObj.(type) {
|
||||
case string:
|
||||
*g = Group{
|
||||
events: []Event{Event(obj)},
|
||||
array: false,
|
||||
}
|
||||
return nil
|
||||
case []interface{}:
|
||||
group := Group{
|
||||
events: make([]Event, 0, len(obj)),
|
||||
array: true,
|
||||
}
|
||||
for _, v := range obj {
|
||||
value, ok := v.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot unmarshal %v", value)
|
||||
}
|
||||
group.events = append(group.events, Event(value))
|
||||
}
|
||||
*g = group
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported type")
|
||||
}
|
75
vendor/github.com/google/cadvisor/perf/manager_libpfm.go
generated
vendored
Normal file
75
vendor/github.com/google/cadvisor/perf/manager_libpfm.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
//go:build libpfm && cgo
|
||||
// +build libpfm,cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Manager of perf events for containers.
|
||||
package perf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/stats"
|
||||
"github.com/google/cadvisor/utils/sysinfo"
|
||||
)
|
||||
|
||||
type manager struct {
|
||||
events PerfEvents
|
||||
onlineCPUs []int
|
||||
cpuToSocket map[int]int
|
||||
stats.NoopDestroy
|
||||
}
|
||||
|
||||
func NewManager(configFile string, topology []info.Node) (stats.Manager, error) {
|
||||
if configFile == "" {
|
||||
return &stats.NoopManager{}, nil
|
||||
}
|
||||
|
||||
file, err := os.Open(configFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read configuration file %q: %w", configFile, err)
|
||||
}
|
||||
|
||||
config, err := parseConfig(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse configuration file %q: %w", configFile, err)
|
||||
}
|
||||
|
||||
if len(config.Core.Events) == 0 && len(config.Uncore.Events) == 0 {
|
||||
return nil, fmt.Errorf("there is no events in config file %q", configFile)
|
||||
}
|
||||
|
||||
onlineCPUs := sysinfo.GetOnlineCPUs(topology)
|
||||
|
||||
cpuToSocket := make(map[int]int)
|
||||
|
||||
for _, cpu := range onlineCPUs {
|
||||
cpuToSocket[cpu] = sysinfo.GetSocketFromCPU(topology, cpu)
|
||||
}
|
||||
|
||||
return &manager{events: config, onlineCPUs: onlineCPUs, cpuToSocket: cpuToSocket}, nil
|
||||
}
|
||||
|
||||
func (m *manager) GetCollector(cgroupPath string) (stats.Collector, error) {
|
||||
collector := newCollector(cgroupPath, m.events, m.onlineCPUs, m.cpuToSocket)
|
||||
err := collector.setup()
|
||||
if err != nil {
|
||||
collector.Destroy()
|
||||
return &stats.NoopCollector{}, err
|
||||
}
|
||||
return collector, nil
|
||||
}
|
31
vendor/github.com/google/cadvisor/perf/manager_no_libpfm.go
generated
vendored
Normal file
31
vendor/github.com/google/cadvisor/perf/manager_no_libpfm.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
//go:build !libpfm || !cgo
|
||||
// +build !libpfm !cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Manager of perf events for containers.
|
||||
package perf
|
||||
|
||||
import (
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/stats"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func NewManager(configFile string, topology []info.Node) (stats.Manager, error) {
|
||||
klog.V(1).Info("cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.")
|
||||
return &stats.NoopManager{}, nil
|
||||
}
|
54
vendor/github.com/google/cadvisor/perf/types_libpfm.go
generated
vendored
Normal file
54
vendor/github.com/google/cadvisor/perf/types_libpfm.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
//go:build libpfm && cgo
|
||||
// +build libpfm,cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Types related to handling perf events that are missing from unix package.
|
||||
package perf
|
||||
|
||||
import "C"
|
||||
import (
|
||||
"io"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// GroupReadFormat allows to read perf event's values for grouped events.
|
||||
// See https://man7.org/linux/man-pages/man2/perf_event_open.2.html section "Reading results" with PERF_FORMAT_GROUP specified.
|
||||
type GroupReadFormat struct {
|
||||
Nr uint64 /* The number of events */
|
||||
TimeEnabled uint64 /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
|
||||
TimeRunning uint64 /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
|
||||
}
|
||||
|
||||
type Values struct {
|
||||
Value uint64 /* The value of the event */
|
||||
ID uint64 /* if PERF_FORMAT_ID */
|
||||
}
|
||||
|
||||
// pfmPerfEncodeArgT represents structure that is used to parse perf event nam
|
||||
// into perf_event_attr using libpfm.
|
||||
type pfmPerfEncodeArgT struct {
|
||||
attr unsafe.Pointer
|
||||
fstr unsafe.Pointer
|
||||
size C.size_t
|
||||
_ C.int // idx
|
||||
_ C.int // cpu
|
||||
_ C.int // flags
|
||||
}
|
||||
|
||||
type readerCloser interface {
|
||||
io.Reader
|
||||
io.Closer
|
||||
}
|
519
vendor/github.com/google/cadvisor/perf/uncore_libpfm.go
generated
vendored
Normal file
519
vendor/github.com/google/cadvisor/perf/uncore_libpfm.go
generated
vendored
Normal file
@ -0,0 +1,519 @@
|
||||
//go:build libpfm && cgo
|
||||
// +build libpfm,cgo
|
||||
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Uncore perf events logic.
|
||||
package perf
|
||||
|
||||
// #cgo CFLAGS: -I/usr/include
|
||||
// #cgo LDFLAGS: -lpfm
|
||||
// #include <perfmon/pfmlib.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/stats"
|
||||
)
|
||||
|
||||
type pmu struct {
|
||||
name string
|
||||
typeOf uint32
|
||||
cpus []uint32
|
||||
}
|
||||
|
||||
const (
|
||||
uncorePMUPrefix = "uncore"
|
||||
pmuTypeFilename = "type"
|
||||
pmuCpumaskFilename = "cpumask"
|
||||
systemDevicesPath = "/sys/devices"
|
||||
rootPerfEventPath = "/sys/fs/cgroup/perf_event"
|
||||
uncorePID = -1
|
||||
)
|
||||
|
||||
func getPMU(pmus uncorePMUs, gotType uint32) (*pmu, error) {
|
||||
for _, pmu := range pmus {
|
||||
if pmu.typeOf == gotType {
|
||||
return &pmu, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("there is no pmu with event type: %#v", gotType)
|
||||
}
|
||||
|
||||
type uncorePMUs map[string]pmu
|
||||
|
||||
func readUncorePMU(path string, name string, cpumaskRegexp *regexp.Regexp) (*pmu, error) {
|
||||
buf, err := os.ReadFile(filepath.Join(path, pmuTypeFilename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
typeString := strings.TrimSpace(string(buf))
|
||||
eventType, err := strconv.ParseUint(typeString, 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err = os.ReadFile(filepath.Join(path, pmuCpumaskFilename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cpus []uint32
|
||||
cpumask := strings.TrimSpace(string(buf))
|
||||
for _, cpu := range cpumaskRegexp.Split(cpumask, -1) {
|
||||
parsedCPU, err := strconv.ParseUint(cpu, 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpus = append(cpus, uint32(parsedCPU))
|
||||
}
|
||||
|
||||
return &pmu{name: name, typeOf: uint32(eventType), cpus: cpus}, nil
|
||||
}
|
||||
|
||||
func getUncorePMUs(devicesPath string) (uncorePMUs, error) {
|
||||
pmus := make(uncorePMUs)
|
||||
|
||||
// Depends on platform, cpu mask could be for example in form "0-1" or "0,1".
|
||||
cpumaskRegexp := regexp.MustCompile("[-,\n]")
|
||||
err := filepath.Walk(devicesPath, func(path string, info os.FileInfo, err error) error {
|
||||
// Skip root path.
|
||||
if path == devicesPath {
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
if strings.HasPrefix(info.Name(), uncorePMUPrefix) {
|
||||
pmu, err := readUncorePMU(path, info.Name(), cpumaskRegexp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pmus[info.Name()] = *pmu
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pmus, nil
|
||||
}
|
||||
|
||||
type uncoreCollector struct {
|
||||
cpuFilesLock sync.Mutex
|
||||
cpuFiles map[int]map[string]group
|
||||
events []Group
|
||||
eventToCustomEvent map[Event]*CustomEvent
|
||||
cpuToSocket map[int]int
|
||||
|
||||
// Handle for mocking purposes.
|
||||
perfEventOpen func(attr *unix.PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
|
||||
ioctlSetInt func(fd int, req uint, value int) error
|
||||
}
|
||||
|
||||
func NewUncoreCollector(cgroupPath string, events PerfEvents, cpuToSocket map[int]int) stats.Collector {
|
||||
|
||||
if cgroupPath != rootPerfEventPath {
|
||||
// Uncore metric doesn't exists for cgroups, only for entire platform.
|
||||
return &stats.NoopCollector{}
|
||||
}
|
||||
|
||||
collector := &uncoreCollector{
|
||||
cpuToSocket: cpuToSocket,
|
||||
perfEventOpen: unix.PerfEventOpen,
|
||||
ioctlSetInt: unix.IoctlSetInt,
|
||||
}
|
||||
|
||||
err := collector.setup(events, systemDevicesPath)
|
||||
if err != nil {
|
||||
klog.Errorf("Perf uncore metrics will not be available: unable to setup uncore perf event collector: %v", err)
|
||||
return &stats.NoopCollector{}
|
||||
}
|
||||
|
||||
return collector
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) createLeaderFileDescriptors(events []Event, groupIndex int, groupPMUs map[Event]uncorePMUs,
|
||||
leaderFileDescriptors map[string]map[uint32]int) (map[string]map[uint32]int, error) {
|
||||
var err error
|
||||
for _, event := range events {
|
||||
eventName, _ := parseEventName(string(event))
|
||||
customEvent, ok := c.eventToCustomEvent[event]
|
||||
if ok {
|
||||
err = c.setupRawEvent(customEvent, groupPMUs[event], groupIndex, leaderFileDescriptors)
|
||||
} else {
|
||||
err = c.setupEvent(eventName, groupPMUs[event], groupIndex, leaderFileDescriptors)
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
c.deleteGroup(groupIndex)
|
||||
return nil, fmt.Errorf("cannot create config from perf event: %v", err)
|
||||
}
|
||||
return leaderFileDescriptors, nil
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) setup(events PerfEvents, devicesPath string) error {
|
||||
readUncorePMUs, err := getUncorePMUs(devicesPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.cpuFiles = make(map[int]map[string]group)
|
||||
c.events = events.Uncore.Events
|
||||
c.eventToCustomEvent = parseUncoreEvents(events.Uncore)
|
||||
c.cpuFilesLock.Lock()
|
||||
defer c.cpuFilesLock.Unlock()
|
||||
|
||||
for i, group := range c.events {
|
||||
// Check what PMUs are needed.
|
||||
groupPMUs, err := parsePMUs(group, readUncorePMUs, c.eventToCustomEvent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = checkGroup(group, groupPMUs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// CPUs file descriptors of group leader needed for perf_event_open.
|
||||
leaderFileDescriptors := make(map[string]map[uint32]int)
|
||||
for _, pmu := range readUncorePMUs {
|
||||
leaderFileDescriptors[pmu.name] = make(map[uint32]int)
|
||||
for _, cpu := range pmu.cpus {
|
||||
leaderFileDescriptors[pmu.name][cpu] = groupLeaderFileDescriptor
|
||||
}
|
||||
}
|
||||
leaderFileDescriptors, err = c.createLeaderFileDescriptors(group.events, i, groupPMUs, leaderFileDescriptors)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
continue
|
||||
}
|
||||
// Group is prepared so we should reset and enable counting.
|
||||
for _, pmuCPUs := range leaderFileDescriptors {
|
||||
for _, fd := range pmuCPUs {
|
||||
// Call only for used PMUs.
|
||||
if fd != groupLeaderFileDescriptor {
|
||||
err = c.ioctlSetInt(fd, unix.PERF_EVENT_IOC_RESET, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.ioctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkGroup(group Group, eventPMUs map[Event]uncorePMUs) error {
|
||||
if group.array {
|
||||
var pmu uncorePMUs
|
||||
for _, event := range group.events {
|
||||
if len(eventPMUs[event]) > 1 {
|
||||
return fmt.Errorf("the events in group usually have to be from single PMU, try reorganizing the \"%v\" group", group.events)
|
||||
}
|
||||
if len(eventPMUs[event]) == 1 {
|
||||
if pmu == nil {
|
||||
pmu = eventPMUs[event]
|
||||
continue
|
||||
}
|
||||
|
||||
eq := reflect.DeepEqual(pmu, eventPMUs[event])
|
||||
if !eq {
|
||||
return fmt.Errorf("the events in group usually have to be from the same PMU, try reorganizing the \"%v\" group", group.events)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if len(eventPMUs[group.events[0]]) < 1 {
|
||||
return fmt.Errorf("the event %q don't have any PMU to count with", group.events[0])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseEventName(eventName string) (string, string) {
|
||||
// First "/" separate pmu prefix and event name
|
||||
// ex. "uncore_imc_0/cas_count_read" -> uncore_imc_0 and cas_count_read.
|
||||
splittedEvent := strings.SplitN(eventName, "/", 2)
|
||||
var pmuPrefix = ""
|
||||
if len(splittedEvent) == 2 {
|
||||
pmuPrefix = splittedEvent[0]
|
||||
eventName = splittedEvent[1]
|
||||
}
|
||||
return eventName, pmuPrefix
|
||||
}
|
||||
|
||||
func parsePMUs(group Group, pmus uncorePMUs, customEvents map[Event]*CustomEvent) (map[Event]uncorePMUs, error) {
|
||||
eventPMUs := make(map[Event]uncorePMUs)
|
||||
for _, event := range group.events {
|
||||
_, prefix := parseEventName(string(event))
|
||||
custom, ok := customEvents[event]
|
||||
if ok {
|
||||
if custom.Type != 0 {
|
||||
pmu, err := getPMU(pmus, custom.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventPMUs[event] = uncorePMUs{pmu.name: *pmu}
|
||||
continue
|
||||
}
|
||||
}
|
||||
eventPMUs[event] = obtainPMUs(prefix, pmus)
|
||||
}
|
||||
|
||||
return eventPMUs, nil
|
||||
}
|
||||
|
||||
func obtainPMUs(want string, gotPMUs uncorePMUs) uncorePMUs {
|
||||
pmus := make(uncorePMUs)
|
||||
if want == "" {
|
||||
return pmus
|
||||
}
|
||||
for _, pmu := range gotPMUs {
|
||||
if strings.HasPrefix(pmu.name, want) {
|
||||
pmus[pmu.name] = pmu
|
||||
}
|
||||
}
|
||||
|
||||
return pmus
|
||||
}
|
||||
|
||||
func parseUncoreEvents(events Events) map[Event]*CustomEvent {
|
||||
eventToCustomEvent := map[Event]*CustomEvent{}
|
||||
for _, group := range events.Events {
|
||||
for _, uncoreEvent := range group.events {
|
||||
for _, customEvent := range events.CustomEvents {
|
||||
if uncoreEvent == customEvent.Name {
|
||||
eventToCustomEvent[customEvent.Name] = &customEvent
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return eventToCustomEvent
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) Destroy() {
|
||||
c.cpuFilesLock.Lock()
|
||||
defer c.cpuFilesLock.Unlock()
|
||||
|
||||
for groupIndex := range c.cpuFiles {
|
||||
c.deleteGroup(groupIndex)
|
||||
delete(c.cpuFiles, groupIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) UpdateStats(stats *info.ContainerStats) error {
|
||||
klog.V(5).Info("Attempting to update uncore perf_event stats")
|
||||
|
||||
for _, groupPMUs := range c.cpuFiles {
|
||||
for pmu, group := range groupPMUs {
|
||||
for cpu, file := range group.cpuFiles[group.leaderName] {
|
||||
stat, err := readPerfUncoreStat(file, group, cpu, pmu, c.cpuToSocket)
|
||||
if err != nil {
|
||||
klog.Warningf("Unable to read from perf_event_file (event: %q, CPU: %d) for %q: %q", group.leaderName, cpu, pmu, err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
stats.PerfUncoreStats = append(stats.PerfUncoreStats, stat...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) setupEvent(name string, pmus uncorePMUs, groupIndex int, leaderFileDescriptors map[string]map[uint32]int) error {
|
||||
if !isLibpfmInitialized {
|
||||
return fmt.Errorf("libpfm4 is not initialized, cannot proceed with setting perf events up")
|
||||
}
|
||||
|
||||
klog.V(5).Infof("Setting up uncore perf event %s", name)
|
||||
|
||||
config, err := readPerfEventAttr(name, pfmGetOsEventEncoding)
|
||||
if err != nil {
|
||||
C.free((unsafe.Pointer)(config))
|
||||
return err
|
||||
}
|
||||
|
||||
// Register event for all memory controllers.
|
||||
for _, pmu := range pmus {
|
||||
config.Type = pmu.typeOf
|
||||
isGroupLeader := leaderFileDescriptors[pmu.name][pmu.cpus[0]] == groupLeaderFileDescriptor
|
||||
setAttributes(config, isGroupLeader)
|
||||
leaderFileDescriptors[pmu.name], err = c.registerEvent(eventInfo{name, config, uncorePID, groupIndex, isGroupLeader}, pmu, leaderFileDescriptors[pmu.name])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Clean memory allocated by C code.
|
||||
C.free(unsafe.Pointer(config))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) registerEvent(eventInfo eventInfo, pmu pmu, leaderFileDescriptors map[uint32]int) (map[uint32]int, error) {
|
||||
newLeaderFileDescriptors := make(map[uint32]int)
|
||||
isGroupLeader := false
|
||||
for _, cpu := range pmu.cpus {
|
||||
groupFd, flags := leaderFileDescriptors[cpu], 0
|
||||
fd, err := c.perfEventOpen(eventInfo.config, eventInfo.pid, int(cpu), groupFd, flags)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting up perf event %#v failed: %q | (pmu: %q, groupFd: %d, cpu: %d)", eventInfo.config, err, pmu, groupFd, cpu)
|
||||
}
|
||||
perfFile := os.NewFile(uintptr(fd), eventInfo.name)
|
||||
if perfFile == nil {
|
||||
return nil, fmt.Errorf("unable to create os.File from file descriptor %#v", fd)
|
||||
}
|
||||
|
||||
c.addEventFile(eventInfo.groupIndex, eventInfo.name, pmu.name, int(cpu), perfFile)
|
||||
|
||||
// If group leader, save fd for others.
|
||||
if leaderFileDescriptors[cpu] == groupLeaderFileDescriptor {
|
||||
newLeaderFileDescriptors[cpu] = fd
|
||||
isGroupLeader = true
|
||||
}
|
||||
}
|
||||
|
||||
if isGroupLeader {
|
||||
return newLeaderFileDescriptors, nil
|
||||
}
|
||||
return leaderFileDescriptors, nil
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) addEventFile(index int, name string, pmu string, cpu int, perfFile *os.File) {
|
||||
_, ok := c.cpuFiles[index]
|
||||
if !ok {
|
||||
c.cpuFiles[index] = map[string]group{}
|
||||
}
|
||||
|
||||
_, ok = c.cpuFiles[index][pmu]
|
||||
if !ok {
|
||||
c.cpuFiles[index][pmu] = group{
|
||||
cpuFiles: map[string]map[int]readerCloser{},
|
||||
leaderName: name,
|
||||
}
|
||||
}
|
||||
|
||||
_, ok = c.cpuFiles[index][pmu].cpuFiles[name]
|
||||
if !ok {
|
||||
c.cpuFiles[index][pmu].cpuFiles[name] = map[int]readerCloser{}
|
||||
}
|
||||
|
||||
c.cpuFiles[index][pmu].cpuFiles[name][cpu] = perfFile
|
||||
|
||||
// Check if name is already stored.
|
||||
for _, have := range c.cpuFiles[index][pmu].names {
|
||||
if name == have {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise save it.
|
||||
c.cpuFiles[index][pmu] = group{
|
||||
cpuFiles: c.cpuFiles[index][pmu].cpuFiles,
|
||||
names: append(c.cpuFiles[index][pmu].names, name),
|
||||
leaderName: c.cpuFiles[index][pmu].leaderName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) setupRawEvent(event *CustomEvent, pmus uncorePMUs, groupIndex int, leaderFileDescriptors map[string]map[uint32]int) error {
|
||||
klog.V(5).Infof("Setting up raw perf uncore event %#v", event)
|
||||
|
||||
for _, pmu := range pmus {
|
||||
newEvent := CustomEvent{
|
||||
Type: pmu.typeOf,
|
||||
Config: event.Config,
|
||||
Name: event.Name,
|
||||
}
|
||||
config := createPerfEventAttr(newEvent)
|
||||
isGroupLeader := leaderFileDescriptors[pmu.name][pmu.cpus[0]] == groupLeaderFileDescriptor
|
||||
setAttributes(config, isGroupLeader)
|
||||
var err error
|
||||
leaderFileDescriptors[pmu.name], err = c.registerEvent(eventInfo{string(newEvent.Name), config, uncorePID, groupIndex, isGroupLeader}, pmu, leaderFileDescriptors[pmu.name])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *uncoreCollector) deleteGroup(groupIndex int) {
|
||||
groupPMUs := c.cpuFiles[groupIndex]
|
||||
for pmu, group := range groupPMUs {
|
||||
for name, cpus := range group.cpuFiles {
|
||||
for cpu, file := range cpus {
|
||||
klog.V(5).Infof("Closing uncore perf event file descriptor for event %q, PMU %s and CPU %d", name, pmu, cpu)
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
klog.Warningf("Unable to close perf event file descriptor for event %q, PMU %s and CPU %d", name, pmu, cpu)
|
||||
}
|
||||
}
|
||||
delete(group.cpuFiles, name)
|
||||
}
|
||||
delete(groupPMUs, pmu)
|
||||
}
|
||||
delete(c.cpuFiles, groupIndex)
|
||||
}
|
||||
|
||||
func readPerfUncoreStat(file readerCloser, group group, cpu int, pmu string, cpuToSocket map[int]int) ([]info.PerfUncoreStat, error) {
|
||||
values, err := getPerfValues(file, group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
socket, ok := cpuToSocket[cpu]
|
||||
if !ok {
|
||||
// Socket is unknown.
|
||||
socket = -1
|
||||
}
|
||||
|
||||
perfUncoreStats := make([]info.PerfUncoreStat, len(values))
|
||||
for i, value := range values {
|
||||
klog.V(5).Infof("Read metric for event %q for cpu %d from pmu %q: %d", value.Name, cpu, pmu, value.Value)
|
||||
perfUncoreStats[i] = info.PerfUncoreStat{
|
||||
PerfValue: value,
|
||||
Socket: socket,
|
||||
PMU: pmu,
|
||||
}
|
||||
}
|
||||
|
||||
return perfUncoreStats, nil
|
||||
}
|
171
vendor/github.com/google/cadvisor/resctrl/collector.go
generated
vendored
Normal file
171
vendor/github.com/google/cadvisor/resctrl/collector.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Collector of resctrl for a container.
|
||||
package resctrl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
const noInterval = 0
|
||||
|
||||
type collector struct {
|
||||
id string
|
||||
interval time.Duration
|
||||
getContainerPids func() ([]string, error)
|
||||
resctrlPath string
|
||||
running bool
|
||||
destroyed bool
|
||||
numberOfNUMANodes int
|
||||
vendorID string
|
||||
mu sync.Mutex
|
||||
inHostNamespace bool
|
||||
}
|
||||
|
||||
func newCollector(id string, getContainerPids func() ([]string, error), interval time.Duration, numberOfNUMANodes int, vendorID string, inHostNamespace bool) *collector {
|
||||
return &collector{id: id, interval: interval, getContainerPids: getContainerPids, numberOfNUMANodes: numberOfNUMANodes,
|
||||
vendorID: vendorID, mu: sync.Mutex{}, inHostNamespace: inHostNamespace}
|
||||
}
|
||||
|
||||
func (c *collector) setup() error {
|
||||
var err error
|
||||
c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
|
||||
|
||||
if c.interval != noInterval {
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
|
||||
} else {
|
||||
c.running = true
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(c.interval)
|
||||
c.mu.Lock()
|
||||
if c.destroyed {
|
||||
break
|
||||
}
|
||||
klog.V(5).Infof("Trying to check %q containers control group.", c.id)
|
||||
if c.running {
|
||||
err = c.checkMonitoringGroup()
|
||||
if err != nil {
|
||||
c.running = false
|
||||
klog.Errorf("Failed to check %q resctrl collector control group: %s \n Trying again in next intervals.", c.id, err)
|
||||
}
|
||||
} else {
|
||||
c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
|
||||
if err != nil {
|
||||
c.running = false
|
||||
klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
// There is no interval set, if setup fail, stop.
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to setup container %q resctrl collector: %w", c.id, err)
|
||||
}
|
||||
c.running = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) checkMonitoringGroup() error {
|
||||
newPath, err := prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't obtain mon_group path: %v", err)
|
||||
}
|
||||
|
||||
// Check if container moved between control groups.
|
||||
if newPath != c.resctrlPath {
|
||||
err = c.clear()
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't clear previous monitoring group: %w", err)
|
||||
}
|
||||
c.resctrlPath = newPath
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) UpdateStats(stats *info.ContainerStats) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.running {
|
||||
stats.Resctrl = info.ResctrlStats{}
|
||||
|
||||
resctrlStats, err := getIntelRDTStatsFrom(c.resctrlPath, c.vendorID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, c.numberOfNUMANodes)
|
||||
stats.Resctrl.Cache = make([]info.CacheStats, 0, c.numberOfNUMANodes)
|
||||
|
||||
for _, numaNodeStats := range *resctrlStats.MBMStats {
|
||||
stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth,
|
||||
info.MemoryBandwidthStats{
|
||||
TotalBytes: numaNodeStats.MBMTotalBytes,
|
||||
LocalBytes: numaNodeStats.MBMLocalBytes,
|
||||
})
|
||||
}
|
||||
|
||||
for _, numaNodeStats := range *resctrlStats.CMTStats {
|
||||
stats.Resctrl.Cache = append(stats.Resctrl.Cache,
|
||||
info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) Destroy() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.running = false
|
||||
err := c.clear()
|
||||
if err != nil {
|
||||
klog.Errorf("trying to destroy %q resctrl collector but: %v", c.id, err)
|
||||
}
|
||||
c.destroyed = true
|
||||
}
|
||||
|
||||
func (c *collector) clear() error {
|
||||
// Not allowed to remove root or undefined resctrl directory.
|
||||
if c.id != rootContainer && c.resctrlPath != "" {
|
||||
// Remove only own prepared mon group.
|
||||
if strings.HasPrefix(filepath.Base(c.resctrlPath), monGroupPrefix) {
|
||||
err := os.RemoveAll(c.resctrlPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't clear mon_group: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
79
vendor/github.com/google/cadvisor/resctrl/manager.go
generated
vendored
Normal file
79
vendor/github.com/google/cadvisor/resctrl/manager.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Manager of resctrl for containers.
|
||||
package resctrl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container/raw"
|
||||
"github.com/google/cadvisor/stats"
|
||||
)
|
||||
|
||||
type Manager interface {
|
||||
Destroy()
|
||||
GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error)
|
||||
}
|
||||
|
||||
type manager struct {
|
||||
stats.NoopDestroy
|
||||
interval time.Duration
|
||||
vendorID string
|
||||
inHostNamespace bool
|
||||
}
|
||||
|
||||
func (m *manager) GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error) {
|
||||
collector := newCollector(containerName, getContainerPids, m.interval, numberOfNUMANodes, m.vendorID, m.inHostNamespace)
|
||||
err := collector.setup()
|
||||
if err != nil {
|
||||
return &stats.NoopCollector{}, err
|
||||
}
|
||||
|
||||
return collector, nil
|
||||
}
|
||||
|
||||
func NewManager(interval time.Duration, setup func() error, vendorID string, inHostNamespace bool) (Manager, error) {
|
||||
err := setup()
|
||||
if err != nil {
|
||||
return &NoopManager{}, err
|
||||
}
|
||||
|
||||
if !isResctrlInitialized {
|
||||
return &NoopManager{}, errors.New("the resctrl isn't initialized")
|
||||
}
|
||||
if !(enabledCMT || enabledMBM) {
|
||||
return &NoopManager{}, errors.New("there are no monitoring features available")
|
||||
}
|
||||
|
||||
if !*raw.DockerOnly {
|
||||
klog.Warning("--docker_only should be set when collecting Resctrl metrics! See the runtime docs.")
|
||||
}
|
||||
|
||||
return &manager{interval: interval, vendorID: vendorID, inHostNamespace: inHostNamespace}, nil
|
||||
}
|
||||
|
||||
type NoopManager struct {
|
||||
stats.NoopDestroy
|
||||
}
|
||||
|
||||
func (np *NoopManager) GetCollector(_ string, _ func() ([]string, error), _ int) (stats.Collector, error) {
|
||||
return &stats.NoopCollector{}, nil
|
||||
}
|
369
vendor/github.com/google/cadvisor/resctrl/utils.go
generated
vendored
Normal file
369
vendor/github.com/google/cadvisor/resctrl/utils.go
generated
vendored
Normal file
@ -0,0 +1,369 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Utilities.
|
||||
package resctrl
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
|
||||
"github.com/opencontainers/runc/libcontainer/intelrdt"
|
||||
)
|
||||
|
||||
// Names of the files and directories the kernel exposes inside a
// resctrl group, plus error messages and naming conventions used by
// this package.
const (
	cpuCgroup                = "cpu"
	rootContainer            = "/"
	monitoringGroupDir       = "mon_groups"
	processTask              = "task"
	cpusFileName             = "cpus"
	cpusListFileName         = "cpus_list"
	schemataFileName         = "schemata"
	tasksFileName            = "tasks"
	modeFileName             = "mode"
	sizeFileName             = "size"
	infoDirName              = "info"
	monDataDirName           = "mon_data"
	monGroupsDirName         = "mon_groups"
	noPidsPassedError        = "there are no pids passed"
	noContainerNameError     = "there are no container name passed"
	noControlGroupFoundError = "couldn't find control group matching container"
	llcOccupancyFileName     = "llc_occupancy"
	mbmLocalBytesFileName    = "mbm_local_bytes"
	mbmTotalBytesFileName    = "mbm_total_bytes"
	containerPrefix          = '/'
	minContainerNameLen      = 2 // "/<container_name>" e.g. "/a"
	unavailable              = "Unavailable"
	monGroupPrefix           = "cadvisor"
)

// Package state populated by Setup(), plus the set of resctrl
// bookkeeping entries that are NOT nested groups.
var (
	rootResctrl          = ""
	pidsPath             = ""
	processPath          = "/proc"
	enabledMBM           = false
	enabledCMT           = false
	isResctrlInitialized = false
	// groupDirectories lists the file/dir names present in every
	// resctrl group; directory entries absent from this set are
	// treated as nested control/monitoring groups by findGroup.
	groupDirectories = map[string]struct{}{
		cpusFileName:     {},
		cpusListFileName: {},
		infoDirName:      {},
		monDataDirName:   {},
		monGroupsDirName: {},
		schemataFileName: {},
		tasksFileName:    {},
		modeFileName:     {},
		sizeFileName:     {},
	}
)
|
||||
|
||||
func Setup() error {
|
||||
var err error
|
||||
rootResctrl, err = intelrdt.Root()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize resctrl: %v", err)
|
||||
}
|
||||
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
pidsPath = fs2.UnifiedMountpoint
|
||||
} else {
|
||||
pidsPath = filepath.Join(fs2.UnifiedMountpoint, cpuCgroup)
|
||||
}
|
||||
|
||||
enabledMBM = intelrdt.IsMBMEnabled()
|
||||
enabledCMT = intelrdt.IsCMTEnabled()
|
||||
|
||||
isResctrlInitialized = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareMonitoringGroup locates (or creates) the resctrl monitoring
// group for the given container and returns its path. The root
// container maps directly to the resctrl root. Pids are obtained via
// getContainerPids; inHostNamespace selects /proc vs /rootfs/proc for
// thread enumeration.
func prepareMonitoringGroup(containerName string, getContainerPids func() ([]string, error), inHostNamespace bool) (string, error) {
	if containerName == rootContainer {
		return rootResctrl, nil
	}

	pids, err := getContainerPids()
	if err != nil {
		return "", err
	}

	if len(pids) == 0 {
		return "", fmt.Errorf("couldn't obtain %q container pids: there is no pids in cgroup", containerName)
	}

	// Firstly, find the control group to which the container belongs.
	// Consider the root group.
	controlGroupPath, err := findGroup(rootResctrl, pids, true, false)
	if err != nil {
		return "", fmt.Errorf("%q %q: %q", noControlGroupFoundError, containerName, err)
	}
	if controlGroupPath == "" {
		return "", fmt.Errorf("%q %q", noControlGroupFoundError, containerName)
	}

	// Check if there is any monitoring group.
	// Exclusive match: the mon group must hold exactly this container's pids.
	monGroupPath, err := findGroup(filepath.Join(controlGroupPath, monGroupsDirName), pids, false, true)
	if err != nil {
		return "", fmt.Errorf("couldn't find monitoring group matching %q container: %v", containerName, err)
	}

	// Prepare new one if not exists.
	if monGroupPath == "" {
		// Remove leading prefix.
		// e.g. /my/container -> my/container
		if len(containerName) >= minContainerNameLen && containerName[0] == containerPrefix {
			containerName = containerName[1:]
		}

		// Add own prefix and use `-` instead `/`.
		// e.g. my/container -> cadvisor-my-container
		properContainerName := fmt.Sprintf("%s-%s", monGroupPrefix, strings.Replace(containerName, "/", "-", -1))
		monGroupPath = filepath.Join(controlGroupPath, monitoringGroupDir, properContainerName)

		err = os.MkdirAll(monGroupPath, os.ModePerm)
		if err != nil {
			return "", fmt.Errorf("couldn't create monitoring group directory for %q container: %w", containerName, err)
		}

		if !inHostNamespace {
			// NOTE(review): mutates package-level processPath; assumes all
			// callers share the same namespace setting.
			processPath = "/rootfs/proc"
		}

		// Assign every thread of every container process to the new
		// monitoring group; on failure, best-effort remove the group.
		for _, pid := range pids {
			processThreads, err := getAllProcessThreads(filepath.Join(processPath, pid, processTask))
			if err != nil {
				return "", err
			}
			for _, thread := range processThreads {
				err = intelrdt.WriteIntelRdtTasks(monGroupPath, thread)
				if err != nil {
					secondError := os.Remove(monGroupPath)
					if secondError != nil {
						return "", fmt.Errorf(
							"coudn't assign pids to %q container monitoring group: %w \n couldn't clear %q monitoring group: %v",
							containerName, err, containerName, secondError)
					}
					return "", fmt.Errorf("coudn't assign pids to %q container monitoring group: %w", containerName, err)
				}
			}
		}
	}

	return monGroupPath, nil
}
|
||||
|
||||
func getPids(containerName string) ([]int, error) {
|
||||
if len(containerName) == 0 {
|
||||
// No container name passed.
|
||||
return nil, fmt.Errorf(noContainerNameError)
|
||||
}
|
||||
pids, err := cgroups.GetAllPids(filepath.Join(pidsPath, containerName))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't obtain pids for %q container: %v", containerName, err)
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
// getAllProcessThreads obtains all available processes from directory.
// e.g. ls /proc/4215/task/ -> 4215, 4216, 4217, 4218
// func will return [4215, 4216, 4217, 4218].
func getAllProcessThreads(path string) ([]int, error) {
	threads := make([]int, 0)

	entries, err := os.ReadDir(path)
	if err != nil {
		return threads, err
	}

	for _, entry := range entries {
		// Every entry of a task directory is expected to be a numeric
		// thread id; anything else is an error.
		tid, convErr := strconv.Atoi(entry.Name())
		if convErr != nil {
			return nil, fmt.Errorf("couldn't parse %q dir: %v", entry.Name(), convErr)
		}
		threads = append(threads, tid)
	}

	return threads, nil
}
|
||||
|
||||
// findGroup returns the path of a control/monitoring group in which the pids are.
|
||||
func findGroup(group string, pids []string, includeGroup bool, exclusive bool) (string, error) {
|
||||
if len(pids) == 0 {
|
||||
return "", fmt.Errorf(noPidsPassedError)
|
||||
}
|
||||
|
||||
availablePaths := make([]string, 0)
|
||||
if includeGroup {
|
||||
availablePaths = append(availablePaths, group)
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(group)
|
||||
for _, file := range files {
|
||||
if _, ok := groupDirectories[file.Name()]; !ok {
|
||||
availablePaths = append(availablePaths, filepath.Join(group, file.Name()))
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("couldn't obtain groups paths: %w", err)
|
||||
}
|
||||
|
||||
for _, path := range availablePaths {
|
||||
groupFound, err := arePIDsInGroup(path, pids, exclusive)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if groupFound {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// arePIDsInGroup returns true if all of the pids are within control group.
|
||||
func arePIDsInGroup(path string, pids []string, exclusive bool) (bool, error) {
|
||||
if len(pids) == 0 {
|
||||
return false, fmt.Errorf("couldn't obtain pids from %q path: %v", path, noPidsPassedError)
|
||||
}
|
||||
|
||||
tasks, err := readTasksFile(filepath.Join(path, tasksFileName))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
any := false
|
||||
for _, pid := range pids {
|
||||
_, ok := tasks[pid]
|
||||
if !ok {
|
||||
// There are missing pids within group.
|
||||
if any {
|
||||
return false, fmt.Errorf("there should be all pids in group")
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
any = true
|
||||
}
|
||||
|
||||
// Check if there should be only passed pids in group.
|
||||
if exclusive {
|
||||
if len(tasks) != len(pids) {
|
||||
return false, fmt.Errorf("group should have container pids only")
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// readTasksFile returns pids map from given tasks path.
// Each line of the tasks file becomes a key of the returned set; the
// map is always non-nil, even on error.
func readTasksFile(tasksPath string) (map[string]struct{}, error) {
	tasks := map[string]struct{}{}

	f, err := os.Open(tasksPath)
	if err != nil {
		return tasks, fmt.Errorf("couldn't read tasks file from %q path: %w", tasksPath, err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		tasks[scanner.Text()] = struct{}{}
	}
	if err := scanner.Err(); err != nil {
		return tasks, fmt.Errorf("couldn't obtain pids from %q path: %w", tasksPath, err)
	}

	return tasks, nil
}
|
||||
|
||||
func readStatFrom(path string, vendorID string) (uint64, error) {
|
||||
context, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
contextString := string(bytes.TrimSpace(context))
|
||||
|
||||
if contextString == unavailable {
|
||||
err := fmt.Errorf("\"Unavailable\" value from file %q", path)
|
||||
if vendorID == "AuthenticAMD" {
|
||||
kernelBugzillaLink := "https://bugzilla.kernel.org/show_bug.cgi?id=213311"
|
||||
err = fmt.Errorf("%v, possible bug: %q", err, kernelBugzillaLink)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
stat, err := strconv.ParseUint(contextString, 10, 64)
|
||||
if err != nil {
|
||||
return stat, fmt.Errorf("unable to parse %q as a uint from file %q", string(context), path)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
}
|
||||
|
||||
// getIntelRDTStatsFrom aggregates CMT (llc_occupancy) and MBM
// (mbm_total_bytes, mbm_local_bytes) counters from every directory
// under <path>/mon_data, honoring the feature flags probed by Setup().
// The per-directory entries become NUMA-node stats entries in order.
func getIntelRDTStatsFrom(path string, vendorID string) (intelrdt.Stats, error) {
	stats := intelrdt.Stats{}

	statsDirectories, err := filepath.Glob(filepath.Join(path, monDataDirName, "*"))
	if err != nil {
		return stats, err
	}

	if len(statsDirectories) == 0 {
		return stats, fmt.Errorf("there is no mon_data stats directories: %q", path)
	}

	var cmtStats []intelrdt.CMTNumaNodeStats
	var mbmStats []intelrdt.MBMNumaNodeStats

	for _, dir := range statsDirectories {
		if enabledCMT {
			llcOccupancy, err := readStatFrom(filepath.Join(dir, llcOccupancyFileName), vendorID)
			if err != nil {
				return stats, err
			}
			cmtStats = append(cmtStats, intelrdt.CMTNumaNodeStats{LLCOccupancy: llcOccupancy})
		}
		if enabledMBM {
			mbmTotalBytes, err := readStatFrom(filepath.Join(dir, mbmTotalBytesFileName), vendorID)
			if err != nil {
				return stats, err
			}
			mbmLocalBytes, err := readStatFrom(filepath.Join(dir, mbmLocalBytesFileName), vendorID)
			if err != nil {
				return stats, err
			}
			mbmStats = append(mbmStats, intelrdt.MBMNumaNodeStats{
				MBMTotalBytes: mbmTotalBytes,
				MBMLocalBytes: mbmLocalBytes,
			})
		}
	}

	stats.CMTStats = &cmtStats
	stats.MBMStats = &mbmStats

	return stats, nil
}
|
44
vendor/github.com/google/cadvisor/stats/noop.go
generated
vendored
Normal file
44
vendor/github.com/google/cadvisor/stats/noop.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Noop perf Manager and Collector.
|
||||
package stats
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// NoopManager implements Manager without collecting anything; it
// embeds NoopDestroy for a no-op Destroy.
type NoopManager struct {
	NoopDestroy
}

// NoopDestroy provides a Destroy method that only logs.
type NoopDestroy struct{}

// Destroy logs the call at high verbosity and does nothing else.
func (nsd NoopDestroy) Destroy() {
	klog.V(5).Info("No-op Destroy function called")
}

// GetCollector always returns a NoopCollector and never fails.
func (m *NoopManager) GetCollector(cgroup string) (Collector, error) {
	return &NoopCollector{}, nil
}

// NoopCollector implements Collector without updating any stats.
type NoopCollector struct {
	NoopDestroy
}

// UpdateStats leaves the passed stats untouched and always succeeds.
func (c *NoopCollector) UpdateStats(stats *v1.ContainerStats) error {
	return nil
}
|
35
vendor/github.com/google/cadvisor/stats/types.go
generated
vendored
Normal file
35
vendor/github.com/google/cadvisor/stats/types.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
// Copyright 2020 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Handling statistics that are fully controlled in cAdvisor
|
||||
package stats
|
||||
|
||||
import info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
// This is supposed to store global state about an cAdvisor metrics collector.
// cAdvisor manager will call Destroy() when it stops.
// For each container detected by the cAdvisor manager, it will call
// GetCollector() with the devices cgroup path for that container.
// GetCollector() is supposed to return an object that can update
// external stats for that container.
type Manager interface {
	// Destroy releases any global collector state.
	Destroy()
	// GetCollector returns a Collector for the container identified
	// by its devices cgroup path.
	GetCollector(deviceCgroup string) (Collector, error)
}

// Collector can update ContainerStats by adding more metrics.
type Collector interface {
	// Destroy releases any per-container collector state.
	Destroy()
	// UpdateStats adds this collector's metrics to the passed stats.
	UpdateStats(*info.ContainerStats) error
}
|
28
vendor/github.com/google/cadvisor/storage/common_flags.go
generated
vendored
Normal file
28
vendor/github.com/google/cadvisor/storage/common_flags.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Command-line flags shared by storage drivers that write container
// stats to an external database backend.
var ArgDbUsername = flag.String("storage_driver_user", "root", "database username")
var ArgDbPassword = flag.String("storage_driver_password", "root", "database password")
var ArgDbHost = flag.String("storage_driver_host", "localhost:8086", "database host:port")
var ArgDbName = flag.String("storage_driver_db", "cadvisor", "database name")
var ArgDbTable = flag.String("storage_driver_table", "stats", "table name")
var ArgDbIsSecure = flag.Bool("storage_driver_secure", false, "use secure connection with database")
var ArgDbBufferDuration = flag.Duration("storage_driver_buffer_duration", 60*time.Second, "Writes in the storage driver will be buffered for this duration, and committed to the non memory backends as a single transaction")
|
59
vendor/github.com/google/cadvisor/storage/storage.go
generated
vendored
Normal file
59
vendor/github.com/google/cadvisor/storage/storage.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// StorageDriver is a sink for per-container stats samples.
type StorageDriver interface {
	// AddStats persists one stats sample for the given container.
	AddStats(cInfo *info.ContainerInfo, stats *info.ContainerStats) error

	// Close will clear the state of the storage driver. The elements
	// stored in the underlying storage may or may not be deleted depending
	// on the implementation of the storage driver.
	Close() error
}

// StorageDriverFunc constructs a StorageDriver.
type StorageDriverFunc func() (StorageDriver, error)

// registeredPlugins maps driver names to factories; populated via
// RegisterStorageDriver.
var registeredPlugins = map[string](StorageDriverFunc){}
|
||||
|
||||
// RegisterStorageDriver makes a driver factory available under name.
// A later registration with the same name overwrites an earlier one.
func RegisterStorageDriver(name string, f StorageDriverFunc) {
	registeredPlugins[name] = f
}
|
||||
|
||||
func New(name string) (StorageDriver, error) {
|
||||
if name == "" {
|
||||
return nil, nil
|
||||
}
|
||||
f, ok := registeredPlugins[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown backend storage driver: %s", name)
|
||||
}
|
||||
return f()
|
||||
}
|
||||
|
||||
func ListDrivers() []string {
|
||||
drivers := make([]string, 0, len(registeredPlugins))
|
||||
for name := range registeredPlugins {
|
||||
drivers = append(drivers, name)
|
||||
}
|
||||
sort.Strings(drivers)
|
||||
return drivers
|
||||
}
|
74
vendor/github.com/google/cadvisor/summary/buffer.go
generated
vendored
Normal file
74
vendor/github.com/google/cadvisor/summary/buffer.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package summary
|
||||
|
||||
import (
|
||||
info "github.com/google/cadvisor/info/v2"
|
||||
)
|
||||
|
||||
// Manages a buffer of usage samples.
// This is similar to stats buffer in cache/memory.
// The main difference is that we do not pre-allocate the buffer as most containers
// won't live that long.
type SamplesBuffer struct {
	// list of collected samples.
	samples []info.Usage
	// maximum size this buffer can grow to.
	maxSize int
	// index for the latest sample.
	index int
}

// Initializes an empty buffer.
// index starts at -1 so the first Add lands at slot 0.
func NewSamplesBuffer(size int) *SamplesBuffer {
	return &SamplesBuffer{
		index:   -1,
		maxSize: size,
	}
}

// Returns the current number of samples in the buffer.
func (s *SamplesBuffer) Size() int {
	return len(s.samples)
}

// Add an element to the buffer. Oldest one is overwritten if required.
func (s *SamplesBuffer) Add(stat info.Usage) {
	if len(s.samples) < s.maxSize {
		// Buffer still growing: append and advance the latest index.
		s.samples = append(s.samples, stat)
		s.index++
		return
	}
	// Buffer full: wrap the ring index and overwrite the oldest slot.
	s.index = (s.index + 1) % s.maxSize
	s.samples[s.index] = stat
}
|
||||
|
||||
// Returns pointers to the last 'n' stats.
|
||||
func (s *SamplesBuffer) RecentStats(n int) []*info.Usage {
|
||||
if n > len(s.samples) {
|
||||
n = len(s.samples)
|
||||
}
|
||||
start := s.index - (n - 1)
|
||||
if start < 0 {
|
||||
start += len(s.samples)
|
||||
}
|
||||
|
||||
out := make([]*info.Usage, n)
|
||||
for i := 0; i < n; i++ {
|
||||
index := (start + i) % len(s.samples)
|
||||
out[i] = &s.samples[index]
|
||||
}
|
||||
return out
|
||||
}
|
201
vendor/github.com/google/cadvisor/summary/percentiles.go
generated
vendored
Normal file
201
vendor/github.com/google/cadvisor/summary/percentiles.go
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Utility methods to calculate percentiles.
|
||||
|
||||
package summary
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
info "github.com/google/cadvisor/info/v2"
|
||||
)
|
||||
|
||||
const secondsToMilliSeconds = 1000
|
||||
const milliSecondsToNanoSeconds = 1000000
|
||||
const secondsToNanoSeconds = secondsToMilliSeconds * milliSecondsToNanoSeconds
|
||||
|
||||
// Uint64Slice is a sort.Interface over uint64 samples.
type Uint64Slice []uint64

func (s Uint64Slice) Len() int           { return len(s) }
func (s Uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }

// Get percentile of the provided samples. Round to integer.
// d must be in [0, 1]; out-of-range values and empty slices yield 0.
// The receiver is sorted in place as a side effect.
func (s Uint64Slice) GetPercentile(d float64) uint64 {
	if d < 0.0 || d > 1.0 {
		return 0
	}
	count := s.Len()
	if count == 0 {
		return 0
	}
	sort.Sort(s)
	n := float64(d * (float64(count) + 1))
	idx, frac := math.Modf(n)
	index := int(idx)
	// Clamp the rank into [1, count] so extreme quantiles (d=0, d=1,
	// or d < 1/(count+1)) cannot index out of range — the original
	// s[index-1] panicked for those inputs.
	if index < 1 {
		index = 1
		frac = 0
	} else if index > count {
		index = count
		frac = 0
	}
	percentile := float64(s[index-1])
	// Linear interpolation between adjacent ranks when possible.
	if index > 1 && index < count {
		percentile += frac * float64(s[index]-s[index-1])
	}
	return uint64(percentile)
}
|
||||
|
||||
// mean maintains a running average over Add()ed values.
type mean struct {
	// current count.
	count uint64
	// current mean.
	Mean float64
}

// Add folds one more value into the running average.
func (m *mean) Add(value uint64) {
	m.count++
	if m.count == 1 {
		// The first sample defines the mean outright.
		m.Mean = float64(value)
		return
	}
	n := float64(m.count)
	m.Mean = (m.Mean*(n-1) + float64(value)) / n
}
|
||||
|
||||
// Percentile aggregates percentile samples and raw samples into a
// combined info.Percentiles summary.
type Percentile interface {
	Add(info.Percentiles)
	AddSample(uint64)
	GetAllPercentiles() info.Percentiles
}

// resource tracks one metric (e.g. cpu or memory) for percentile
// aggregation.
type resource struct {
	// list of samples being tracked.
	samples Uint64Slice
	// average from existing samples.
	mean mean
	// maximum value seen so far in the added samples.
	max uint64
}

// Adds a new percentile sample.
// Absent samples (Present == false) are ignored; otherwise the max,
// running mean, and 90th-percentile series are updated.
func (r *resource) Add(p info.Percentiles) {
	if !p.Present {
		return
	}
	if p.Max > r.max {
		r.max = p.Max
	}
	r.mean.Add(p.Mean)
	// Selecting 90p of 90p :(
	r.samples = append(r.samples, p.Ninety)
}

// Add a single sample. Internally, we convert it to a fake percentile sample.
func (r *resource) AddSample(val uint64) {
	sample := info.Percentiles{
		Present:    true,
		Mean:       val,
		Max:        val,
		Fifty:      val,
		Ninety:     val,
		NinetyFive: val,
	}
	r.Add(sample)
}

// Get max, average, and 90p from existing samples.
func (r *resource) GetAllPercentiles() info.Percentiles {
	p := info.Percentiles{}
	p.Mean = uint64(r.mean.Mean)
	p.Max = r.max
	p.Fifty = r.samples.GetPercentile(0.5)
	p.Ninety = r.samples.GetPercentile(0.9)
	p.NinetyFive = r.samples.GetPercentile(0.95)
	p.Present = true
	return p
}

// NewResource returns an empty Percentile tracker with capacity for
// size samples.
func NewResource(size int) Percentile {
	return &resource{
		samples: make(Uint64Slice, 0, size),
		mean:    mean{count: 0, Mean: 0},
	}
}
|
||||
|
||||
// Return aggregated percentiles from the provided percentile samples.
|
||||
func GetDerivedPercentiles(stats []*info.Usage) info.Usage {
|
||||
cpu := NewResource(len(stats))
|
||||
memory := NewResource(len(stats))
|
||||
for _, stat := range stats {
|
||||
cpu.Add(stat.Cpu)
|
||||
memory.Add(stat.Memory)
|
||||
}
|
||||
usage := info.Usage{}
|
||||
usage.Cpu = cpu.GetAllPercentiles()
|
||||
usage.Memory = memory.GetAllPercentiles()
|
||||
return usage
|
||||
}
|
||||
|
||||
// Calculate part of a minute this sample set represent.
|
||||
func getPercentComplete(stats []*secondSample) (percent int32) {
|
||||
numSamples := len(stats)
|
||||
if numSamples > 1 {
|
||||
percent = 100
|
||||
timeRange := stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds()
|
||||
// allow some slack
|
||||
if timeRange < 58*secondsToNanoSeconds {
|
||||
percent = int32((timeRange * 100) / 60 * secondsToNanoSeconds)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate cpurate from two consecutive total cpu usage samples.
// The samples must be at least 10ms apart and the cumulative counter
// must not have decreased; the returned rate is in cpu-milliseconds
// per second.
func getCPURate(latest, previous secondSample) (uint64, error) {
	elapsed := latest.Timestamp.Sub(previous.Timestamp).Nanoseconds()
	if elapsed < 10*milliSecondsToNanoSeconds {
		return 0, fmt.Errorf("elapsed time too small: %d ns: time now %s last %s", elapsed, latest.Timestamp.String(), previous.Timestamp.String())
	}
	if latest.Cpu < previous.Cpu {
		return 0, fmt.Errorf("bad sample: cumulative cpu usage dropped from %d to %d", latest.Cpu, previous.Cpu)
	}
	// Cpurate is calculated in cpu-milliseconds per second.
	cpuRate := (latest.Cpu - previous.Cpu) * secondsToMilliSeconds / uint64(elapsed)
	return cpuRate, nil
}
|
||||
|
||||
// Returns a percentile sample for a minute by aggregating seconds samples.
// The first sample contributes memory only; each later sample also
// yields a cpu rate computed against the previous accepted sample.
// A sample whose cpu rate cannot be computed is skipped entirely: its
// memory is not added and it does not become the comparison baseline.
func GetMinutePercentiles(stats []*secondSample) info.Usage {
	lastSample := secondSample{}
	cpu := NewResource(len(stats))
	memory := NewResource(len(stats))
	for _, stat := range stats {
		if !lastSample.Timestamp.IsZero() {
			cpuRate, err := getCPURate(*stat, lastSample)
			if err != nil {
				continue
			}
			cpu.AddSample(cpuRate)
			memory.AddSample(stat.Memory)
		} else {
			memory.AddSample(stat.Memory)
		}
		lastSample = *stat
	}
	percent := getPercentComplete(stats)
	return info.Usage{
		PercentComplete: percent,
		Cpu:             cpu.GetAllPercentiles(),
		Memory:          memory.GetAllPercentiles(),
	}
}
|
184
vendor/github.com/google/cadvisor/summary/summary.go
generated
vendored
Normal file
184
vendor/github.com/google/cadvisor/summary/summary.go
generated
vendored
Normal file
@ -0,0 +1,184 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Maintains the summary of aggregated minute, hour, and day stats.
|
||||
// For a container running for more than a day, amount of tracked data can go up to
|
||||
// 40 KB when cpu and memory are tracked. We'll start by enabling collection for the
|
||||
// node, followed by docker, and then all containers as we understand the usage pattern
|
||||
// better
|
||||
// TODO(rjnagal): Optimize the size if we start running it for every container.
|
||||
package summary
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/cadvisor/info/v1"
|
||||
info "github.com/google/cadvisor/info/v2"
|
||||
)
|
||||
|
||||
// Usage fields we track for generating percentiles.
type secondSample struct {
	Timestamp time.Time // time when the sample was recorded.
	Cpu       uint64    // cpu usage
	Memory    uint64    // memory usage
}

// availableResources flags which usage fields are collected for a
// container.
type availableResources struct {
	Cpu    bool
	Memory bool
}

// StatsSummary maintains the summary of aggregated minute, hour, and
// day stats for a single container.
type StatsSummary struct {
	// Resources being tracked for this container.
	available availableResources
	// list of second samples. The list is cleared when a new minute samples is generated.
	secondSamples []*secondSample
	// minute percentiles. We track 24 * 60 maximum samples.
	minuteSamples *SamplesBuffer
	// latest derived instant, minute, hour, and day stats. Instant sample updated every second.
	// Others updated every minute.
	derivedStats info.DerivedStats // Guarded by dataLock.
	dataLock     sync.RWMutex
}
|
||||
|
||||
// Adds a new seconds sample.
// If enough seconds samples are collected, a minute sample is generated and derived
// stats are updated.
func (s *StatsSummary) AddSample(stat v1.ContainerStats) error {
	sample := secondSample{}
	sample.Timestamp = stat.Timestamp
	// Only copy the fields this container is configured to track.
	if s.available.Cpu {
		sample.Cpu = stat.Cpu.Usage.Total
	}
	if s.available.Memory {
		sample.Memory = stat.Memory.WorkingSet
	}
	s.secondSamples = append(s.secondSamples, &sample)
	s.updateLatestUsage()
	// TODO(jnagal): Use 'available' to avoid unnecessary computation.
	numSamples := len(s.secondSamples)
	elapsed := time.Nanosecond
	if numSamples > 1 {
		start := s.secondSamples[0].Timestamp
		end := s.secondSamples[numSamples-1].Timestamp
		elapsed = end.Sub(start)
	}
	if elapsed > 60*time.Second {
		// Make a minute sample. This works with dynamic housekeeping as long
		// as we keep max dynamic housekeeping period close to a minute.
		minuteSample := GetMinutePercentiles(s.secondSamples)
		// Clear seconds samples. Keep the latest sample for continuity.
		// Copying and resizing helps avoid slice re-allocation.
		s.secondSamples[0] = s.secondSamples[numSamples-1]
		s.secondSamples = s.secondSamples[:1]
		s.minuteSamples.Add(minuteSample)
		err := s.updateDerivedStats()
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
func (s *StatsSummary) updateLatestUsage() {
|
||||
usage := info.InstantUsage{}
|
||||
numStats := len(s.secondSamples)
|
||||
if numStats < 1 {
|
||||
return
|
||||
}
|
||||
latest := s.secondSamples[numStats-1]
|
||||
usage.Memory = latest.Memory
|
||||
if numStats > 1 {
|
||||
previous := s.secondSamples[numStats-2]
|
||||
cpu, err := getCPURate(*latest, *previous)
|
||||
if err == nil {
|
||||
usage.Cpu = cpu
|
||||
}
|
||||
}
|
||||
|
||||
s.dataLock.Lock()
|
||||
defer s.dataLock.Unlock()
|
||||
s.derivedStats.LatestUsage = usage
|
||||
s.derivedStats.Timestamp = latest.Timestamp
|
||||
}
|
||||
|
||||
// Generate new derived stats based on current minute stats samples.
|
||||
func (s *StatsSummary) updateDerivedStats() error {
|
||||
derived := info.DerivedStats{}
|
||||
derived.Timestamp = time.Now()
|
||||
minuteSamples := s.minuteSamples.RecentStats(1)
|
||||
if len(minuteSamples) != 1 {
|
||||
return fmt.Errorf("failed to retrieve minute stats")
|
||||
}
|
||||
derived.MinuteUsage = *minuteSamples[0]
|
||||
hourUsage, err := s.getDerivedUsage(60)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute hour stats: %v", err)
|
||||
}
|
||||
dayUsage, err := s.getDerivedUsage(60 * 24)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute day usage: %v", err)
|
||||
}
|
||||
derived.HourUsage = hourUsage
|
||||
derived.DayUsage = dayUsage
|
||||
|
||||
s.dataLock.Lock()
|
||||
defer s.dataLock.Unlock()
|
||||
derived.LatestUsage = s.derivedStats.LatestUsage
|
||||
s.derivedStats = derived
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// helper method to get hour and daily derived stats
|
||||
func (s *StatsSummary) getDerivedUsage(n int) (info.Usage, error) {
|
||||
if n < 1 {
|
||||
return info.Usage{}, fmt.Errorf("invalid number of samples requested: %d", n)
|
||||
}
|
||||
samples := s.minuteSamples.RecentStats(n)
|
||||
numSamples := len(samples)
|
||||
if numSamples < 1 {
|
||||
return info.Usage{}, fmt.Errorf("failed to retrieve any minute stats")
|
||||
}
|
||||
// We generate derived stats even with partial data.
|
||||
usage := GetDerivedPercentiles(samples)
|
||||
// Assumes we have equally placed minute samples.
|
||||
usage.PercentComplete = int32(numSamples * 100 / n)
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Return the latest calculated derived stats.
|
||||
func (s *StatsSummary) DerivedStats() (info.DerivedStats, error) {
|
||||
s.dataLock.RLock()
|
||||
defer s.dataLock.RUnlock()
|
||||
|
||||
return s.derivedStats, nil
|
||||
}
|
||||
|
||||
func New(spec v1.ContainerSpec) (*StatsSummary, error) {
|
||||
summary := StatsSummary{}
|
||||
if spec.HasCpu {
|
||||
summary.available.Cpu = true
|
||||
}
|
||||
if spec.HasMemory {
|
||||
summary.available.Memory = true
|
||||
}
|
||||
if !summary.available.Cpu && !summary.available.Memory {
|
||||
return nil, fmt.Errorf("none of the resources are being tracked")
|
||||
}
|
||||
summary.minuteSamples = NewSamplesBuffer(60 /* one hour */)
|
||||
return &summary, nil
|
||||
}
|
89
vendor/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go
generated
vendored
Normal file
89
vendor/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Get information about the cloud provider (if any) cAdvisor is running on.
|
||||
|
||||
package cloudinfo
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
// CloudInfo exposes the cloud metadata detected for the machine cAdvisor is
// running on.
type CloudInfo interface {
	GetCloudProvider() info.CloudProvider
	GetInstanceType() info.InstanceType
	GetInstanceID() info.InstanceID
}

// CloudProvider is an abstraction for providing cloud-specific information.
type CloudProvider interface {
	// IsActiveProvider determines whether this is the cloud provider operating
	// this instance.
	IsActiveProvider() bool
	// GetInstanceType gets the type of instance this process is running on.
	// The behavior is undefined if this is not the active provider.
	GetInstanceType() info.InstanceType
	// GetInstanceID gets the ID of the instance this process is running on.
	// The behavior is undefined if this is not the active provider.
	GetInstanceID() info.InstanceID
}

// providers holds every registered cloud provider, keyed by provider name.
var providers = map[info.CloudProvider]CloudProvider{}

// RegisterCloudProvider registers the given cloud provider.
// A duplicate registration logs a warning and overwrites the earlier entry.
func RegisterCloudProvider(name info.CloudProvider, provider CloudProvider) {
	if _, alreadyRegistered := providers[name]; alreadyRegistered {
		klog.Warningf("Duplicate registration of CloudProvider %s", name)
	}
	providers[name] = provider
}
|
||||
|
||||
type realCloudInfo struct {
|
||||
cloudProvider info.CloudProvider
|
||||
instanceType info.InstanceType
|
||||
instanceID info.InstanceID
|
||||
}
|
||||
|
||||
func NewRealCloudInfo() CloudInfo {
|
||||
for name, provider := range providers {
|
||||
if provider.IsActiveProvider() {
|
||||
return &realCloudInfo{
|
||||
cloudProvider: name,
|
||||
instanceType: provider.GetInstanceType(),
|
||||
instanceID: provider.GetInstanceID(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No registered active provider.
|
||||
return &realCloudInfo{
|
||||
cloudProvider: info.UnknownProvider,
|
||||
instanceType: info.UnknownInstance,
|
||||
instanceID: info.UnNamedInstance,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *realCloudInfo) GetCloudProvider() info.CloudProvider {
|
||||
return i.cloudProvider
|
||||
}
|
||||
|
||||
func (i *realCloudInfo) GetInstanceType() info.InstanceType {
|
||||
return i.instanceType
|
||||
}
|
||||
|
||||
func (i *realCloudInfo) GetInstanceID() info.InstanceID {
|
||||
return i.instanceID
|
||||
}
|
47
vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go
generated
vendored
Normal file
47
vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cpuload
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/utils/cpuload/netlink"
|
||||
)
|
||||
|
||||
// CpuLoadReader reads per-cgroup CPU load statistics from the kernel.
type CpuLoadReader interface {
	// Start the reader.
	Start() error

	// Stop the reader and clean up internal state.
	Stop()

	// Retrieve Cpu load for a given group.
	// name is the full hierarchical name of the container.
	// Path is an absolute filesystem path for a container under CPU cgroup hierarchy.
	GetCpuLoad(name string, path string) (info.LoadStats, error)
}

// New returns a netlink (taskstats) based CpuLoadReader, the only
// implementation this package provides.
func New() (CpuLoadReader, error) {
	reader, err := netlink.New()
	if err != nil {
		return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err)
	}
	klog.V(4).Info("Using a netlink-based load reader")
	return reader, nil
}
|
98
vendor/github.com/google/cadvisor/utils/cpuload/netlink/conn.go
generated
vendored
Normal file
98
vendor/github.com/google/cadvisor/utils/cpuload/netlink/conn.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netlink
|
||||
|
||||
import (
	"bufio"
	"bytes"
	"encoding/binary"
	"io"
	"os"
	"syscall"
)
|
||||
|
||||
// Connection is a generic-netlink socket plus the bookkeeping needed to frame
// request/response messages over it.
type Connection struct {
	// netlink socket
	fd int
	// cache pid to use in every netlink request.
	pid uint32
	// sequence number for netlink messages.
	seq uint32
	// destination address; Family set to AF_NETLINK, rest zero (kernel).
	addr syscall.SockaddrNetlink
	// buffered reads over this connection's own Read method.
	rbuf *bufio.Reader
}

// newConnection creates and binds a new netlink socket.
// The socket is closed again if the bind fails.
func newConnection() (*Connection, error) {

	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_DGRAM, syscall.NETLINK_GENERIC)
	if err != nil {
		return nil, err
	}

	conn := new(Connection)
	conn.fd = fd
	conn.seq = 0
	conn.pid = uint32(os.Getpid())
	conn.addr.Family = syscall.AF_NETLINK
	// Buffer reads through the connection itself so ReadMessage can decode
	// header and payload separately.
	conn.rbuf = bufio.NewReader(conn)
	err = syscall.Bind(fd, &conn.addr)
	if err != nil {
		syscall.Close(fd)
		return nil, err
	}
	return conn, err
}
|
||||
|
||||
func (c *Connection) Read(b []byte) (n int, err error) {
|
||||
n, _, err = syscall.Recvfrom(c.fd, b, 0)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *Connection) Write(b []byte) (n int, err error) {
|
||||
err = syscall.Sendto(c.fd, b, 0, &c.addr)
|
||||
return len(b), err
|
||||
}
|
||||
|
||||
func (c *Connection) Close() error {
|
||||
return syscall.Close(c.fd)
|
||||
}
|
||||
|
||||
// WriteMessage fills in the message length, sequence number, and pid, then
// serializes the header and payload (little-endian) and sends them over the
// socket as one datagram.
func (c *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
	w := bytes.NewBuffer(nil)
	msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data))
	msg.Header.Seq = c.seq
	c.seq++
	msg.Header.Pid = c.pid
	err := binary.Write(w, binary.LittleEndian, msg.Header)
	if err != nil {
		return err
	}
	_, err = w.Write(msg.Data)
	if err != nil {
		return err
	}
	_, err = c.Write(w.Bytes())
	return err
}
|
||||
|
||||
func (c *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) {
|
||||
err = binary.Read(c.rbuf, binary.LittleEndian, &msg.Header)
|
||||
if err != nil {
|
||||
return msg, err
|
||||
}
|
||||
msg.Data = make([]byte, msg.Header.Len-syscall.NLMSG_HDRLEN)
|
||||
_, err = c.rbuf.Read(msg.Data)
|
||||
return msg, err
|
||||
}
|
241
vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go
generated
vendored
Normal file
241
vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
var (
	// TODO(rjnagal): Verify and fix for other architectures.

	// Endian is the byte order used for all netlink (de)serialization here.
	Endian = binary.LittleEndian
)

// genMsghdr is the generic-netlink header that follows the netlink header.
type genMsghdr struct {
	Command  uint8
	Version  uint8
	Reserved uint16
}

// netlinkMessage is a netlink message with its generic header and attribute
// payload kept separate until serialization.
type netlinkMessage struct {
	Header    syscall.NlMsghdr
	GenHeader genMsghdr
	Data      []byte
}
|
||||
|
||||
// toRawMsg flattens the generic header and payload into the Data field of a
// raw syscall.NetlinkMessage. Writes into a bytes.Buffer cannot fail, so the
// binary.Write error is deliberately ignored.
func (m netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
	rawmsg.Header = m.Header
	w := bytes.NewBuffer([]byte{})
	binary.Write(w, Endian, m.GenHeader)
	w.Write(m.Data)
	rawmsg.Data = w.Bytes()
	return rawmsg
}

// loadStatsResp mirrors the kernel's cgroupstats reply layout: netlink
// header, generic header, then the stats payload.
type loadStatsResp struct {
	Header    syscall.NlMsghdr
	GenHeader genMsghdr
	Stats     info.LoadStats
}
|
||||
|
||||
// padding returns how many bytes must be appended to size so that it becomes
// a multiple of alignment.
func padding(size int, alignment int) int {
	if rem := size % alignment; rem != 0 {
		return alignment - rem
	}
	return 0
}
|
||||
|
||||
// getFamilyID asks the generic-netlink controller for the family id of the
// taskstats subsystem (one request, one reply).
func getFamilyID(conn *Connection) (uint16, error) {
	msg := prepareFamilyMessage()
	err := conn.WriteMessage(msg.toRawMsg())
	if err != nil {
		return 0, err
	}

	resp, err := conn.ReadMessage()
	if err != nil {
		return 0, err
	}
	id, err := parseFamilyResp(resp)
	if err != nil {
		return 0, err
	}
	return id, nil
}

// addAttribute appends an attribute to the message.
// Adds attribute info (length and type), followed by the data and necessary padding.
// Can be called multiple times to add attributes. Only fixed size and string type
// attributes are handled. We don't need nested attributes for task stats.
func addAttribute(buf *bytes.Buffer, attrType uint16, data interface{}, dataSize int) {
	attr := syscall.RtAttr{
		Len:  syscall.SizeofRtAttr,
		Type: attrType,
	}
	attr.Len += uint16(dataSize)
	binary.Write(buf, Endian, attr)
	switch data := data.(type) {
	case string:
		binary.Write(buf, Endian, []byte(data))
		buf.WriteByte(0) // terminate
	default:
		binary.Write(buf, Endian, data)
	}
	// Pad the attribute out to the netlink alignment boundary.
	for i := 0; i < padding(int(attr.Len), syscall.NLMSG_ALIGNTO); i++ {
		buf.WriteByte(0)
	}
}

// prepareMessage prepares the netlink and generic headers and appends
// attributes as data.
func prepareMessage(headerType uint16, cmd uint8, attributes []byte) (msg netlinkMessage) {
	msg.Header.Type = headerType
	msg.Header.Flags = syscall.NLM_F_REQUEST
	msg.GenHeader.Command = cmd
	msg.GenHeader.Version = 0x1
	msg.Data = attributes
	return msg
}

// prepareFamilyMessage prepares a message to query the family id for task stats.
func prepareFamilyMessage() (msg netlinkMessage) {
	buf := bytes.NewBuffer([]byte{})
	// +1 covers the trailing NUL that addAttribute appends to strings.
	addAttribute(buf, unix.CTRL_ATTR_FAMILY_NAME, unix.TASKSTATS_GENL_NAME, len(unix.TASKSTATS_GENL_NAME)+1)
	return prepareMessage(unix.GENL_ID_CTRL, unix.CTRL_CMD_GETFAMILY, buf.Bytes())
}

// prepareCmdMessage prepares a message to query task stats for a task group,
// identified by an open fd for its cgroup directory.
func prepareCmdMessage(id uint16, cfd uintptr) (msg netlinkMessage) {
	buf := bytes.NewBuffer([]byte{})
	addAttribute(buf, unix.CGROUPSTATS_CMD_ATTR_FD, uint32(cfd), 4)
	return prepareMessage(id, unix.CGROUPSTATS_CMD_GET, buf.Bytes())
}
|
||||
|
||||
// parseFamilyResp extracts the returned family id from the controller's
// response message.
func parseFamilyResp(msg syscall.NetlinkMessage) (uint16, error) {
	m := new(netlinkMessage)
	m.Header = msg.Header
	err := verifyHeader(msg)
	if err != nil {
		return 0, err
	}
	buf := bytes.NewBuffer(msg.Data)
	// extract generic header from data.
	err = binary.Read(buf, Endian, &m.GenHeader)
	if err != nil {
		return 0, err
	}
	id := uint16(0)
	// Extract attributes. kernel reports family name, id, version, etc.
	// Scan till we find id.
	for buf.Len() > syscall.SizeofRtAttr {
		var attr syscall.RtAttr
		err = binary.Read(buf, Endian, &attr)
		if err != nil {
			return 0, err
		}
		if attr.Type == unix.CTRL_ATTR_FAMILY_ID {
			err = binary.Read(buf, Endian, &id)
			if err != nil {
				return 0, err
			}
			return id, nil
		}
		// Not the id attribute: skip its payload plus alignment padding.
		payload := int(attr.Len) - syscall.SizeofRtAttr
		skipLen := payload + padding(payload, syscall.SizeofRtAttr)
		name := make([]byte, skipLen)
		err = binary.Read(buf, Endian, name)
		if err != nil {
			return 0, err
		}
	}
	return 0, fmt.Errorf("family id not found in the response")
}

// parseLoadStatsResp extracts task stats from a response returned by the kernel.
func parseLoadStatsResp(msg syscall.NetlinkMessage) (*loadStatsResp, error) {
	m := new(loadStatsResp)
	m.Header = msg.Header
	err := verifyHeader(msg)
	if err != nil {
		return m, err
	}
	buf := bytes.NewBuffer(msg.Data)
	// Scan the general header.
	err = binary.Read(buf, Endian, &m.GenHeader)
	if err != nil {
		return m, err
	}
	// cgroup stats response should have just one attribute.
	// Read it directly into the stats structure.
	var attr syscall.RtAttr
	err = binary.Read(buf, Endian, &attr)
	if err != nil {
		return m, err
	}
	err = binary.Read(buf, Endian, &m.Stats)
	if err != nil {
		return m, err
	}
	return m, err
}
|
||||
|
||||
// Verify and return any error reported by kernel.
|
||||
func verifyHeader(msg syscall.NetlinkMessage) error {
|
||||
switch msg.Header.Type {
|
||||
case syscall.NLMSG_DONE:
|
||||
return fmt.Errorf("expected a response, got nil")
|
||||
case syscall.NLMSG_ERROR:
|
||||
buf := bytes.NewBuffer(msg.Data)
|
||||
var errno int32
|
||||
err := binary.Read(buf, Endian, errno)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("netlink request failed with error %s", syscall.Errno(-errno))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getLoadStats gets load stats for a task group.
// id: family id for taskstats.
// cfd: open file to path to the cgroup directory under cpu hierarchy.
// conn: open netlink connection used to communicate with kernel.
func getLoadStats(id uint16, cfd *os.File, conn *Connection) (info.LoadStats, error) {
	msg := prepareCmdMessage(id, cfd.Fd())
	err := conn.WriteMessage(msg.toRawMsg())
	if err != nil {
		return info.LoadStats{}, err
	}

	resp, err := conn.ReadMessage()
	if err != nil {
		return info.LoadStats{}, err
	}

	parsedmsg, err := parseLoadStatsResp(resp)
	if err != nil {
		return info.LoadStats{}, err
	}
	return parsedmsg.Stats, nil
}
|
80
vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go
generated
vendored
Normal file
80
vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// NetlinkReader reads cgroup load stats from the kernel over a taskstats
// netlink connection established in New().
type NetlinkReader struct {
	// generic-netlink family id for the taskstats subsystem.
	familyID uint16
	// open netlink connection, closed by Stop().
	conn *Connection
}
|
||||
|
||||
func New() (*NetlinkReader, error) {
|
||||
conn, err := newConnection()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create a new connection: %s", err)
|
||||
}
|
||||
|
||||
id, err := getFamilyID(conn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err)
|
||||
}
|
||||
klog.V(4).Infof("Family id for taskstats: %d", id)
|
||||
return &NetlinkReader{
|
||||
familyID: id,
|
||||
conn: conn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *NetlinkReader) Stop() {
|
||||
if r.conn != nil {
|
||||
r.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *NetlinkReader) Start() error {
|
||||
// We do the start setup for netlink in New(). Nothing to do here.
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetCpuLoad returns the instantaneous number of running tasks in a group.
// Caller can use historical data to calculate cpu load.
// path is an absolute filesystem path for a container under the CPU cgroup hierarchy.
// NOTE: non-hierarchical load is returned. It does not include load for subcontainers.
func (r *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
	if len(path) == 0 {
		return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty")
	}

	// The cgroup directory fd is what the kernel keys the stats query on.
	cfd, err := os.Open(path)
	if err != nil {
		return info.LoadStats{}, fmt.Errorf("failed to open cgroup path %s: %q", path, err)
	}
	defer cfd.Close()

	stats, err := getLoadStats(r.familyID, cfd, r.conn)
	if err != nil {
		return info.LoadStats{}, err
	}
	klog.V(4).Infof("Task stats for %q: %+v", path, stats)
	return stats, nil
}
|
174
vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go
generated
vendored
Normal file
174
vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oomparser
|
||||
|
||||
import (
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/euank/go-kmsg-parser/kmsgparser"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
	// legacyContainerRegexp matches the pre-5.0 kernel OOM line naming the cgroups involved.
	legacyContainerRegexp = regexp.MustCompile(`Task in (.*) killed as a result of limit of (.*)`)
	// Starting in 5.0 linux kernels, the OOM message changed
	containerRegexp = regexp.MustCompile(`oom-kill:constraint=(.*),nodemask=(.*),cpuset=(.*),mems_allowed=(.*),oom_memcg=(.*),task_memcg=(.*),task=(.*),pid=(.*),uid=(.*)`)
	// lastLineRegexp matches the final line reporting the killed pid and process name.
	lastLineRegexp = regexp.MustCompile(`Killed process ([0-9]+) \((.+)\)`)
	// firstLineRegexp marks the start of a kernel OOM report.
	firstLineRegexp = regexp.MustCompile(`invoked oom-killer:`)
)

// OomParser wraps a kmsgparser in order to extract OOM events from the
// individual kernel ring buffer messages.
type OomParser struct {
	parser kmsgparser.Parser
}

// OomInstance contains information related to an OOM kill instance.
type OomInstance struct {
	// process id of the killed process
	Pid int
	// the name of the killed process
	ProcessName string
	// the time that the process was reported to be killed,
	// accurate to the minute
	TimeOfDeath time.Time
	// the absolute name of the container that OOMed
	ContainerName string
	// the absolute name of the container that was killed
	// due to the OOM.
	VictimContainerName string
	// the constraint that triggered the OOM. One of CONSTRAINT_NONE,
	// CONSTRAINT_CPUSET, CONSTRAINT_MEMORY_POLICY, CONSTRAINT_MEMCG
	Constraint string
}
|
||||
|
||||
// gets the container name from a line and adds it to the oomInstance.
|
||||
func getLegacyContainerName(line string, currentOomInstance *OomInstance) error {
|
||||
parsedLine := legacyContainerRegexp.FindStringSubmatch(line)
|
||||
if parsedLine == nil {
|
||||
return nil
|
||||
}
|
||||
currentOomInstance.ContainerName = path.Join("/", parsedLine[1])
|
||||
currentOomInstance.VictimContainerName = path.Join("/", parsedLine[2])
|
||||
return nil
|
||||
}
|
||||
|
||||
// getContainerName parses the container names, constraint, pid, and process
// name from a 5.0+ kernel OOM line and adds them to the oomInstance. It
// returns true when the line carried the full kill record; on a non-match it
// falls back to the legacy pre-5.0 format and returns false.
func getContainerName(line string, currentOomInstance *OomInstance) (bool, error) {
	parsedLine := containerRegexp.FindStringSubmatch(line)
	if parsedLine == nil {
		// Fall back to the legacy format if it isn't found here.
		return false, getLegacyContainerName(line, currentOomInstance)
	}
	currentOomInstance.ContainerName = parsedLine[6]
	currentOomInstance.VictimContainerName = parsedLine[5]
	currentOomInstance.Constraint = parsedLine[1]
	pid, err := strconv.Atoi(parsedLine[8])
	if err != nil {
		return false, err
	}
	currentOomInstance.Pid = pid
	currentOomInstance.ProcessName = parsedLine[7]
	return true, nil
}
|
||||
|
||||
// gets the pid, name, and date from a line and adds it to oomInstance
|
||||
func getProcessNamePid(line string, currentOomInstance *OomInstance) (bool, error) {
|
||||
reList := lastLineRegexp.FindStringSubmatch(line)
|
||||
|
||||
if reList == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(reList[1])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
currentOomInstance.Pid = pid
|
||||
currentOomInstance.ProcessName = reList[2]
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// uses regex to see if line is the start of a kernel oom log
|
||||
func checkIfStartOfOomMessages(line string) bool {
|
||||
potentialOomStart := firstLineRegexp.MatchString(line)
|
||||
return potentialOomStart
|
||||
}
|
||||
|
||||
// StreamOoms writes to a provided stream of OomInstance objects representing
// OOM events that are found in the logs.
// It will block and should be called from a goroutine.
func (p *OomParser) StreamOoms(outStream chan<- *OomInstance) {
	kmsgEntries := p.parser.Parse()
	defer p.parser.Close()

	for msg := range kmsgEntries {
		isOomMessage := checkIfStartOfOomMessages(msg.Message)
		if isOomMessage {
			oomCurrentInstance := &OomInstance{
				ContainerName:       "/",
				VictimContainerName: "/",
				TimeOfDeath:         msg.Timestamp,
			}
			// Consume follow-up lines from the same channel until the kill
			// record is complete (pid and process name found).
			for msg := range kmsgEntries {
				finished, err := getContainerName(msg.Message, oomCurrentInstance)
				if err != nil {
					klog.Errorf("%v", err)
				}
				if !finished {
					finished, err = getProcessNamePid(msg.Message, oomCurrentInstance)
					if err != nil {
						klog.Errorf("%v", err)
					}
				}
				if finished {
					oomCurrentInstance.TimeOfDeath = msg.Timestamp
					break
				}
			}
			outStream <- oomCurrentInstance
		}
	}
	// Should not happen
	klog.Errorf("exiting analyzeLines. OOM events will not be reported.")
}

// New initializes an OomParser object. Returns an OomParser object and an error.
func New() (*OomParser, error) {
	parser, err := kmsgparser.NewParser()
	if err != nil {
		return nil, err
	}
	parser.SetLogger(glogAdapter{})
	return &OomParser{parser: parser}, nil
}

// glogAdapter routes kmsgparser's log output through klog at reduced severity.
type glogAdapter struct{}

var _ kmsgparser.Logger = glogAdapter{}

func (glogAdapter) Infof(format string, args ...interface{}) {
	klog.V(4).Infof(format, args...)
}
func (glogAdapter) Warningf(format string, args ...interface{}) {
	klog.V(2).Infof(format, args...)
}
func (glogAdapter) Errorf(format string, args ...interface{}) {
	klog.Warningf(format, args...)
}
|
24
vendor/github.com/google/cadvisor/utils/path.go
generated
vendored
Normal file
24
vendor/github.com/google/cadvisor/utils/path.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import "os"
|
||||
|
||||
func FileExists(file string) bool {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
603
vendor/github.com/google/cadvisor/utils/sysfs/sysfs.go
generated
vendored
Normal file
603
vendor/github.com/google/cadvisor/utils/sysfs/sysfs.go
generated
vendored
Normal file
@ -0,0 +1,603 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
	// Sysfs and procfs roots consulted by this package.
	blockDir     = "/sys/block"
	cacheDir     = "/sys/devices/system/cpu/cpu"
	netDir       = "/sys/class/net"
	dmiDir       = "/sys/class/dmi"
	ppcDevTree   = "/proc/device-tree"
	s390xDevTree = "/etc" // s390/s390x changes

	// meminfoFile is the per-NUMA-node memory statistics file name.
	meminfoFile = "meminfo"

	// distanceFile holds the NUMA distance vector of a node.
	distanceFile = "distance"

	// sysFsCPUTopology is the per-CPU "topology" subdirectory name.
	sysFsCPUTopology = "topology"

	// CPUPhysicalPackageID is a physical package id of cpu#. Typically corresponds to a physical socket number,
	// but the actual value is architecture and platform dependent.
	CPUPhysicalPackageID = "physical_package_id"
	// CPUCoreID is the CPU core ID of cpu#. Typically it is the hardware platform's identifier
	// (rather than the kernel's). The actual value is architecture and platform dependent.
	CPUCoreID = "core_id"

	// Relative paths (appended to a cpu directory) of topology attributes.
	coreIDFilePath    = "/" + sysFsCPUTopology + "/core_id"
	packageIDFilePath = "/" + sysFsCPUTopology + "/physical_package_id"
	bookIDFilePath    = "/" + sysFsCPUTopology + "/book_id"
	drawerIDFilePath  = "/" + sysFsCPUTopology + "/drawer_id"

	// memory size calculations

	// Glob patterns matching cpu/node directories under sysfs.
	cpuDirPattern  = "cpu*[0-9]"
	nodeDirPattern = "node*[0-9]"

	// HugePagesNrFile is the name of the nr_hugepages file in sysfs.
	HugePagesNrFile = "nr_hugepages"
)
|
||||
|
||||
var (
	// nodeDir is the root of the NUMA node tree. It is a var rather than a
	// const — presumably so it can be overridden in tests; confirm before changing.
	nodeDir = "/sys/devices/system/node/"
)
|
||||
|
||||
// CacheInfo describes a single CPU cache as exposed under
// /sys/devices/system/cpu/cpu*/cache.
type CacheInfo struct {
	// cache id
	Id int
	// size in bytes
	Size uint64
	// cache type - instruction, data, unified
	Type string
	// distance from cpus in a multi-level hierarchy
	Level int
	// number of cpus that can access this cache.
	Cpus int
}
|
||||
|
||||
// SysFs abstracts the lowest level calls to sysfs so they can be faked out
// in tests.
type SysFs interface {
	// GetNodesPaths returns paths of NUMA node directories.
	GetNodesPaths() ([]string, error)
	// GetCPUsPaths returns paths to CPUs in provided directory e.g. /sys/devices/system/node/node0 or /sys/devices/system/cpu
	GetCPUsPaths(cpusPath string) ([]string, error)
	// GetCoreID returns the physical core id for the specified CPU.
	GetCoreID(coreIDFilePath string) (string, error)
	// GetCPUPhysicalPackageID returns the physical package id for the specified CPU.
	GetCPUPhysicalPackageID(cpuPath string) (string, error)
	// GetBookID returns the book id for the specified CPU (s390x topology level).
	GetBookID(cpuPath string) (string, error)
	// GetDrawerID returns the drawer id for the specified CPU (s390x topology level).
	GetDrawerID(cpuPath string) (string, error)
	// GetMemInfo returns the total memory info for the specified NUMA node.
	GetMemInfo(nodeDir string) (string, error)
	// GetHugePagesInfo returns the hugepage entries from the specified directory.
	GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error)
	// GetHugePagesNr returns the nr_hugepages content for the given page-size directory.
	GetHugePagesNr(hugePagesDirectory string, hugePageName string) (string, error)
	// GetBlockDevices returns directory information for available block devices.
	GetBlockDevices() ([]os.FileInfo, error)
	// GetBlockDeviceSize returns the size of a given block device.
	GetBlockDeviceSize(string) (string, error)
	// GetBlockDeviceScheduler returns the scheduler type for the block device.
	GetBlockDeviceScheduler(string) (string, error)
	// GetBlockDeviceNumbers returns the device major:minor number string.
	GetBlockDeviceNumbers(string) (string, error)
	// IsBlockDeviceHidden reports whether the device is "hidden" (meaning it
	// will not have a device handle).
	// This is the case with native nvme multipathing.
	IsBlockDeviceHidden(string) (bool, error)

	// Network device enumeration and per-interface attributes.
	GetNetworkDevices() ([]os.FileInfo, error)
	GetNetworkAddress(string) (string, error)
	GetNetworkMtu(string) (string, error)
	GetNetworkSpeed(string) (string, error)
	GetNetworkStatValue(dev string, stat string) (uint64, error)

	// GetCaches returns directory information for available caches accessible to the given cpu.
	GetCaches(id int) ([]os.FileInfo, error)
	// GetCacheInfo returns information for a cache accessible from the given cpu.
	GetCacheInfo(cpu int, cache string) (CacheInfo, error)

	// GetSystemUUID returns a stable machine identifier.
	GetSystemUUID() (string, error)

	// GetDistances returns the NUMA distance array for a node.
	GetDistances(string) (string, error)

	// IsCPUOnline reports the CPU status from the kernel hotplug mechanism standpoint.
	// See: https://www.kernel.org/doc/html/latest/core-api/cpu_hotplug.html
	IsCPUOnline(dir string) bool
}
|
||||
|
||||
// realSysFs is the production SysFs implementation backed by the host's
// real /sys and /proc trees.
type realSysFs struct {
	// cpuPath is the root of the CPU sysfs tree; IsCPUOnline reads
	// <cpuPath>/online from it.
	cpuPath string
}

// NewRealSysFs returns a SysFs that reads from the host's sysfs.
func NewRealSysFs() SysFs {
	return &realSysFs{
		cpuPath: "/sys/devices/system/cpu",
	}
}
|
||||
|
||||
func (fs *realSysFs) GetNodesPaths() ([]string, error) {
|
||||
pathPattern := fmt.Sprintf("%s%s", nodeDir, nodeDirPattern)
|
||||
return filepath.Glob(pathPattern)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
|
||||
pathPattern := fmt.Sprintf("%s/%s", cpusPath, cpuDirPattern)
|
||||
return filepath.Glob(pathPattern)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCoreID(cpuPath string) (string, error) {
|
||||
coreIDFilePath := fmt.Sprintf("%s%s", cpuPath, coreIDFilePath)
|
||||
coreID, err := os.ReadFile(coreIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(coreID)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
|
||||
packageIDFilePath := fmt.Sprintf("%s%s", cpuPath, packageIDFilePath)
|
||||
packageID, err := os.ReadFile(packageIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(packageID)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBookID(cpuPath string) (string, error) {
|
||||
bookIDFilePath := fmt.Sprintf("%s%s", cpuPath, bookIDFilePath)
|
||||
bookID, err := os.ReadFile(bookIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(bookID)), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetDrawerID(cpuPath string) (string, error) {
|
||||
drawerIDFilePath := fmt.Sprintf("%s%s", cpuPath, drawerIDFilePath)
|
||||
drawerID, err := os.ReadFile(drawerIDFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(drawerID)), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetMemInfo(nodePath string) (string, error) {
|
||||
meminfoPath := fmt.Sprintf("%s/%s", nodePath, meminfoFile)
|
||||
meminfo, err := os.ReadFile(meminfoPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(meminfo)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetDistances(nodePath string) (string, error) {
|
||||
distancePath := fmt.Sprintf("%s/%s", nodePath, distanceFile)
|
||||
distance, err := os.ReadFile(distancePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(distance)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error) {
|
||||
dirs, err := os.ReadDir(hugePagesDirectory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toFileInfo(dirs)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
|
||||
hugePageFilePath := fmt.Sprintf("%s%s/%s", hugepagesDirectory, hugePageName, HugePagesNrFile)
|
||||
hugePageFile, err := os.ReadFile(hugePageFilePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(hugePageFile)), err
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
|
||||
dirs, err := os.ReadDir(blockDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toFileInfo(dirs)
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
|
||||
dev, err := os.ReadFile(path.Join(blockDir, name, "/dev"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(dev), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) IsBlockDeviceHidden(name string) (bool, error) {
|
||||
// See: https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-block
|
||||
// https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
|
||||
// - c8487d854ba5 ("lsblk: Ignore hidden devices")
|
||||
devHiddenPath := path.Join(blockDir, name, "/hidden")
|
||||
hidden, err := os.ReadFile(devHiddenPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
// older OS may not have /hidden sysfs entry, so for sure
|
||||
// it is not a hidden device...
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to read %s: %w", devHiddenPath, err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(hidden)) == "1", nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
|
||||
sched, err := os.ReadFile(path.Join(blockDir, name, "/queue/scheduler"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(sched), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetBlockDeviceSize(name string) (string, error) {
|
||||
size, err := os.ReadFile(path.Join(blockDir, name, "/size"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(size), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
|
||||
dirs, err := os.ReadDir(netDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files, err := toFileInfo(dirs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Filter out non-directory & non-symlink files
|
||||
filtered := []os.FileInfo{}
|
||||
for _, f := range files {
|
||||
if f.Mode()|os.ModeSymlink != 0 {
|
||||
f, err = os.Stat(path.Join(netDir, f.Name()))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f.IsDir() {
|
||||
filtered = append(filtered, f)
|
||||
}
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkAddress(name string) (string, error) {
|
||||
address, err := os.ReadFile(path.Join(netDir, name, "/address"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(address), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkMtu(name string) (string, error) {
|
||||
mtu, err := os.ReadFile(path.Join(netDir, name, "/mtu"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(mtu), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkSpeed(name string) (string, error) {
|
||||
speed, err := os.ReadFile(path.Join(netDir, name, "/speed"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(speed), nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
|
||||
statPath := path.Join(netDir, dev, "/statistics", stat)
|
||||
out, err := os.ReadFile(statPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to read stat from %q for device %q", statPath, dev)
|
||||
}
|
||||
var s uint64
|
||||
n, err := fmt.Sscanf(string(out), "%d", &s)
|
||||
if err != nil || n != 1 {
|
||||
return 0, fmt.Errorf("could not parse value from %q for file %s", string(out), statPath)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (fs *realSysFs) GetCaches(id int) ([]os.FileInfo, error) {
|
||||
cpuPath := fmt.Sprintf("%s%d/cache", cacheDir, id)
|
||||
dir, err := os.ReadDir(cpuPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toFileInfo(dir)
|
||||
}
|
||||
|
||||
// toFileInfo converts os.ReadDir results into the []os.FileInfo shape the
// SysFs interface exposes, failing on the first entry whose info cannot be
// obtained. The result is always non-nil (empty slice for no entries).
func toFileInfo(dirs []os.DirEntry) ([]os.FileInfo, error) {
	infos := []os.FileInfo{}
	for _, d := range dirs {
		fi, err := d.Info()
		if err != nil {
			return nil, err
		}
		infos = append(infos, fi)
	}
	return infos, nil
}
|
||||
|
||||
// bitCount returns the number of set bits in i (population count).
// Uses the stdlib intrinsic instead of the original hand-rolled shift loop.
func bitCount(i uint64) int {
	return bits.OnesCount64(i)
}
|
||||
|
||||
func getCPUCount(cache string) (count int, err error) {
|
||||
out, err := os.ReadFile(path.Join(cache, "/shared_cpu_map"))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
masks := strings.Split(string(out), ",")
|
||||
for _, mask := range masks {
|
||||
// convert hex string to uint64
|
||||
m, err := strconv.ParseUint(strings.TrimSpace(mask), 16, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse cpu map %q: %v", string(out), err)
|
||||
}
|
||||
count += bitCount(m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetCacheInfo reads the description of one cache (e.g. "index0") for the
// given cpu from /sys/devices/system/cpu/cpu<cpu>/cache/<name>: its id,
// size, level, type, and the number of CPUs sharing it.
func (fs *realSysFs) GetCacheInfo(cpu int, name string) (CacheInfo, error) {
	cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, cpu, name)
	var id int
	// s390x kernels do not expose the cache "id" attribute; skip reading it
	// there and leave id at its zero value.
	if runtime.GOARCH != "s390x" {
		out, err := os.ReadFile(path.Join(cachePath, "/id"))
		if err != nil {
			return CacheInfo{}, err
		}
		n, err := fmt.Sscanf(string(out), "%d", &id)
		if err != nil || n != 1 {
			return CacheInfo{}, err
		}
	}

	// The "size" attribute is formatted like "32K" — parse the numeric part.
	out, err := os.ReadFile(path.Join(cachePath, "/size"))
	if err != nil {
		return CacheInfo{}, err
	}
	var size uint64
	n, err := fmt.Sscanf(string(out), "%dK", &size)
	if err != nil || n != 1 {
		return CacheInfo{}, err
	}
	// convert to bytes
	size = size * 1024
	out, err = os.ReadFile(path.Join(cachePath, "/level"))
	if err != nil {
		return CacheInfo{}, err
	}
	var level int
	n, err = fmt.Sscanf(string(out), "%d", &level)
	if err != nil || n != 1 {
		return CacheInfo{}, err
	}

	// "type" is one of Instruction, Data, or Unified (whitespace-trimmed).
	out, err = os.ReadFile(path.Join(cachePath, "/type"))
	if err != nil {
		return CacheInfo{}, err
	}
	cacheType := strings.TrimSpace(string(out))
	cpuCount, err := getCPUCount(cachePath)
	if err != nil {
		return CacheInfo{}, err
	}
	return CacheInfo{
		Id:    id,
		Size:  size,
		Level: level,
		Type:  cacheType,
		Cpus:  cpuCount,
	}, nil
}
|
||||
|
||||
func (fs *realSysFs) GetSystemUUID() (string, error) {
|
||||
if id, err := os.ReadFile(path.Join(dmiDir, "id", "product_uuid")); err == nil {
|
||||
return strings.TrimSpace(string(id)), nil
|
||||
} else if id, err = os.ReadFile(path.Join(ppcDevTree, "system-id")); err == nil {
|
||||
return strings.TrimSpace(strings.TrimRight(string(id), "\000")), nil
|
||||
} else if id, err = os.ReadFile(path.Join(ppcDevTree, "vm,uuid")); err == nil {
|
||||
return strings.TrimSpace(strings.TrimRight(string(id), "\000")), nil
|
||||
} else if id, err = os.ReadFile(path.Join(s390xDevTree, "machine-id")); err == nil {
|
||||
return strings.TrimSpace(string(id)), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// IsCPUOnline reports whether the CPU at cpuPath is online according to the
// hotplug state file <fs.cpuPath>/online. If that file does not exist,
// kernel CPU hotplug is disabled and every CPU is treated as online.
func (fs *realSysFs) IsCPUOnline(cpuPath string) bool {
	cpuOnlinePath, err := filepath.Abs(fs.cpuPath + "/online")
	if err != nil {
		klog.V(1).Infof("Unable to get absolute path for %s", cpuPath)
		return false
	}

	// Quick check to determine if file exists: if it does not then kernel CPU hotplug is disabled and all CPUs are online.
	_, err = os.Stat(cpuOnlinePath)
	if err != nil && os.IsNotExist(err) {
		return true
	}
	if err != nil {
		// NOTE(review): a non-NotExist stat error is only logged here; the
		// code still proceeds and will most likely fail again in isCPUOnline.
		klog.V(1).Infof("Unable to stat %s: %s", cpuOnlinePath, err)
	}

	// Derive the numeric CPU id from the directory name (".../cpuN").
	cpuID, err := getCPUID(cpuPath)
	if err != nil {
		klog.V(1).Infof("Unable to get CPU ID from path %s: %s", cpuPath, err)
		return false
	}

	// Check membership of cpuID in the kernel's online-CPU list.
	isOnline, err := isCPUOnline(cpuOnlinePath, cpuID)
	if err != nil {
		klog.V(1).Infof("Unable to get online CPUs list: %s", err)
		return false
	}
	return isOnline
}
|
||||
|
||||
// cpuDirRegexp extracts the numeric CPU id from a sysfs cpu directory path
// such as "/sys/devices/system/cpu/cpu12". Compiled once at package scope so
// repeated getCPUID calls do not pay regexp compilation cost (the original
// recompiled it on every call).
var cpuDirRegexp = regexp.MustCompile("cpu([0-9]+)")

// getCPUID extracts the CPU number from a sysfs cpu directory path and
// returns it as a uint16, or an error if the path contains no "cpu<N>" part.
func getCPUID(dir string) (uint16, error) {
	matches := cpuDirRegexp.FindStringSubmatch(dir)
	if len(matches) == 2 {
		id, err := strconv.Atoi(matches[1])
		if err != nil {
			return 0, err
		}
		return uint16(id), nil
	}
	return 0, fmt.Errorf("can't get CPU ID from %s", dir)
}
|
||||
|
||||
// isCPUOnline is copied from github.com/opencontainers/runc/libcontainer/cgroups/fs and modified to suit cAdvisor
// needs as Apache 2.0 license allows.
// It parses a CPU list (such as: 0,3-5,10) and reports whether cpuID appears in it.
// see: https://github.com/opencontainers/runc/blob/ab27e12cebf148aa5d1ee3ad13d9fc7ae12bf0b6/libcontainer/cgroups/fs/cpuset.go#L45
func isCPUOnline(path string, cpuID uint16) (bool, error) {
	fileContent, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	if len(fileContent) == 0 {
		return false, fmt.Errorf("%s found to be empty", path)
	}

	// Each comma-separated element is either a single CPU ("3") or an
	// inclusive range ("3-5").
	cpuList := strings.TrimSpace(string(fileContent))
	for _, s := range strings.Split(cpuList, ",") {
		// SplitN with limit 3 lets us detect a malformed element with more
		// than one dash (len == 3) while splitting a valid range in two.
		splitted := strings.SplitN(s, "-", 3)
		switch len(splitted) {
		case 3:
			return false, fmt.Errorf("invalid values in %s", path)
		case 2:
			min, err := strconv.ParseUint(splitted[0], 10, 16)
			if err != nil {
				return false, err
			}
			max, err := strconv.ParseUint(splitted[1], 10, 16)
			if err != nil {
				return false, err
			}
			if min > max {
				return false, fmt.Errorf("invalid values in %s", path)
			}
			// Return true, if the CPU under consideration is in the range of online CPUs.
			if cpuID >= uint16(min) && cpuID <= uint16(max) {
				return true, nil
			}
		case 1:
			value, err := strconv.ParseUint(s, 10, 16)
			if err != nil {
				return false, err
			}
			if uint16(value) == cpuID {
				return true, nil
			}
		}
	}

	// cpuID was not found in any element or range.
	return false, nil
}
|
||||
|
||||
// Looks for sysfs cpu path containing given CPU property, e.g. core_id or physical_package_id
|
||||
// and returns number of unique values of given property, exemplary usage: getting number of CPU physical cores
|
||||
func GetUniqueCPUPropertyCount(cpuAttributesPath string, propertyName string) int {
|
||||
absCPUAttributesPath, err := filepath.Abs(cpuAttributesPath)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot make %s absolute", cpuAttributesPath)
|
||||
return 0
|
||||
}
|
||||
pathPattern := absCPUAttributesPath + "/cpu*[0-9]"
|
||||
sysCPUPaths, err := filepath.Glob(pathPattern)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot find files matching pattern (pathPattern: %s), number of unique %s set to 0", pathPattern, propertyName)
|
||||
return 0
|
||||
}
|
||||
cpuOnlinePath, err := filepath.Abs(cpuAttributesPath + "/online")
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get absolute path for %s", cpuAttributesPath+"/../online")
|
||||
return 0
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get online CPUs list: %s", err)
|
||||
return 0
|
||||
}
|
||||
uniques := make(map[string]bool)
|
||||
for _, sysCPUPath := range sysCPUPaths {
|
||||
cpuID, err := getCPUID(sysCPUPath)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Unable to get CPU ID from path %s: %s", sysCPUPath, err)
|
||||
return 0
|
||||
}
|
||||
isOnline, err := isCPUOnline(cpuOnlinePath, cpuID)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
klog.V(1).Infof("Unable to determine CPU online state: %s", err)
|
||||
continue
|
||||
}
|
||||
if !isOnline && !os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
propertyPath := filepath.Join(sysCPUPath, sysFsCPUTopology, propertyName)
|
||||
propertyVal, err := os.ReadFile(propertyPath)
|
||||
if err != nil {
|
||||
klog.Warningf("Cannot open %s, assuming 0 for %s of CPU %d", propertyPath, propertyName, cpuID)
|
||||
propertyVal = []byte("0")
|
||||
}
|
||||
packagePath := filepath.Join(sysCPUPath, sysFsCPUTopology, CPUPhysicalPackageID)
|
||||
packageVal, err := os.ReadFile(packagePath)
|
||||
if err != nil {
|
||||
klog.Warningf("Cannot open %s, assuming 0 %s of CPU %d", packagePath, CPUPhysicalPackageID, cpuID)
|
||||
packageVal = []byte("0")
|
||||
|
||||
}
|
||||
uniques[fmt.Sprintf("%s_%s", bytes.TrimSpace(propertyVal), bytes.TrimSpace(packageVal))] = true
|
||||
}
|
||||
return len(uniques)
|
||||
}
|
20
vendor/github.com/google/cadvisor/utils/sysfs/sysfs_notx86.go
generated
vendored
Normal file
20
vendor/github.com/google/cadvisor/utils/sysfs/sysfs_notx86.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
//go:build !x86
|
||||
// +build !x86
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysfs
|
||||
|
||||
var isX86 = false
|
20
vendor/github.com/google/cadvisor/utils/sysfs/sysfs_x86.go
generated
vendored
Normal file
20
vendor/github.com/google/cadvisor/utils/sysfs/sysfs_x86.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
//go:build x86
|
||||
// +build x86
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysfs
|
||||
|
||||
var isX86 = true
|
614
vendor/github.com/google/cadvisor/utils/sysinfo/sysinfo.go
generated
vendored
Normal file
614
vendor/github.com/google/cadvisor/utils/sysinfo/sysinfo.go
generated
vendored
Normal file
@ -0,0 +1,614 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sysinfo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/utils/sysfs"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
	// schedulerRegExp extracts the active scheduler — the bracketed entry —
	// from /sys/block/<dev>/queue/scheduler output such as "noop [mq-deadline] none".
	schedulerRegExp = regexp.MustCompile(`.*\[(.*)\].*`)
	// nodeDirRegExp pulls the NUMA node number out of a ".../node/node<N>" path.
	nodeDirRegExp = regexp.MustCompile(`node/node(\d*)`)
	// cpuDirRegExp pulls the CPU number out of a ".../cpu<N>" path.
	cpuDirRegExp = regexp.MustCompile(`/cpu(\d+)`)
	// memoryCapacityRegexp matches the MemTotal line of a meminfo file (value in kB).
	memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)

	// cpusPath is the sysfs CPU root; a var rather than a const — presumably
	// so it can be overridden in tests; confirm before changing.
	cpusPath = "/sys/devices/system/cpu"
)
|
||||
|
||||
const (
	// cacheLevel2 is the numeric level identifying an L2 cache.
	cacheLevel2 = 2
	// hugepagesDir is the subdirectory name (with trailing slash) appended to
	// a NUMA node directory to locate its hugepage statistics.
	hugepagesDir = "hugepages/"
)
|
||||
|
||||
// GetBlockDeviceInfo returns information about block devices present on the
// system, keyed by "major:minor". Loop, ram, sr and hidden devices are
// skipped. Uses the passed in system interface to retrieve the low level OS
// information.
func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) {
	disks, err := sysfs.GetBlockDevices()
	if err != nil {
		return nil, err
	}

	diskMap := make(map[string]info.DiskInfo)
	for _, disk := range disks {
		name := disk.Name()
		// Ignore non-disk devices.
		// TODO(rjnagal): Maybe just match hd, sd, and dm prefixes.
		if strings.HasPrefix(name, "loop") || strings.HasPrefix(name, "ram") || strings.HasPrefix(name, "sr") {
			continue
		}
		// Ignore "hidden" devices (i.e. nvme path device sysfs entries).
		// These devices are in the form of /dev/nvme$Xc$Yn$Z and will
		// not have a device handle (i.e. "hidden")
		isHidden, err := sysfs.IsBlockDeviceHidden(name)
		if err != nil {
			return nil, err
		}
		if isHidden {
			continue
		}
		diskInfo := info.DiskInfo{
			Name: name,
		}
		// Parse "major:minor" from the device's dev file.
		dev, err := sysfs.GetBlockDeviceNumbers(name)
		if err != nil {
			return nil, err
		}
		n, err := fmt.Sscanf(dev, "%d:%d", &diskInfo.Major, &diskInfo.Minor)
		if err != nil || n != 2 {
			return nil, fmt.Errorf("could not parse device numbers from %s for device %s", dev, name)
		}
		out, err := sysfs.GetBlockDeviceSize(name)
		if err != nil {
			return nil, err
		}
		// Remove trailing newline before conversion.
		size, err := strconv.ParseUint(strings.TrimSpace(out), 10, 64)
		if err != nil {
			return nil, err
		}
		// size is in 512 bytes blocks.
		diskInfo.Size = size * 512

		// Scheduler defaults to "none" when the attribute is unreadable or
		// does not contain a bracketed active entry.
		diskInfo.Scheduler = "none"
		blkSched, err := sysfs.GetBlockDeviceScheduler(name)
		if err == nil {
			matches := schedulerRegExp.FindSubmatch([]byte(blkSched))
			if len(matches) >= 2 {
				diskInfo.Scheduler = string(matches[1])
			}
		}
		device := fmt.Sprintf("%d:%d", diskInfo.Major, diskInfo.Minor)
		diskMap[device] = diskInfo
	}
	return diskMap, nil
}
|
||||
|
||||
// GetNetworkDevices returns information about network devices present on the
// system, skipping loopback, veth, docker and nerdctl interfaces.
func GetNetworkDevices(sysfs sysfs.SysFs) ([]info.NetInfo, error) {
	devs, err := sysfs.GetNetworkDevices()
	if err != nil {
		return nil, err
	}
	netDevices := []info.NetInfo{}
	for _, dev := range devs {
		name := dev.Name()
		// Ignore docker, loopback, and veth devices.
		ignoredDevices := []string{"lo", "veth", "docker", "nerdctl"}
		ignored := false
		for _, prefix := range ignoredDevices {
			if strings.HasPrefix(name, prefix) {
				ignored = true
				break
			}
		}
		if ignored {
			continue
		}
		address, err := sysfs.GetNetworkAddress(name)
		if err != nil {
			return nil, err
		}
		mtuStr, err := sysfs.GetNetworkMtu(name)
		if err != nil {
			return nil, err
		}
		var mtu int64
		n, err := fmt.Sscanf(mtuStr, "%d", &mtu)
		if err != nil || n != 1 {
			return nil, fmt.Errorf("could not parse mtu from %s for device %s", mtuStr, name)
		}
		netInfo := info.NetInfo{
			Name:       name,
			MacAddress: strings.TrimSpace(address),
			Mtu:        mtu,
		}
		speed, err := sysfs.GetNetworkSpeed(name)
		// Some devices don't set speed.
		if err == nil {
			var s int64
			n, err := fmt.Sscanf(speed, "%d", &s)
			if err != nil || n != 1 {
				return nil, fmt.Errorf("could not parse speed from %s for device %s", speed, name)
			}
			netInfo.Speed = s
		}
		netDevices = append(netDevices, netInfo)
	}
	return netDevices, nil
}
|
||||
|
||||
// GetHugePagesInfo returns information about pre-allocated huge pages
|
||||
// hugepagesDirectory should be top directory of hugepages
|
||||
// Such as: /sys/kernel/mm/hugepages/
|
||||
func GetHugePagesInfo(sysFs sysfs.SysFs, hugepagesDirectory string) ([]info.HugePagesInfo, error) {
|
||||
var hugePagesInfo []info.HugePagesInfo
|
||||
files, err := sysFs.GetHugePagesInfo(hugepagesDirectory)
|
||||
if err != nil {
|
||||
// treat as non-fatal since kernels and machine can be
|
||||
// configured to disable hugepage support
|
||||
return hugePagesInfo, nil
|
||||
}
|
||||
|
||||
for _, st := range files {
|
||||
nameArray := strings.Split(st.Name(), "-")
|
||||
pageSizeArray := strings.Split(nameArray[1], "kB")
|
||||
pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
|
||||
if err != nil {
|
||||
return hugePagesInfo, err
|
||||
}
|
||||
|
||||
val, err := sysFs.GetHugePagesNr(hugepagesDirectory, st.Name())
|
||||
if err != nil {
|
||||
return hugePagesInfo, err
|
||||
}
|
||||
var numPages uint64
|
||||
// we use sscanf as the file as a new-line that trips up ParseUint
|
||||
// it returns the number of tokens successfully parsed, so if
|
||||
// n != 1, it means we were unable to parse a number from the file
|
||||
n, err := fmt.Sscanf(string(val), "%d", &numPages)
|
||||
if err != nil || n != 1 {
|
||||
return hugePagesInfo, fmt.Errorf("could not parse file nr_hugepage for %s, contents %q", st.Name(), string(val))
|
||||
}
|
||||
|
||||
hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
|
||||
NumPages: numPages,
|
||||
PageSize: pageSize,
|
||||
})
|
||||
}
|
||||
return hugePagesInfo, nil
|
||||
}
|
||||
|
||||
// GetNodesInfo returns information about NUMA nodes and their topology,
// plus the total count of logical cores. When no node directories exist,
// it falls back to the flat CPU topology.
func GetNodesInfo(sysFs sysfs.SysFs) ([]info.Node, int, error) {
	nodes := []info.Node{}
	allLogicalCoresCount := 0

	nodesDirs, err := sysFs.GetNodesPaths()
	if err != nil {
		return nil, 0, err
	}

	if len(nodesDirs) == 0 {
		klog.V(4).Info("Nodes topology is not available, providing CPU topology")
		return getCPUTopology(sysFs)
	}

	for _, nodeDir := range nodesDirs {
		// Node id comes from the directory name (".../node/node<N>").
		id, err := getMatchedInt(nodeDirRegExp, nodeDir)
		if err != nil {
			return nil, 0, err
		}
		node := info.Node{Id: id}

		cpuDirs, err := sysFs.GetCPUsPaths(nodeDir)
		if len(cpuDirs) == 0 {
			klog.Warningf("Found node without any CPU, nodeDir: %s, number of cpuDirs %d, err: %v", nodeDir, len(cpuDirs), err)
		} else {
			cores, err := getCoresInfo(sysFs, cpuDirs)
			if err != nil {
				return nil, 0, err
			}
			node.Cores = cores
			for _, core := range cores {
				allLogicalCoresCount += len(core.Threads)
			}
		}

		// On some Linux platforms(such as Arm64 guest kernel), cache info may not exist.
		// So, we should ignore error here.
		err = addCacheInfo(sysFs, &node)
		if err != nil {
			klog.V(1).Infof("Found node without cache information, nodeDir: %s", nodeDir)
		}

		node.Memory, err = getNodeMemInfo(sysFs, nodeDir)
		if err != nil {
			return nil, 0, err
		}

		hugepagesDirectory := fmt.Sprintf("%s/%s", nodeDir, hugepagesDir)
		node.HugePages, err = GetHugePagesInfo(sysFs, hugepagesDirectory)
		if err != nil {
			return nil, 0, err
		}

		node.Distances, err = getDistances(sysFs, nodeDir)
		if err != nil {
			return nil, 0, err
		}

		nodes = append(nodes, node)
	}
	// NOTE(review): err is nil here (every assignment above was checked);
	// returning it rather than literal nil is harmless but could be tidied.
	return nodes, allLogicalCoresCount, err
}
|
||||
|
||||
func getCPUTopology(sysFs sysfs.SysFs) ([]info.Node, int, error) {
|
||||
nodes := []info.Node{}
|
||||
|
||||
cpusPaths, err := sysFs.GetCPUsPaths(cpusPath)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
cpusCount := len(cpusPaths)
|
||||
|
||||
if cpusCount == 0 {
|
||||
err = fmt.Errorf("Any CPU is not available, cpusPath: %s", cpusPath)
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
cpusByPhysicalPackageID, err := getCpusByPhysicalPackageID(sysFs, cpusPaths)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if len(cpusByPhysicalPackageID) == 0 {
|
||||
klog.Warningf("Cannot read any physical package id for any CPU")
|
||||
return nil, cpusCount, nil
|
||||
}
|
||||
|
||||
for physicalPackageID, cpus := range cpusByPhysicalPackageID {
|
||||
node := info.Node{Id: physicalPackageID}
|
||||
|
||||
cores, err := getCoresInfo(sysFs, cpus)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
node.Cores = cores
|
||||
|
||||
// On some Linux platforms(such as Arm64 guest kernel), cache info may not exist.
|
||||
// So, we should ignore error here.
|
||||
err = addCacheInfo(sysFs, &node)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("Found cpu without cache information, cpuPath: %s", cpus)
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
return nodes, cpusCount, nil
|
||||
}
|
||||
|
||||
func getCpusByPhysicalPackageID(sysFs sysfs.SysFs, cpusPaths []string) (map[int][]string, error) {
|
||||
cpuPathsByPhysicalPackageID := make(map[int][]string)
|
||||
for _, cpuPath := range cpusPaths {
|
||||
|
||||
rawPhysicalPackageID, err := sysFs.GetCPUPhysicalPackageID(cpuPath)
|
||||
if os.IsNotExist(err) {
|
||||
klog.Warningf("Cannot read physical package id for %s, physical_package_id file does not exist, err: %s", cpuPath, err)
|
||||
continue
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
physicalPackageID, err := strconv.Atoi(rawPhysicalPackageID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, ok := cpuPathsByPhysicalPackageID[physicalPackageID]; !ok {
|
||||
cpuPathsByPhysicalPackageID[physicalPackageID] = make([]string, 0)
|
||||
}
|
||||
|
||||
cpuPathsByPhysicalPackageID[physicalPackageID] = append(cpuPathsByPhysicalPackageID[physicalPackageID], cpuPath)
|
||||
}
|
||||
return cpuPathsByPhysicalPackageID, nil
|
||||
}
|
||||
|
||||
// addCacheInfo adds information about cache for NUMA node.
// For each core it reads the caches visible from one of the core's threads
// and classifies every cache by how widely it is shared:
//   - level > 2 and shared by all threads of the node -> node-level cache;
//   - level > 2 but shared by fewer threads -> uncore cache on the core;
//   - level <= 2 and shared by exactly the core's threads -> core-level cache.
// Caches that match none of these are ignored.
func addCacheInfo(sysFs sysfs.SysFs, node *info.Node) error {
	for coreID, core := range node.Cores {
		threadID := core.Threads[0] // get any thread for core; siblings see the same caches
		caches, err := GetCacheInfo(sysFs, threadID)
		if err != nil {
			return err
		}

		numThreadsPerCore := len(core.Threads)
		numThreadsPerNode := len(node.Cores) * numThreadsPerCore

		for _, cache := range caches {
			c := info.Cache{
				Id:    cache.Id,
				Size:  cache.Size,
				Level: cache.Level,
				Type:  cache.Type,
			}
			if cache.Level > cacheLevel2 {
				if cache.Cpus == numThreadsPerNode {
					// Add a node level cache (deduplicated: cores of the same
					// node all report the same shared cache).
					cacheFound := false
					for _, nodeCache := range node.Caches {
						if nodeCache == c {
							cacheFound = true
						}
					}
					if !cacheFound {
						node.Caches = append(node.Caches, c)
					}
				} else {
					// Add uncore cache, for architecture in which l3 cache only shared among some cores.
					uncoreCacheFound := false
					for _, uncoreCache := range node.Cores[coreID].UncoreCaches {
						if uncoreCache == c {
							uncoreCacheFound = true
						}
					}
					if !uncoreCacheFound {
						node.Cores[coreID].UncoreCaches = append(node.Cores[coreID].UncoreCaches, c)
					}
				}
			} else if cache.Cpus == numThreadsPerCore {
				// Add core level cache
				node.Cores[coreID].Caches = append(node.Cores[coreID].Caches, c)
			}
			// Ignore unknown caches.
		}
	}
	return nil
}
|
||||
|
||||
// getNodeMemInfo returns information about total memory for NUMA node
|
||||
func getNodeMemInfo(sysFs sysfs.SysFs, nodeDir string) (uint64, error) {
|
||||
rawMem, err := sysFs.GetMemInfo(nodeDir)
|
||||
if err != nil {
|
||||
//Ignore if per-node info is not available.
|
||||
klog.Warningf("Found node without memory information, nodeDir: %s", nodeDir)
|
||||
return 0, nil
|
||||
}
|
||||
matches := memoryCapacityRegexp.FindStringSubmatch(rawMem)
|
||||
if len(matches) != 2 {
|
||||
return 0, fmt.Errorf("failed to match regexp in output: %q", string(rawMem))
|
||||
}
|
||||
memory, err := strconv.ParseUint(matches[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
memory = memory * 1024 // Convert to bytes
|
||||
return uint64(memory), nil
|
||||
}
|
||||
|
||||
// getDistances returns information about distances between NUMA nodes
|
||||
func getDistances(sysFs sysfs.SysFs, nodeDir string) ([]uint64, error) {
|
||||
rawDistance, err := sysFs.GetDistances(nodeDir)
|
||||
if err != nil {
|
||||
//Ignore if per-node info is not available.
|
||||
klog.Warningf("Found node without distance information, nodeDir: %s", nodeDir)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
distances := []uint64{}
|
||||
for _, distance := range strings.Split(rawDistance, " ") {
|
||||
distanceUint, err := strconv.ParseUint(distance, 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot convert %s to int", distance)
|
||||
}
|
||||
distances = append(distances, distanceUint)
|
||||
}
|
||||
|
||||
return distances, nil
|
||||
}
|
||||
|
||||
// getCoresInfo returns information about physical cores built from the given
// per-CPU sysfs directories. Each returned info.Core groups the logical CPUs
// (threads) that share the same core id and socket id (and, on s390x, book id
// and drawer id). Offline CPUs and CPUs with missing topology files are
// skipped.
func getCoresInfo(sysFs sysfs.SysFs, cpuDirs []string) ([]info.Core, error) {
	cores := make([]info.Core, 0, len(cpuDirs))
	for _, cpuDir := range cpuDirs {
		cpuID, err := getMatchedInt(cpuDirRegExp, cpuDir)
		if err != nil {
			return nil, fmt.Errorf("unexpected format of CPU directory, cpuDirRegExp %s, cpuDir: %s", cpuDirRegExp, cpuDir)
		}
		// Offline CPUs contribute no threads.
		if !sysFs.IsCPUOnline(cpuDir) {
			continue
		}

		rawPhysicalID, err := sysFs.GetCoreID(cpuDir)
		if os.IsNotExist(err) {
			klog.Warningf("Cannot read core id for %s, core_id file does not exist, err: %s", cpuDir, err)
			continue
		} else if err != nil {
			return nil, err
		}
		physicalID, err := strconv.Atoi(rawPhysicalID)
		if err != nil {
			return nil, err
		}

		rawPhysicalPackageID, err := sysFs.GetCPUPhysicalPackageID(cpuDir)
		if os.IsNotExist(err) {
			klog.Warningf("Cannot read physical package id for %s, physical_package_id file does not exist, err: %s", cpuDir, err)
			continue
		} else if err != nil {
			return nil, err
		}

		physicalPackageID, err := strconv.Atoi(rawPhysicalPackageID)
		if err != nil {
			return nil, err
		}

		var bookID, drawerID string
		// s390/s390x additional cpu topology levels
		if runtime.GOARCH == "s390x" {
			bookID, err = sysFs.GetBookID(cpuDir)
			if os.IsNotExist(err) {
				klog.Warningf("Cannot read book id for %s, book_id file does not exist, err: %s", cpuDir, err)
				continue
			} else if err != nil {
				return nil, err
			}
			drawerID, err = sysFs.GetDrawerID(cpuDir)
			if os.IsNotExist(err) {
				klog.Warningf("Cannot read drawer id for %s, drawer_id file does not exist, err: %s", cpuDir, err)
				continue
			} else if err != nil {
				return nil, err
			}
		}

		// Look for an existing core with the same topology ids so this thread
		// can be merged into it.
		coreIDx := -1
		for id, core := range cores {
			if core.Id == physicalID && core.SocketID == physicalPackageID {
				// For s390x, we need to check the BookID and DrawerID match as well.
				if runtime.GOARCH != "s390x" || (core.BookID == bookID && core.DrawerID == drawerID) {
					coreIDx = id
				}
			}
		}

		// No matching core found; start a new entry.
		if coreIDx == -1 {
			cores = append(cores, info.Core{})
			coreIDx = len(cores) - 1
		}
		desiredCore := &cores[coreIDx]

		desiredCore.Id = physicalID
		desiredCore.SocketID = physicalPackageID
		desiredCore.BookID = bookID
		desiredCore.DrawerID = drawerID

		if len(desiredCore.Threads) == 0 {
			desiredCore.Threads = []int{cpuID}
		} else {
			desiredCore.Threads = append(desiredCore.Threads, cpuID)
		}

	}
	return cores, nil
}
|
||||
|
||||
// GetCacheInfo return information about a cache accessible from the given cpu thread
|
||||
func GetCacheInfo(sysFs sysfs.SysFs, id int) ([]sysfs.CacheInfo, error) {
|
||||
caches, err := sysFs.GetCaches(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := []sysfs.CacheInfo{}
|
||||
for _, cache := range caches {
|
||||
if !strings.HasPrefix(cache.Name(), "index") {
|
||||
continue
|
||||
}
|
||||
cacheInfo, err := sysFs.GetCacheInfo(id, cache.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = append(info, cacheInfo)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func getNetworkStats(name string, sysFs sysfs.SysFs) (info.InterfaceStats, error) {
|
||||
var stats info.InterfaceStats
|
||||
var err error
|
||||
stats.Name = name
|
||||
stats.RxBytes, err = sysFs.GetNetworkStatValue(name, "rx_bytes")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.RxPackets, err = sysFs.GetNetworkStatValue(name, "rx_packets")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.RxErrors, err = sysFs.GetNetworkStatValue(name, "rx_errors")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.RxDropped, err = sysFs.GetNetworkStatValue(name, "rx_dropped")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.TxBytes, err = sysFs.GetNetworkStatValue(name, "tx_bytes")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.TxPackets, err = sysFs.GetNetworkStatValue(name, "tx_packets")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.TxErrors, err = sysFs.GetNetworkStatValue(name, "tx_errors")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.TxDropped, err = sysFs.GetNetworkStatValue(name, "tx_dropped")
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// GetSystemUUID returns the machine's system UUID as reported by sysfs.
func GetSystemUUID(sysFs sysfs.SysFs) (string, error) {
	return sysFs.GetSystemUUID()
}
|
||||
|
||||
func getMatchedInt(rgx *regexp.Regexp, str string) (int, error) {
|
||||
matches := rgx.FindStringSubmatch(str)
|
||||
if len(matches) != 2 {
|
||||
return 0, fmt.Errorf("failed to match regexp, str: %s", str)
|
||||
}
|
||||
valInt, err := strconv.Atoi(matches[1])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return valInt, nil
|
||||
}
|
||||
|
||||
// GetSocketFromCPU returns Socket ID of passed CPU. If is not present, returns -1.
|
||||
func GetSocketFromCPU(topology []info.Node, cpu int) int {
|
||||
for _, node := range topology {
|
||||
found, coreID := node.FindCoreByThread(cpu)
|
||||
if found {
|
||||
return node.Cores[coreID].SocketID
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// GetOnlineCPUs returns available cores.
|
||||
func GetOnlineCPUs(topology []info.Node) []int {
|
||||
onlineCPUs := make([]int, 0)
|
||||
for _, node := range topology {
|
||||
for _, core := range node.Cores {
|
||||
onlineCPUs = append(onlineCPUs, core.Threads...)
|
||||
}
|
||||
}
|
||||
return onlineCPUs
|
||||
}
|
164
vendor/github.com/google/cadvisor/utils/timed_store.go
generated
vendored
Normal file
164
vendor/github.com/google/cadvisor/utils/timed_store.go
generated
vendored
Normal file
@ -0,0 +1,164 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
type timedStoreDataSlice []timedStoreData
|
||||
|
||||
func (t timedStoreDataSlice) Less(i, j int) bool {
|
||||
return t[i].timestamp.Before(t[j].timestamp)
|
||||
}
|
||||
|
||||
func (t timedStoreDataSlice) Len() int {
|
||||
return len(t)
|
||||
}
|
||||
|
||||
func (t timedStoreDataSlice) Swap(i, j int) {
|
||||
t[i], t[j] = t[j], t[i]
|
||||
}
|
||||
|
||||
// A time-based buffer for ContainerStats.
// Holds information for a specific time period and/or a max number of items.
type TimedStore struct {
	// buffer holds the entries sorted by timestamp, oldest first.
	buffer timedStoreDataSlice
	// age is the maximum time span retained, measured back from the most
	// recently added timestamp.
	age time.Duration
	// maxItems caps the number of retained entries; a negative value means
	// no limit.
	maxItems int
}
|
||||
|
||||
// timedStoreData pairs one stored value with the timestamp it was recorded at.
type timedStoreData struct {
	timestamp time.Time
	data      interface{}
}
|
||||
|
||||
// Returns a new thread-compatible TimedStore.
|
||||
// A maxItems value of -1 means no limit.
|
||||
func NewTimedStore(age time.Duration, maxItems int) *TimedStore {
|
||||
return &TimedStore{
|
||||
buffer: make(timedStoreDataSlice, 0),
|
||||
age: age,
|
||||
maxItems: maxItems,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds an element to the start of the buffer (removing one from the end if necessary).
// The buffer stays sorted by timestamp: in-order data is appended, out-of-order
// data is inserted at its correct position. After inserting, entries older than
// the store's age window (relative to the new timestamp) and entries beyond
// maxItems are evicted from the old end.
func (s *TimedStore) Add(timestamp time.Time, item interface{}) {
	data := timedStoreData{
		timestamp: timestamp,
		data:      item,
	}
	// Common case: data is added in order.
	if len(s.buffer) == 0 || !timestamp.Before(s.buffer[len(s.buffer)-1].timestamp) {
		s.buffer = append(s.buffer, data)
	} else {
		// Data is out of order; insert it in the correct position.
		index := sort.Search(len(s.buffer), func(index int) bool {
			return s.buffer[index].timestamp.After(timestamp)
		})
		s.buffer = append(s.buffer, timedStoreData{}) // Make room to shift the elements
		copy(s.buffer[index+1:], s.buffer[index:])    // Shift the elements over
		s.buffer[index] = data
	}

	// Remove any elements before eviction time.
	// TODO(rjnagal): This is assuming that the added entry has timestamp close to now.
	evictTime := timestamp.Add(-s.age)
	index := sort.Search(len(s.buffer), func(index int) bool {
		return s.buffer[index].timestamp.After(evictTime)
	})
	if index < len(s.buffer) {
		s.buffer = s.buffer[index:]
	}

	// Remove any elements if over our max size.
	if s.maxItems >= 0 && len(s.buffer) > s.maxItems {
		startIndex := len(s.buffer) - s.maxItems
		s.buffer = s.buffer[startIndex:]
	}
}
|
||||
|
||||
// Returns up to maxResult elements in the specified time period (inclusive).
// Results are from first to last. maxResults of -1 means no limit.
// Note that startIndex/endIndex below are LIFO indices (0 is the newest
// entry) because getData/Get index from the end of the sorted buffer.
func (s *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
	// No stats, return empty.
	if len(s.buffer) == 0 {
		return []interface{}{}
	}

	var startIndex int
	if start.IsZero() {
		// None specified, start at the beginning (oldest entry in LIFO terms).
		startIndex = len(s.buffer) - 1
	} else {
		// Start is the index before the elements smaller than it. We do this by
		// finding the first element smaller than start and taking the index
		// before that element
		startIndex = sort.Search(len(s.buffer), func(index int) bool {
			// buffer[index] < start
			return s.getData(index).timestamp.Before(start)
		}) - 1
		// Check if start is after all the data we have.
		if startIndex < 0 {
			return []interface{}{}
		}
	}

	var endIndex int
	if end.IsZero() {
		// None specified, end with the latest stats.
		endIndex = 0
	} else {
		// End is the first index smaller than or equal to it (so, not larger).
		endIndex = sort.Search(len(s.buffer), func(index int) bool {
			// buffer[index] <= t -> !(buffer[index] > t)
			return !s.getData(index).timestamp.After(end)
		})
		// Check if end is before all the data we have.
		if endIndex == len(s.buffer) {
			return []interface{}{}
		}
	}

	// Trim to maxResults size by dropping the oldest entries.
	numResults := startIndex - endIndex + 1
	if maxResults != -1 && numResults > maxResults {
		startIndex -= numResults - maxResults
		numResults = maxResults
	}

	// Return in sorted timestamp order so from the "back" to "front".
	result := make([]interface{}, numResults)
	for i := 0; i < numResults; i++ {
		result[i] = s.Get(startIndex - i)
	}
	return result
}
|
||||
|
||||
// Gets the element at the specified index. Note that elements are output in
// LIFO order: index 0 is the most recently added element.
func (s *TimedStore) Get(index int) interface{} {
	return s.getData(index).data
}
|
||||
|
||||
// Gets the data at the specified index. Note that elements are output in LIFO
// order: the index counts back from the newest entry at the end of the buffer.
func (s *TimedStore) getData(index int) timedStoreData {
	return s.buffer[len(s.buffer)-index-1]
}
|
||||
|
||||
// Size returns the number of elements currently held in the store.
func (s *TimedStore) Size() int {
	return len(s.buffer)
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user