mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00
Update to kube v1.17
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
This commit is contained in:
committed by
mergify[bot]
parent
327fcd1b1b
commit
3af1e26d7c
26
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
26
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
@ -17,7 +17,7 @@ limitations under the License.
|
||||
package config
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
@ -54,6 +54,18 @@ const (
|
||||
// WatchChangeDetectionStrategy is a mode in which kubelet uses
|
||||
// watches to observe changes to objects that are in its interest.
|
||||
WatchChangeDetectionStrategy ResourceChangeDetectionStrategy = "Watch"
|
||||
// RestrictedTopologyManagerPolicy is a mode in which kubelet only allows
|
||||
// pods with optimal NUMA node alignment for requested resources
|
||||
RestrictedTopologyManagerPolicy = "restricted"
|
||||
// BestEffortTopologyManagerPolicy is a mode in which kubelet will favour
|
||||
// pods with NUMA alignment of CPU and device resources.
|
||||
BestEffortTopologyManagerPolicy = "best-effort"
|
||||
// NoneTopologyManager Policy is a mode in which kubelet has no knowledge
|
||||
// of NUMA alignment of a pod's CPU and device resources.
|
||||
NoneTopologyManagerPolicy = "none"
|
||||
// SingleNumaNodeTopologyManager Policy iis a mode in which kubelet only allows
|
||||
// pods with a single NUMA alignment of CPU and device resources.
|
||||
SingleNumaNodeTopologyManager = "single-numa-node"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@ -197,6 +209,9 @@ type KubeletConfiguration struct {
|
||||
// CPU Manager reconciliation period.
|
||||
// Requires the CPUManager feature gate to be enabled.
|
||||
CPUManagerReconcilePeriod metav1.Duration
|
||||
// TopologyManagerPolicy is the name of the policy to use.
|
||||
// Policies other than "none" require the TopologyManager feature gate to be enabled.
|
||||
TopologyManagerPolicy string
|
||||
// Map of QoS resource reservation percentages (memory only for now).
|
||||
// Requires the QOSReserved feature gate to be enabled.
|
||||
QOSReserved map[string]string
|
||||
@ -288,6 +303,11 @@ type KubeletConfiguration struct {
|
||||
ContainerLogMaxFiles int32
|
||||
// ConfigMapAndSecretChangeDetectionStrategy is a mode in which config map and secret managers are running.
|
||||
ConfigMapAndSecretChangeDetectionStrategy ResourceChangeDetectionStrategy
|
||||
// A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in *).
|
||||
// Unsafe sysctl groups are kernel.shm*, kernel.msg*, kernel.sem, fs.mqueue.*, and net.*.
|
||||
// These sysctls are namespaced but not allowed by default. For example: "kernel.msg*,net.ipv4.route.min_pmtu"
|
||||
// +optional
|
||||
AllowedUnsafeSysctls []string
|
||||
|
||||
/* the following fields are meant for Node Allocatable */
|
||||
|
||||
@ -311,6 +331,10 @@ type KubeletConfiguration struct {
|
||||
// This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`.
|
||||
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information.
|
||||
EnforceNodeAllocatable []string
|
||||
// This option specifies the cpu list reserved for the host level system threads and kubernetes related threads.
|
||||
// This provide a "static" CPU list rather than the "dynamic" list by system-reserved and kube-reserved.
|
||||
// This option overwrites CPUs provided by system-reserved and kube-reserved.
|
||||
ReservedSystemCPUs string
|
||||
}
|
||||
|
||||
type KubeletAuthorizationMode string
|
||||
|
5
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
@ -161,6 +161,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.AllowedUnsafeSysctls != nil {
|
||||
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.SystemReserved != nil {
|
||||
in, out := &in.SystemReserved, &out.SystemReserved
|
||||
*out = make(map[string]string, len(*in))
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/types.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/types.go
generated
vendored
@ -146,7 +146,7 @@ type ContainerStats struct {
|
||||
// User defined metrics that are exposed by containers in the pod. Typically, we expect only one container in the pod to be exposing user defined metrics. In the event of multiple containers exposing metrics, they will be combined here.
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
UserDefinedMetrics []UserDefinedMetric `json:"userDefinedMetrics,omitmepty" patchStrategy:"merge" patchMergeKey:"name"`
|
||||
UserDefinedMetrics []UserDefinedMetric `json:"userDefinedMetrics,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
|
||||
}
|
||||
|
||||
// PodReference contains enough information to locate the referenced pod.
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go
generated
vendored
@ -34,28 +34,21 @@ const (
|
||||
// and GA labels to ensure backward compatibility.
|
||||
// TODO: stop applying the beta Arch labels in Kubernetes 1.18.
|
||||
LabelArch = "beta.kubernetes.io/arch"
|
||||
|
||||
// GA versions of the legacy beta labels.
|
||||
// TODO: update kubelet and controllers to set both beta and GA labels, then export these constants
|
||||
labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone"
|
||||
labelZoneRegionGA = "failure-domain.kubernetes.io/region"
|
||||
labelInstanceTypeGA = "kubernetes.io/instance-type"
|
||||
)
|
||||
|
||||
var kubeletLabels = sets.NewString(
|
||||
v1.LabelHostname,
|
||||
v1.LabelZoneFailureDomainStable,
|
||||
v1.LabelZoneRegionStable,
|
||||
v1.LabelZoneFailureDomain,
|
||||
v1.LabelZoneRegion,
|
||||
v1.LabelInstanceType,
|
||||
v1.LabelInstanceTypeStable,
|
||||
v1.LabelOSStable,
|
||||
v1.LabelArchStable,
|
||||
|
||||
LabelOS,
|
||||
LabelArch,
|
||||
|
||||
labelZoneFailureDomainGA,
|
||||
labelZoneRegionGA,
|
||||
labelInstanceTypeGA,
|
||||
)
|
||||
|
||||
var kubeletLabelNamespaces = sets.NewString(
|
||||
|
130
vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go
generated
vendored
130
vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go
generated
vendored
@ -1,130 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package checkpoint
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
|
||||
)
|
||||
|
||||
const (
|
||||
// Delimiter used on checkpoints written to disk
|
||||
delimiter = "_"
|
||||
podPrefix = "Pod"
|
||||
)
|
||||
|
||||
// PodCheckpoint defines the operations to retrieve pod
|
||||
type PodCheckpoint interface {
|
||||
checkpointmanager.Checkpoint
|
||||
GetPod() *v1.Pod
|
||||
}
|
||||
|
||||
// Data to be stored as checkpoint
|
||||
type Data struct {
|
||||
Pod *v1.Pod
|
||||
Checksum checksum.Checksum
|
||||
}
|
||||
|
||||
// NewPodCheckpoint returns new pod checkpoint
|
||||
func NewPodCheckpoint(pod *v1.Pod) PodCheckpoint {
|
||||
return &Data{Pod: pod}
|
||||
}
|
||||
|
||||
// MarshalCheckpoint returns marshalled data
|
||||
func (cp *Data) MarshalCheckpoint() ([]byte, error) {
|
||||
cp.Checksum = checksum.New(*cp.Pod)
|
||||
return json.Marshal(*cp)
|
||||
}
|
||||
|
||||
// UnmarshalCheckpoint returns unmarshalled data
|
||||
func (cp *Data) UnmarshalCheckpoint(blob []byte) error {
|
||||
return json.Unmarshal(blob, cp)
|
||||
}
|
||||
|
||||
// VerifyChecksum verifies that passed checksum is same as calculated checksum
|
||||
func (cp *Data) VerifyChecksum() error {
|
||||
return cp.Checksum.Verify(*cp.Pod)
|
||||
}
|
||||
|
||||
// GetPod retrieves the pod from the checkpoint
|
||||
func (cp *Data) GetPod() *v1.Pod {
|
||||
return cp.Pod
|
||||
}
|
||||
|
||||
// checkAnnotations will validate the checkpoint annotations exist on the Pod
|
||||
func checkAnnotations(pod *v1.Pod) bool {
|
||||
if podAnnotations := pod.GetAnnotations(); podAnnotations != nil {
|
||||
if podAnnotations[core.BootstrapCheckpointAnnotationKey] == "true" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
//getPodKey returns the full qualified path for the pod checkpoint
|
||||
func getPodKey(pod *v1.Pod) string {
|
||||
return fmt.Sprintf("%s%s%v.yaml", podPrefix, delimiter, pod.GetUID())
|
||||
}
|
||||
|
||||
// LoadPods Loads All Checkpoints from disk
|
||||
func LoadPods(cpm checkpointmanager.CheckpointManager) ([]*v1.Pod, error) {
|
||||
pods := make([]*v1.Pod, 0)
|
||||
|
||||
var err error
|
||||
checkpointKeys := []string{}
|
||||
checkpointKeys, err = cpm.ListCheckpoints()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list checkpoints: %v", err)
|
||||
}
|
||||
|
||||
for _, key := range checkpointKeys {
|
||||
checkpoint := NewPodCheckpoint(nil)
|
||||
err := cpm.GetCheckpoint(key, checkpoint)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to retrieve checkpoint for pod %q: %v", key, err)
|
||||
continue
|
||||
}
|
||||
pods = append(pods, checkpoint.GetPod())
|
||||
}
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
// WritePod a checkpoint to a file on disk if annotation is present
|
||||
func WritePod(cpm checkpointmanager.CheckpointManager, pod *v1.Pod) error {
|
||||
var err error
|
||||
if checkAnnotations(pod) {
|
||||
data := NewPodCheckpoint(pod)
|
||||
err = cpm.CreateCheckpoint(getPodKey(pod), data)
|
||||
} else {
|
||||
// This is to handle an edge where a pod update could remove
|
||||
// an annotation and the checkpoint should then be removed.
|
||||
err = cpm.RemoveCheckpoint(getPodKey(pod))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DeletePod deletes a checkpoint from disk if present
|
||||
func DeletePod(cpm checkpointmanager.CheckpointManager, pod *v1.Pod) error {
|
||||
return cpm.RemoveCheckpoint(getPodKey(pod))
|
||||
}
|
110
vendor/k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checkpoint_manager.go
generated
vendored
110
vendor/k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checkpoint_manager.go
generated
vendored
@ -1,110 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package checkpointmanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
|
||||
utilstore "k8s.io/kubernetes/pkg/kubelet/util/store"
|
||||
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
|
||||
)
|
||||
|
||||
// Checkpoint provides the process checkpoint data
|
||||
type Checkpoint interface {
|
||||
MarshalCheckpoint() ([]byte, error)
|
||||
UnmarshalCheckpoint(blob []byte) error
|
||||
VerifyChecksum() error
|
||||
}
|
||||
|
||||
// CheckpointManager provides the interface to manage checkpoint
|
||||
type CheckpointManager interface {
|
||||
// CreateCheckpoint persists checkpoint in CheckpointStore. checkpointKey is the key for utilstore to locate checkpoint.
|
||||
// For file backed utilstore, checkpointKey is the file name to write the checkpoint data.
|
||||
CreateCheckpoint(checkpointKey string, checkpoint Checkpoint) error
|
||||
// GetCheckpoint retrieves checkpoint from CheckpointStore.
|
||||
GetCheckpoint(checkpointKey string, checkpoint Checkpoint) error
|
||||
// WARNING: RemoveCheckpoint will not return error if checkpoint does not exist.
|
||||
RemoveCheckpoint(checkpointKey string) error
|
||||
// ListCheckpoint returns the list of existing checkpoints.
|
||||
ListCheckpoints() ([]string, error)
|
||||
}
|
||||
|
||||
// impl is an implementation of CheckpointManager. It persists checkpoint in CheckpointStore
|
||||
type impl struct {
|
||||
path string
|
||||
store utilstore.Store
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// NewCheckpointManager returns a new instance of a checkpoint manager
|
||||
func NewCheckpointManager(checkpointDir string) (CheckpointManager, error) {
|
||||
fstore, err := utilstore.NewFileStore(checkpointDir, utilfs.DefaultFs{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &impl{path: checkpointDir, store: fstore}, nil
|
||||
}
|
||||
|
||||
// CreateCheckpoint persists checkpoint in CheckpointStore.
|
||||
func (manager *impl) CreateCheckpoint(checkpointKey string, checkpoint Checkpoint) error {
|
||||
manager.mutex.Lock()
|
||||
defer manager.mutex.Unlock()
|
||||
blob, err := checkpoint.MarshalCheckpoint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return manager.store.Write(checkpointKey, blob)
|
||||
}
|
||||
|
||||
// GetCheckpoint retrieves checkpoint from CheckpointStore.
|
||||
func (manager *impl) GetCheckpoint(checkpointKey string, checkpoint Checkpoint) error {
|
||||
manager.mutex.Lock()
|
||||
defer manager.mutex.Unlock()
|
||||
blob, err := manager.store.Read(checkpointKey)
|
||||
if err != nil {
|
||||
if err == utilstore.ErrKeyNotFound {
|
||||
return errors.ErrCheckpointNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
err = checkpoint.UnmarshalCheckpoint(blob)
|
||||
if err == nil {
|
||||
err = checkpoint.VerifyChecksum()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RemoveCheckpoint will not return error if checkpoint does not exist.
|
||||
func (manager *impl) RemoveCheckpoint(checkpointKey string) error {
|
||||
manager.mutex.Lock()
|
||||
defer manager.mutex.Unlock()
|
||||
return manager.store.Delete(checkpointKey)
|
||||
}
|
||||
|
||||
// ListCheckpoints returns the list of existing checkpoints.
|
||||
func (manager *impl) ListCheckpoints() ([]string, error) {
|
||||
manager.mutex.Lock()
|
||||
defer manager.mutex.Unlock()
|
||||
keys, err := manager.store.List()
|
||||
if err != nil {
|
||||
return []string{}, fmt.Errorf("failed to list checkpoint store: %v", err)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
47
vendor/k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum/checksum.go
generated
vendored
47
vendor/k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum/checksum.go
generated
vendored
@ -1,47 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package checksum
|
||||
|
||||
import (
|
||||
"hash/fnv"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
|
||||
hashutil "k8s.io/kubernetes/pkg/util/hash"
|
||||
)
|
||||
|
||||
// Checksum is the data to be stored as checkpoint
|
||||
type Checksum uint64
|
||||
|
||||
// Verify verifies that passed checksum is same as calculated checksum
|
||||
func (cs Checksum) Verify(data interface{}) error {
|
||||
if cs != New(data) {
|
||||
return errors.ErrCorruptCheckpoint
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// New returns the Checksum of checkpoint data
|
||||
func New(data interface{}) Checksum {
|
||||
return Checksum(getChecksum(data))
|
||||
}
|
||||
|
||||
// Get returns calculated checksum of checkpoint data
|
||||
func getChecksum(data interface{}) uint64 {
|
||||
hash := fnv.New32a()
|
||||
hashutil.DeepHashObject(hash, data)
|
||||
return uint64(hash.Sum32())
|
||||
}
|
25
vendor/k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors/errors.go
generated
vendored
25
vendor/k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors/errors.go
generated
vendored
@ -1,25 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errors
|
||||
|
||||
import "fmt"
|
||||
|
||||
// ErrCorruptCheckpoint error is reported when checksum does not match
|
||||
var ErrCorruptCheckpoint = fmt.Errorf("checkpoint is corrupted")
|
||||
|
||||
// ErrCheckpointNotFound is reported when checkpoint is not found for a given key
|
||||
var ErrCheckpointNotFound = fmt.Errorf("checkpoint is not found")
|
149
vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go
generated
vendored
149
vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go
generated
vendored
@ -1,149 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package configmap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/manager"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
type Manager interface {
|
||||
// Get configmap by configmap namespace and name.
|
||||
GetConfigMap(namespace, name string) (*v1.ConfigMap, error)
|
||||
|
||||
// WARNING: Register/UnregisterPod functions should be efficient,
|
||||
// i.e. should not block on network operations.
|
||||
|
||||
// RegisterPod registers all configmaps from a given pod.
|
||||
RegisterPod(pod *v1.Pod)
|
||||
|
||||
// UnregisterPod unregisters configmaps from a given pod that are not
|
||||
// used by any other registered pod.
|
||||
UnregisterPod(pod *v1.Pod)
|
||||
}
|
||||
|
||||
// simpleConfigMapManager implements ConfigMap Manager interface with
|
||||
// simple operations to apiserver.
|
||||
type simpleConfigMapManager struct {
|
||||
kubeClient clientset.Interface
|
||||
}
|
||||
|
||||
func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager {
|
||||
return &simpleConfigMapManager{kubeClient: kubeClient}
|
||||
}
|
||||
|
||||
func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
|
||||
return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) {
|
||||
}
|
||||
|
||||
func (s *simpleConfigMapManager) UnregisterPod(pod *v1.Pod) {
|
||||
}
|
||||
|
||||
// configMapManager keeps a cache of all configmaps necessary
|
||||
// for registered pods. Different implementation of the store
|
||||
// may result in different semantics for freshness of configmaps
|
||||
// (e.g. ttl-based implementation vs watch-based implementation).
|
||||
type configMapManager struct {
|
||||
manager manager.Manager
|
||||
}
|
||||
|
||||
func (c *configMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
|
||||
object, err := c.manager.GetObject(namespace, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if configmap, ok := object.(*v1.ConfigMap); ok {
|
||||
return configmap, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected object type: %v", object)
|
||||
}
|
||||
|
||||
func (c *configMapManager) RegisterPod(pod *v1.Pod) {
|
||||
c.manager.RegisterPod(pod)
|
||||
}
|
||||
|
||||
func (c *configMapManager) UnregisterPod(pod *v1.Pod) {
|
||||
c.manager.UnregisterPod(pod)
|
||||
}
|
||||
|
||||
func getConfigMapNames(pod *v1.Pod) sets.String {
|
||||
result := sets.NewString()
|
||||
podutil.VisitPodConfigmapNames(pod, func(name string) bool {
|
||||
result.Insert(name)
|
||||
return true
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
const (
|
||||
defaultTTL = time.Minute
|
||||
)
|
||||
|
||||
// NewCachingConfigMapManager creates a manager that keeps a cache of all configmaps
|
||||
// necessary for registered pods.
|
||||
// It implement the following logic:
|
||||
// - whenever a pod is create or updated, the cached versions of all configmaps
|
||||
// are invalidated
|
||||
// - every GetObject() call tries to fetch the value from local cache; if it is
|
||||
// not there, invalidated or too old, we fetch it from apiserver and refresh the
|
||||
// value in cache; otherwise it is just fetched from cache
|
||||
func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager {
|
||||
getConfigMap := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
|
||||
return kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts)
|
||||
}
|
||||
configMapStore := manager.NewObjectStore(getConfigMap, clock.RealClock{}, getTTL, defaultTTL)
|
||||
return &configMapManager{
|
||||
manager: manager.NewCacheBasedManager(configMapStore, getConfigMapNames),
|
||||
}
|
||||
}
|
||||
|
||||
// NewWatchingConfigMapManager creates a manager that keeps a cache of all configmaps
|
||||
// necessary for registered pods.
|
||||
// It implements the following logic:
|
||||
// - whenever a pod is created or updated, we start inidvidual watches for all
|
||||
// referenced objects that aren't referenced from other registered pods
|
||||
// - every GetObject() returns a value from local cache propagated via watches
|
||||
func NewWatchingConfigMapManager(kubeClient clientset.Interface) Manager {
|
||||
listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.CoreV1().ConfigMaps(namespace).List(opts)
|
||||
}
|
||||
watchConfigMap := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
|
||||
return kubeClient.CoreV1().ConfigMaps(namespace).Watch(opts)
|
||||
}
|
||||
newConfigMap := func() runtime.Object {
|
||||
return &v1.ConfigMap{}
|
||||
}
|
||||
gr := corev1.Resource("configmap")
|
||||
return &configMapManager{
|
||||
manager: manager.NewWatchBasedManager(listConfigMap, watchConfigMap, newConfigMap, gr, getConfigMapNames),
|
||||
}
|
||||
}
|
40
vendor/k8s.io/kubernetes/pkg/kubelet/configmap/fake_manager.go
generated
vendored
40
vendor/k8s.io/kubernetes/pkg/kubelet/configmap/fake_manager.go
generated
vendored
@ -1,40 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package configmap
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// fakeManager implements Manager interface for testing purposes.
|
||||
// simple operations to apiserver.
|
||||
type fakeManager struct {
|
||||
}
|
||||
|
||||
func NewFakeManager() Manager {
|
||||
return &fakeManager{}
|
||||
}
|
||||
|
||||
func (s *fakeManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *fakeManager) RegisterPod(pod *v1.Pod) {
|
||||
}
|
||||
|
||||
func (s *fakeManager) UnregisterPod(pod *v1.Pod) {
|
||||
}
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/container/cache.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/container/cache.go
generated
vendored
@ -30,7 +30,7 @@ import (
|
||||
// has no states known by the runtime, Cache returns an empty PodStatus object
|
||||
// with ID populated.
|
||||
//
|
||||
// Cache provides two methods to retrive the PodStatus: the non-blocking Get()
|
||||
// Cache provides two methods to retrieve the PodStatus: the non-blocking Get()
|
||||
// and the blocking GetNewerThan() method. The component responsible for
|
||||
// populating the cache is expected to call Delete() to explicitly free the
|
||||
// cache entries.
|
||||
|
64
vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
generated
vendored
64
vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
generated
vendored
@ -17,22 +17,25 @@ limitations under the License.
|
||||
package container
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/tools/record"
|
||||
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
hashutil "k8s.io/kubernetes/pkg/util/hash"
|
||||
"k8s.io/kubernetes/third_party/forked/golang/expansion"
|
||||
utilsnet "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
// HandlerRunner runs a lifecycle handler for a container.
|
||||
@ -43,7 +46,7 @@ type HandlerRunner interface {
|
||||
// RuntimeHelper wraps kubelet to make container runtime
|
||||
// able to get necessary informations like the RunContainerOptions, DNS settings, Host IP.
|
||||
type RuntimeHelper interface {
|
||||
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
|
||||
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
|
||||
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
|
||||
// GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
|
||||
// of a pod.
|
||||
@ -91,9 +94,13 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus
|
||||
|
||||
// HashContainer returns the hash of the container. It is used to compare
|
||||
// the running container with its desired spec.
|
||||
// Note: remember to update hashValues in container_hash_test.go as well.
|
||||
func HashContainer(container *v1.Container) uint64 {
|
||||
hash := fnv.New32a()
|
||||
hashutil.DeepHashObject(hash, *container)
|
||||
// Omit nil or empty field when calculating hash value
|
||||
// Please see https://github.com/kubernetes/kubernetes/issues/53644
|
||||
containerJson, _ := json.Marshal(container)
|
||||
hashutil.DeepHashObject(hash, containerJson)
|
||||
return uint64(hash.Sum32())
|
||||
}
|
||||
|
||||
@ -278,29 +285,28 @@ func FormatPod(pod *Pod) string {
|
||||
|
||||
// GetContainerSpec gets the container spec by containerName.
|
||||
func GetContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
|
||||
for i, c := range pod.Spec.Containers {
|
||||
var containerSpec *v1.Container
|
||||
podutil.VisitContainers(&pod.Spec, func(c *v1.Container) bool {
|
||||
if containerName == c.Name {
|
||||
return &pod.Spec.Containers[i]
|
||||
containerSpec = c
|
||||
return false
|
||||
}
|
||||
}
|
||||
for i, c := range pod.Spec.InitContainers {
|
||||
if containerName == c.Name {
|
||||
return &pod.Spec.InitContainers[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return true
|
||||
})
|
||||
return containerSpec
|
||||
}
|
||||
|
||||
// HasPrivilegedContainer returns true if any of the containers in the pod are privileged.
|
||||
func HasPrivilegedContainer(pod *v1.Pod) bool {
|
||||
for _, c := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
|
||||
if c.SecurityContext != nil &&
|
||||
c.SecurityContext.Privileged != nil &&
|
||||
*c.SecurityContext.Privileged {
|
||||
return true
|
||||
var hasPrivileged bool
|
||||
podutil.VisitContainers(&pod.Spec, func(c *v1.Container) bool {
|
||||
if c.SecurityContext != nil && c.SecurityContext.Privileged != nil && *c.SecurityContext.Privileged {
|
||||
hasPrivileged = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
return true
|
||||
})
|
||||
return hasPrivileged
|
||||
}
|
||||
|
||||
// MakePortMappings creates internal port mapping from api port mapping.
|
||||
@ -314,16 +320,28 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) {
|
||||
HostIP: p.HostIP,
|
||||
}
|
||||
|
||||
// We need to determine the address family this entry applies to. We do this to ensure
|
||||
// duplicate containerPort / protocol rules work across different address families.
|
||||
// https://github.com/kubernetes/kubernetes/issues/82373
|
||||
family := "any"
|
||||
if p.HostIP != "" {
|
||||
if utilsnet.IsIPv6String(p.HostIP) {
|
||||
family = "v6"
|
||||
} else {
|
||||
family = "v4"
|
||||
}
|
||||
}
|
||||
|
||||
// We need to create some default port name if it's not specified, since
|
||||
// this is necessary for rkt.
|
||||
// http://issue.k8s.io/7710
|
||||
// this is necessary for the dockershim CNI driver.
|
||||
// https://github.com/kubernetes/kubernetes/pull/82374#issuecomment-529496888
|
||||
if p.Name == "" {
|
||||
pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort)
|
||||
pm.Name = fmt.Sprintf("%s-%s-%s:%d", container.Name, family, p.Protocol, p.ContainerPort)
|
||||
} else {
|
||||
pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name)
|
||||
}
|
||||
|
||||
// Protect against exposing the same protocol-port more than once in a container.
|
||||
// Protect against a port name being used more than once in a container.
|
||||
if _, ok := names[pm.Name]; ok {
|
||||
klog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
|
||||
continue
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go
generated
vendored
@ -20,8 +20,10 @@ import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
ref "k8s.io/client-go/tools/reference"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
var ImplicitContainerPrefix string = "implicitly required container "
|
||||
@ -67,5 +69,16 @@ func fieldPath(pod *v1.Pod, container *v1.Container) (string, error) {
|
||||
return fmt.Sprintf("spec.initContainers{%s}", here.Name), nil
|
||||
}
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
|
||||
for i := range pod.Spec.EphemeralContainers {
|
||||
here := &pod.Spec.EphemeralContainers[i]
|
||||
if here.Name == container.Name {
|
||||
if here.Name == "" {
|
||||
return fmt.Sprintf("spec.ephemeralContainers[%d]", i), nil
|
||||
}
|
||||
return fmt.Sprintf("spec.ephemeralContainers{%s}", here.Name), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("container %q not found in pod %s/%s", container.Name, pod.Namespace, pod.Name)
|
||||
}
|
||||
|
7
vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go
generated
vendored
7
vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go
generated
vendored
@ -63,6 +63,9 @@ type Runtime interface {
|
||||
// Type returns the type of the container runtime.
|
||||
Type() string
|
||||
|
||||
//SupportsSingleFileMapping returns whether the container runtime supports single file mappings or not.
|
||||
SupportsSingleFileMapping() bool
|
||||
|
||||
// Version returns the version information of the container runtime.
|
||||
Version() (Version, error)
|
||||
|
||||
@ -273,8 +276,8 @@ type PodStatus struct {
|
||||
Name string
|
||||
// Namespace of the pod.
|
||||
Namespace string
|
||||
// IP of the pod.
|
||||
IP string
|
||||
// All IPs assigned to this pod
|
||||
IPs []string
|
||||
// Status of containers in the pod.
|
||||
ContainerStatuses []*ContainerStatus
|
||||
// Status of the pod sandbox.
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go
generated
vendored
@ -116,11 +116,11 @@ func (p *PodSyncResult) Fail(err error) {
|
||||
func (p *PodSyncResult) Error() error {
|
||||
errlist := []error{}
|
||||
if p.SyncError != nil {
|
||||
errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v\n", p.SyncError))
|
||||
errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v", p.SyncError))
|
||||
}
|
||||
for _, result := range p.SyncResults {
|
||||
if result.Error != nil {
|
||||
errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q\n", result.Action, result.Target,
|
||||
errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q", result.Action, result.Target,
|
||||
result.Error, result.Message))
|
||||
}
|
||||
}
|
||||
|
109
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go
generated
vendored
109
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go
generated
vendored
@ -20,7 +20,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -49,79 +50,87 @@ const (
|
||||
var (
|
||||
// DockerOperationsLatency collects operation latency numbers by operation
|
||||
// type.
|
||||
DockerOperationsLatency = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsLatencyKey,
|
||||
Help: "Latency in seconds of Docker operations. Broken down by operation type.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
DockerOperationsLatency = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsLatencyKey,
|
||||
Help: "Latency in seconds of Docker operations. Broken down by operation type.",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
// DockerOperations collects operation counts by operation type.
|
||||
DockerOperations = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsKey,
|
||||
Help: "Cumulative number of Docker operations by operation type.",
|
||||
DockerOperations = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsKey,
|
||||
Help: "Cumulative number of Docker operations by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
// DockerOperationsErrors collects operation errors by operation
|
||||
// type.
|
||||
DockerOperationsErrors = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsErrorsKey,
|
||||
Help: "Cumulative number of Docker operation errors by operation type.",
|
||||
DockerOperationsErrors = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsErrorsKey,
|
||||
Help: "Cumulative number of Docker operation errors by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
// DockerOperationsTimeout collects operation timeouts by operation type.
|
||||
DockerOperationsTimeout = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsTimeoutKey,
|
||||
Help: "Cumulative number of Docker operation timeout by operation type.",
|
||||
DockerOperationsTimeout = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DockerOperationsTimeoutKey,
|
||||
Help: "Cumulative number of Docker operation timeout by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
|
||||
// DeprecatedDockerOperationsLatency collects operation latency numbers by operation
|
||||
// type.
|
||||
DeprecatedDockerOperationsLatency = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsLatencyKey,
|
||||
Help: "(Deprecated) Latency in microseconds of Docker operations. Broken down by operation type.",
|
||||
DeprecatedDockerOperationsLatency = metrics.NewSummaryVec(
|
||||
&metrics.SummaryOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsLatencyKey,
|
||||
Help: "(Deprecated) Latency in microseconds of Docker operations. Broken down by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
// DeprecatedDockerOperations collects operation counts by operation type.
|
||||
DeprecatedDockerOperations = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsKey,
|
||||
Help: "(Deprecated) Cumulative number of Docker operations by operation type.",
|
||||
DeprecatedDockerOperations = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsKey,
|
||||
Help: "(Deprecated) Cumulative number of Docker operations by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
// DeprecatedDockerOperationsErrors collects operation errors by operation
|
||||
// type.
|
||||
DeprecatedDockerOperationsErrors = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsErrorsKey,
|
||||
Help: "(Deprecated) Cumulative number of Docker operation errors by operation type.",
|
||||
DeprecatedDockerOperationsErrors = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsErrorsKey,
|
||||
Help: "(Deprecated) Cumulative number of Docker operation errors by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
// DeprecatedDockerOperationsTimeout collects operation timeouts by operation type.
|
||||
DeprecatedDockerOperationsTimeout = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsTimeoutKey,
|
||||
Help: "(Deprecated) Cumulative number of Docker operation timeout by operation type.",
|
||||
DeprecatedDockerOperationsTimeout = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: kubeletSubsystem,
|
||||
Name: DeprecatedDockerOperationsTimeoutKey,
|
||||
Help: "(Deprecated) Cumulative number of Docker operation timeout by operation type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
@ -132,14 +141,14 @@ var registerMetrics sync.Once
|
||||
// Register all metrics.
|
||||
func Register() {
|
||||
registerMetrics.Do(func() {
|
||||
prometheus.MustRegister(DockerOperationsLatency)
|
||||
prometheus.MustRegister(DockerOperations)
|
||||
prometheus.MustRegister(DockerOperationsErrors)
|
||||
prometheus.MustRegister(DockerOperationsTimeout)
|
||||
prometheus.MustRegister(DeprecatedDockerOperationsLatency)
|
||||
prometheus.MustRegister(DeprecatedDockerOperations)
|
||||
prometheus.MustRegister(DeprecatedDockerOperationsErrors)
|
||||
prometheus.MustRegister(DeprecatedDockerOperationsTimeout)
|
||||
legacyregistry.MustRegister(DockerOperationsLatency)
|
||||
legacyregistry.MustRegister(DockerOperations)
|
||||
legacyregistry.MustRegister(DockerOperationsErrors)
|
||||
legacyregistry.MustRegister(DockerOperationsTimeout)
|
||||
legacyregistry.MustRegister(DeprecatedDockerOperationsLatency)
|
||||
legacyregistry.MustRegister(DeprecatedDockerOperations)
|
||||
legacyregistry.MustRegister(DeprecatedDockerOperationsErrors)
|
||||
legacyregistry.MustRegister(DeprecatedDockerOperationsTimeout)
|
||||
})
|
||||
}
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go
generated
vendored
@ -58,8 +58,6 @@ const (
|
||||
WarnAlreadyMountedVolume = "AlreadyMountedVolume"
|
||||
SuccessfulAttachVolume = "SuccessfulAttachVolume"
|
||||
SuccessfulMountVolume = "SuccessfulMountVolume"
|
||||
InsufficientFreeCPU = "InsufficientFreeCPU"
|
||||
InsufficientFreeMemory = "InsufficientFreeMemory"
|
||||
NodeRebooted = "Rebooted"
|
||||
ContainerGCFailed = "ContainerGCFailed"
|
||||
ImageGCFailed = "ImageGCFailed"
|
||||
|
15
vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go
generated
vendored
15
vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go
generated
vendored
@ -18,7 +18,6 @@ package lifecycle
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
@ -31,6 +30,11 @@ import (
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
utilio "k8s.io/utils/io"
|
||||
)
|
||||
|
||||
const (
|
||||
maxRespBodyLength = 10 * 1 << 10 // 10KB
|
||||
)
|
||||
|
||||
type HandlerRunner struct {
|
||||
@ -70,7 +74,7 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod,
|
||||
}
|
||||
return msg, err
|
||||
default:
|
||||
err := fmt.Errorf("Invalid handler: %v", handler)
|
||||
err := fmt.Errorf("invalid handler: %v", handler)
|
||||
msg := fmt.Sprintf("Cannot run handler: %v", err)
|
||||
klog.Errorf(msg)
|
||||
return msg, err
|
||||
@ -108,10 +112,10 @@ func (hr *HandlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, ha
|
||||
klog.Errorf("Unable to get pod info, event handlers may be invalid.")
|
||||
return "", err
|
||||
}
|
||||
if status.IP == "" {
|
||||
if len(status.IPs) == 0 {
|
||||
return "", fmt.Errorf("failed to find networking container: %v", status)
|
||||
}
|
||||
host = status.IP
|
||||
host = status.IPs[0]
|
||||
}
|
||||
var port int
|
||||
if handler.HTTPGet.Port.Type == intstr.String && len(handler.HTTPGet.Port.StrVal) == 0 {
|
||||
@ -133,7 +137,8 @@ func getHttpRespBody(resp *http.Response) string {
|
||||
return ""
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if bytes, err := ioutil.ReadAll(resp.Body); err == nil {
|
||||
bytes, err := utilio.ReadAtMost(resp.Body, maxRespBodyLength)
|
||||
if err == nil || err == utilio.ErrLimitReached {
|
||||
return string(bytes)
|
||||
}
|
||||
return ""
|
||||
|
704
vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go
generated
vendored
704
vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go
generated
vendored
@ -21,7 +21,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
@ -30,6 +32,7 @@ import (
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
// This const block defines the metric names for the kubelet metrics.
|
||||
const (
|
||||
KubeletSubsystem = "kubelet"
|
||||
NodeNameKey = "node_name"
|
||||
@ -41,7 +44,9 @@ const (
|
||||
PLEGRelistDurationKey = "pleg_relist_duration_seconds"
|
||||
PLEGDiscardEventsKey = "pleg_discard_events"
|
||||
PLEGRelistIntervalKey = "pleg_relist_interval_seconds"
|
||||
EvictionsKey = "evictions"
|
||||
EvictionStatsAgeKey = "eviction_stats_age_seconds"
|
||||
PreemptionsKey = "preemptions"
|
||||
DeprecatedPodWorkerLatencyKey = "pod_worker_latency_microseconds"
|
||||
DeprecatedPodStartLatencyKey = "pod_start_latency_microseconds"
|
||||
DeprecatedCgroupManagerOperationsKey = "cgroup_manager_latency_microseconds"
|
||||
@ -85,326 +90,467 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
NodeName = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: NodeNameKey,
|
||||
Help: "The node's name. The count is always 1.",
|
||||
// NodeName is a Gauge that tracks the ode's name. The count is always 1.
|
||||
NodeName = metrics.NewGaugeVec(
|
||||
&metrics.GaugeOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: NodeNameKey,
|
||||
Help: "The node's name. The count is always 1.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{NodeLabelKey},
|
||||
)
|
||||
ContainersPerPodCount = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: "containers_per_pod_count",
|
||||
Help: "The number of containers per pod.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// ContainersPerPodCount is a Counter that tracks the number of containers per pod.
|
||||
ContainersPerPodCount = metrics.NewHistogram(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: "containers_per_pod_count",
|
||||
Help: "The number of containers per pod.",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
PodWorkerDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PodWorkerDurationKey,
|
||||
Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// PodWorkerDuration is a Histogram that tracks the duration (in seconds) in takes to sync a single pod.
|
||||
// Broken down by the operation type.
|
||||
PodWorkerDuration = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PodWorkerDurationKey,
|
||||
Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
PodStartDuration = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PodStartDurationKey,
|
||||
Help: "Duration in seconds for a single pod to go from pending to running.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// PodStartDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to go from pending to running.
|
||||
PodStartDuration = metrics.NewHistogram(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PodStartDurationKey,
|
||||
Help: "Duration in seconds for a single pod to go from pending to running.",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
CgroupManagerDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: CgroupManagerOperationsKey,
|
||||
Help: "Duration in seconds for cgroup manager operations. Broken down by method.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// CgroupManagerDuration is a Histogram that tracks the duration (in seconds) it takes for cgroup manager operations to complete.
|
||||
// Broken down by method.
|
||||
CgroupManagerDuration = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: CgroupManagerOperationsKey,
|
||||
Help: "Duration in seconds for cgroup manager operations. Broken down by method.",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"operation_type"},
|
||||
)
|
||||
PodWorkerStartDuration = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PodWorkerStartDurationKey,
|
||||
Help: "Duration in seconds from seeing a pod to starting a worker.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// PodWorkerStartDuration is a Histogram that tracks the duration (in seconds) it takes from seeing a pod to starting a worker.
|
||||
PodWorkerStartDuration = metrics.NewHistogram(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PodWorkerStartDurationKey,
|
||||
Help: "Duration in seconds from seeing a pod to starting a worker.",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
PLEGRelistDuration = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PLEGRelistDurationKey,
|
||||
Help: "Duration in seconds for relisting pods in PLEG.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// PLEGRelistDuration is a Histogram that tracks the duration (in seconds) it takes for relisting pods in the Kubelet's
|
||||
// Pod Lifecycle Event Generator (PLEG).
|
||||
PLEGRelistDuration = metrics.NewHistogram(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PLEGRelistDurationKey,
|
||||
Help: "Duration in seconds for relisting pods in PLEG.",
|
||||
Buckets: metrics.DefBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
PLEGDiscardEvents = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PLEGDiscardEventsKey,
|
||||
Help: "The number of discard events in PLEG.",
|
||||
// PLEGDiscardEvents is a Histogram that tracks the duration (in seconds) it takes for discarding events in the Kubelet's
|
||||
// Pod Lifecycle Event Generator (PLEG).
|
||||
PLEGDiscardEvents = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PLEGDiscardEventsKey,
|
||||
Help: "The number of discard events in PLEG.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
PLEGRelistInterval = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: PLEGRelistIntervalKey,
|
||||
Help: "Interval in seconds between relisting in PLEG.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
// PLEGRelistInterval is a Histogram that tracks the intervals (in seconds) between relisting in the Kubelet's
|
||||
	// Pod Lifecycle Event Generator (PLEG).
	PLEGRelistInterval = metrics.NewHistogram(
		&metrics.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: PLEGRelistIntervalKey,
			Help: "Interval in seconds between relisting in PLEG.",
			Buckets: metrics.DefBuckets,
			StabilityLevel: metrics.ALPHA,
		},
	)
	// Metrics of remote runtime operations.
	RuntimeOperations = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: RuntimeOperationsKey,
			Help: "Cumulative number of runtime operations by operation type.",
	// RuntimeOperations is a Counter that tracks the cumulative number of remote runtime operations.
	// Broken down by operation type.
	RuntimeOperations = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: RuntimeOperationsKey,
			Help: "Cumulative number of runtime operations by operation type.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	RuntimeOperationsDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: RuntimeOperationsDurationKey,
			Help: "Duration in seconds of runtime operations. Broken down by operation type.",
			Buckets: prometheus.DefBuckets,
	// RuntimeOperationsDuration is a Histogram that tracks the duration (in seconds) for remote runtime operations to complete.
	// Broken down by operation type.
	RuntimeOperationsDuration = metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: RuntimeOperationsDurationKey,
			Help: "Duration in seconds of runtime operations. Broken down by operation type.",
			Buckets: metrics.DefBuckets,
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	RuntimeOperationsErrors = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: RuntimeOperationsErrorsKey,
			Help: "Cumulative number of runtime operation errors by operation type.",
	// RuntimeOperationsErrors is a Counter that tracks the cumulative number of remote runtime operations errors.
	// Broken down by operation type.
	RuntimeOperationsErrors = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: RuntimeOperationsErrorsKey,
			Help: "Cumulative number of runtime operation errors by operation type.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	EvictionStatsAge = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: EvictionStatsAgeKey,
			Help: "Time between when stats are collected, and when pod is evicted based on those stats by eviction signal",
			Buckets: prometheus.DefBuckets,
	// Evictions is a Counter that tracks the cumulative number of pod evictions initiated by the kubelet.
	// Broken down by eviction signal.
	Evictions = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: EvictionsKey,
			Help: "Cumulative number of pod evictions by eviction signal",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"eviction_signal"},
	)
	DevicePluginRegistrationCount = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DevicePluginRegistrationCountKey,
			Help: "Cumulative number of device plugin registrations. Broken down by resource name.",
		},
		[]string{"resource_name"},
	)
	DevicePluginAllocationDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: DevicePluginAllocationDurationKey,
			Help: "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"resource_name"},
	)

	DeprecatedPodWorkerLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPodWorkerLatencyKey,
			Help: "(Deprecated) Latency in microseconds to sync a single pod. Broken down by operation type: create, update, or sync",
		},
		[]string{"operation_type"},
	)
	DeprecatedPodStartLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPodStartLatencyKey,
			Help: "(Deprecated) Latency in microseconds for a single pod to go from pending to running.",
		},
	)
	DeprecatedCgroupManagerLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedCgroupManagerOperationsKey,
			Help: "(Deprecated) Latency in microseconds for cgroup manager operations. Broken down by method.",
		},
		[]string{"operation_type"},
	)
	DeprecatedPodWorkerStartLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPodWorkerStartLatencyKey,
			Help: "(Deprecated) Latency in microseconds from seeing a pod to starting a worker.",
		},
	)
	DeprecatedPLEGRelistLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPLEGRelistLatencyKey,
			Help: "(Deprecated) Latency in microseconds for relisting pods in PLEG.",
		},
	)
	DeprecatedPLEGRelistInterval = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPLEGRelistIntervalKey,
			Help: "(Deprecated) Interval in microseconds between relisting in PLEG.",
		},
	)
	DeprecatedRuntimeOperations = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedRuntimeOperationsKey,
			Help: "(Deprecated) Cumulative number of runtime operations by operation type.",
		},
		[]string{"operation_type"},
	)
	DeprecatedRuntimeOperationsLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedRuntimeOperationsLatencyKey,
			Help: "(Deprecated) Latency in microseconds of runtime operations. Broken down by operation type.",
		},
		[]string{"operation_type"},
	)
	DeprecatedRuntimeOperationsErrors = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedRuntimeOperationsErrorsKey,
			Help: "(Deprecated) Cumulative number of runtime operation errors by operation type.",
		},
		[]string{"operation_type"},
	)
	DeprecatedEvictionStatsAge = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedEvictionStatsAgeKey,
			Help: "(Deprecated) Time between when stats are collected, and when pod is evicted based on those stats by eviction signal",
	// EvictionStatsAge is a Histogram that tracks the time (in seconds) between when stats are collected and when a pod is evicted
	// based on those stats. Broken down by eviction signal.
	EvictionStatsAge = metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: EvictionStatsAgeKey,
			Help: "Time between when stats are collected, and when pod is evicted based on those stats by eviction signal",
			Buckets: metrics.DefBuckets,
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"eviction_signal"},
	)
	DeprecatedDevicePluginRegistrationCount = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedDevicePluginRegistrationCountKey,
			Help: "(Deprecated) Cumulative number of device plugin registrations. Broken down by resource name.",
	// Preemptions is a Counter that tracks the cumulative number of pod preemptions initiated by the kubelet.
	// Broken down by preemption signal. A preemption is only recorded for one resource, the sum of all signals
	// is the number of preemptions on the given node.
	Preemptions = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: PreemptionsKey,
			Help: "Cumulative number of pod preemptions by preemption resource",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"preemption_signal"},
	)
	// DevicePluginRegistrationCount is a Counter that tracks the cumulative number of device plugin registrations.
	// Broken down by resource name.
	DevicePluginRegistrationCount = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DevicePluginRegistrationCountKey,
			Help: "Cumulative number of device plugin registrations. Broken down by resource name.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"resource_name"},
	)
	DeprecatedDevicePluginAllocationLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedDevicePluginAllocationLatencyKey,
			Help: "(Deprecated) Latency in microseconds to serve a device plugin Allocation request. Broken down by resource name.",
	// DevicePluginAllocationDuration is a Histogram that tracks the duration (in seconds) to serve a device plugin allocation request.
	// Broken down by resource name.
	DevicePluginAllocationDuration = metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: DevicePluginAllocationDurationKey,
			Help: "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.",
			Buckets: metrics.DefBuckets,
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"resource_name"},
	)
	// DeprecatedPodWorkerLatency is a Summary that tracks the latency (in microseconds) to sync a single pod.
	// Broken down by operation type. This metric is deprecated.
	DeprecatedPodWorkerLatency = metrics.NewSummaryVec(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPodWorkerLatencyKey,
			Help: "(Deprecated) Latency in microseconds to sync a single pod. Broken down by operation type: create, update, or sync",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	// DeprecatedPodStartLatency is a Summary that tracks the latency (in microseconds) for a single pod to go from pending to running.
	// This metric is deprecated.
	DeprecatedPodStartLatency = metrics.NewSummary(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPodStartLatencyKey,
			Help: "(Deprecated) Latency in microseconds for a single pod to go from pending to running.",
			StabilityLevel: metrics.ALPHA,
		},
	)
	// DeprecatedCgroupManagerLatency is a Summary that tracks the latency (in microseconds) for cgroup manager operations to complete.
	// Broken down by operation type. This metric is deprecated.
	DeprecatedCgroupManagerLatency = metrics.NewSummaryVec(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedCgroupManagerOperationsKey,
			Help: "(Deprecated) Latency in microseconds for cgroup manager operations. Broken down by method.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	// DeprecatedPodWorkerStartLatency is a Summary that tracks the latency (in microseconds) from seeing a pod to starting a worker.
	// This metric is deprecated.
	DeprecatedPodWorkerStartLatency = metrics.NewSummary(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPodWorkerStartLatencyKey,
			Help: "(Deprecated) Latency in microseconds from seeing a pod to starting a worker.",
			StabilityLevel: metrics.ALPHA,
		},
	)
	// DeprecatedPLEGRelistLatency is a Summary that tracks the latency (in microseconds) for relisting pods in PLEG.
	// This metric is deprecated.
	DeprecatedPLEGRelistLatency = metrics.NewSummary(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPLEGRelistLatencyKey,
			Help: "(Deprecated) Latency in microseconds for relisting pods in PLEG.",
			StabilityLevel: metrics.ALPHA,
		},
	)
	// DeprecatedPLEGRelistInterval is a Summary that tracks the interval (in microseconds) between relistings in PLEG.
	// This metric is deprecated.
	DeprecatedPLEGRelistInterval = metrics.NewSummary(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedPLEGRelistIntervalKey,
			Help: "(Deprecated) Interval in microseconds between relisting in PLEG.",
			StabilityLevel: metrics.ALPHA,
		},
	)
	// DeprecatedRuntimeOperations is a Counter that tracks the cumulative number of remote runtime operations.
	// Broken down by operation type. This metric is deprecated.
	DeprecatedRuntimeOperations = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedRuntimeOperationsKey,
			Help: "(Deprecated) Cumulative number of runtime operations by operation type.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	// DeprecatedRuntimeOperationsLatency is a Summary that tracks the latency (in microseconds) of remote runtime operations
	// to complete. Broken down by operation type. This metric is deprecated.
	DeprecatedRuntimeOperationsLatency = metrics.NewSummaryVec(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedRuntimeOperationsLatencyKey,
			Help: "(Deprecated) Latency in microseconds of runtime operations. Broken down by operation type.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	// DeprecatedRuntimeOperationsErrors is a Counter that tracks the cumulative number of remote runtime operation errors.
	// Broken down by operation type. This metric is deprecated.
	DeprecatedRuntimeOperationsErrors = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedRuntimeOperationsErrorsKey,
			Help: "(Deprecated) Cumulative number of runtime operation errors by operation type.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"operation_type"},
	)
	// DeprecatedEvictionStatsAge is a Summary that tracks the time (in microseconds) between when stats are collected and when a pod
	// is evicted based on those stats. Broken down by eviction signal. This metric is deprecated.
	DeprecatedEvictionStatsAge = metrics.NewSummaryVec(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedEvictionStatsAgeKey,
			Help: "(Deprecated) Time between when stats are collected, and when pod is evicted based on those stats by eviction signal",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"eviction_signal"},
	)
	// DeprecatedDevicePluginRegistrationCount is a Counter that tracks the cumulative number of device plugin registrations.
	// Broken down by resource name. This metric is deprecated.
	DeprecatedDevicePluginRegistrationCount = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedDevicePluginRegistrationCountKey,
			Help: "(Deprecated) Cumulative number of device plugin registrations. Broken down by resource name.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"resource_name"},
	)
	// DeprecatedDevicePluginAllocationLatency is a Summary that tracks the latency (in microseconds) for serving device plugin allocation requests.
	// Broken down by resource name. This metric is deprecated.
	DeprecatedDevicePluginAllocationLatency = metrics.NewSummaryVec(
		&metrics.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name: DeprecatedDevicePluginAllocationLatencyKey,
			Help: "(Deprecated) Latency in microseconds to serve a device plugin Allocation request. Broken down by resource name.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"resource_name"},
	)

	// Metrics for node config

	AssignedConfig = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: AssignedConfigKey,
			Help: "The node's understanding of intended config. The count is always 1.",
	// AssignedConfig is a Gauge that is set 1 if the Kubelet has a NodeConfig assigned.
	AssignedConfig = metrics.NewGaugeVec(
		&metrics.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: AssignedConfigKey,
			Help: "The node's understanding of intended config. The count is always 1.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
	)
	ActiveConfig = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: ActiveConfigKey,
			Help: "The config source the node is actively using. The count is always 1.",
	// ActiveConfig is a Gauge that is set to 1 if the Kubelet has an active NodeConfig.
	ActiveConfig = metrics.NewGaugeVec(
		&metrics.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: ActiveConfigKey,
			Help: "The config source the node is actively using. The count is always 1.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
	)
	LastKnownGoodConfig = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: LastKnownGoodConfigKey,
			Help: "The config source the node will fall back to when it encounters certain errors. The count is always 1.",
	// LastKnownGoodConfig is a Gauge that is set to 1 if the Kubelet has a NodeConfig it can fall back to if there
	// are certain errors.
	LastKnownGoodConfig = metrics.NewGaugeVec(
		&metrics.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: LastKnownGoodConfigKey,
			Help: "The config source the node will fall back to when it encounters certain errors. The count is always 1.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
	)
	ConfigError = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: ConfigErrorKey,
			Help: "This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise.",
	// ConfigError is a Gauge that is set to 1 if the node is experiencing a configuration-related error.
	ConfigError = metrics.NewGauge(
		&metrics.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: ConfigErrorKey,
			Help: "This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise.",
			StabilityLevel: metrics.ALPHA,
		},
	)
	RunPodSandboxDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
	// RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations.
	// Broken down by RuntimeClass.
	RunPodSandboxDuration = metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Subsystem: KubeletSubsystem,
			Name: RunPodSandboxDurationKey,
			Help: "Duration in seconds of the run_podsandbox operations. Broken down by RuntimeClass.",
			// Use DefBuckets for now, will customize the buckets if necessary.
			Buckets: prometheus.DefBuckets,
			Buckets: metrics.DefBuckets,
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"runtime_handler"},
	)
	RunPodSandboxErrors = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: RunPodSandboxErrorsKey,
			Help: "Cumulative number of the run_podsandbox operation errors by RuntimeClass.",
	// RunPodSandboxErrors is a Counter that tracks the cumulative number of Pod Sandbox operations errors.
	// Broken down by RuntimeClass.
	RunPodSandboxErrors = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem: KubeletSubsystem,
			Name: RunPodSandboxErrorsKey,
			Help: "Cumulative number of the run_podsandbox operation errors by RuntimeClass.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"runtime_handler"},
	)

	// RunningPodCount is a gauge that tracks the number of Pods currently running
	RunningPodCount = metrics.NewGauge(
		&metrics.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: "running_pod_count",
			Help: "Number of pods currently running",
			StabilityLevel: metrics.ALPHA,
		},
	)
	// RunningContainerCount is a gauge that tracks the number of containers currently running
	RunningContainerCount = metrics.NewGaugeVec(
		&metrics.GaugeOpts{
			Subsystem: KubeletSubsystem,
			Name: "running_container_count",
			Help: "Number of containers currently running",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"container_state"},
	)
)

var registerMetrics sync.Once

// Register all metrics.
func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheus.Collector) {
// Register registers all metrics.
func Register(containerCache kubecontainer.RuntimeCache, collectors ...metrics.StableCollector) {
	// Register the metrics.
	registerMetrics.Do(func() {
		prometheus.MustRegister(NodeName)
		prometheus.MustRegister(PodWorkerDuration)
		prometheus.MustRegister(PodStartDuration)
		prometheus.MustRegister(CgroupManagerDuration)
		prometheus.MustRegister(PodWorkerStartDuration)
		prometheus.MustRegister(ContainersPerPodCount)
		prometheus.MustRegister(newPodAndContainerCollector(containerCache))
		prometheus.MustRegister(PLEGRelistDuration)
		prometheus.MustRegister(PLEGDiscardEvents)
		prometheus.MustRegister(PLEGRelistInterval)
		prometheus.MustRegister(RuntimeOperations)
		prometheus.MustRegister(RuntimeOperationsDuration)
		prometheus.MustRegister(RuntimeOperationsErrors)
		prometheus.MustRegister(EvictionStatsAge)
		prometheus.MustRegister(DevicePluginRegistrationCount)
		prometheus.MustRegister(DevicePluginAllocationDuration)
		prometheus.MustRegister(DeprecatedPodWorkerLatency)
		prometheus.MustRegister(DeprecatedPodStartLatency)
		prometheus.MustRegister(DeprecatedCgroupManagerLatency)
		prometheus.MustRegister(DeprecatedPodWorkerStartLatency)
		prometheus.MustRegister(DeprecatedPLEGRelistLatency)
		prometheus.MustRegister(DeprecatedPLEGRelistInterval)
		prometheus.MustRegister(DeprecatedRuntimeOperations)
		prometheus.MustRegister(DeprecatedRuntimeOperationsLatency)
		prometheus.MustRegister(DeprecatedRuntimeOperationsErrors)
		prometheus.MustRegister(DeprecatedEvictionStatsAge)
		prometheus.MustRegister(DeprecatedDevicePluginRegistrationCount)
		prometheus.MustRegister(DeprecatedDevicePluginAllocationLatency)
		legacyregistry.MustRegister(NodeName)
		legacyregistry.MustRegister(PodWorkerDuration)
		legacyregistry.MustRegister(PodStartDuration)
		legacyregistry.MustRegister(CgroupManagerDuration)
		legacyregistry.MustRegister(PodWorkerStartDuration)
		legacyregistry.MustRegister(ContainersPerPodCount)
		legacyregistry.MustRegister(PLEGRelistDuration)
		legacyregistry.MustRegister(PLEGDiscardEvents)
		legacyregistry.MustRegister(PLEGRelistInterval)
		legacyregistry.MustRegister(RuntimeOperations)
		legacyregistry.MustRegister(RuntimeOperationsDuration)
		legacyregistry.MustRegister(RuntimeOperationsErrors)
		legacyregistry.MustRegister(Evictions)
		legacyregistry.MustRegister(EvictionStatsAge)
		legacyregistry.MustRegister(Preemptions)
		legacyregistry.MustRegister(DevicePluginRegistrationCount)
		legacyregistry.MustRegister(DevicePluginAllocationDuration)
		legacyregistry.MustRegister(DeprecatedPodWorkerLatency)
		legacyregistry.MustRegister(DeprecatedPodStartLatency)
		legacyregistry.MustRegister(DeprecatedCgroupManagerLatency)
		legacyregistry.MustRegister(DeprecatedPodWorkerStartLatency)
		legacyregistry.MustRegister(DeprecatedPLEGRelistLatency)
		legacyregistry.MustRegister(DeprecatedPLEGRelistInterval)
		legacyregistry.MustRegister(DeprecatedRuntimeOperations)
		legacyregistry.MustRegister(DeprecatedRuntimeOperationsLatency)
		legacyregistry.MustRegister(DeprecatedRuntimeOperationsErrors)
		legacyregistry.MustRegister(DeprecatedEvictionStatsAge)
		legacyregistry.MustRegister(DeprecatedDevicePluginRegistrationCount)
		legacyregistry.MustRegister(DeprecatedDevicePluginAllocationLatency)
		legacyregistry.MustRegister(RunningContainerCount)
		legacyregistry.MustRegister(RunningPodCount)
		if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
			prometheus.MustRegister(AssignedConfig)
			prometheus.MustRegister(ActiveConfig)
			prometheus.MustRegister(LastKnownGoodConfig)
			prometheus.MustRegister(ConfigError)
			legacyregistry.MustRegister(AssignedConfig)
			legacyregistry.MustRegister(ActiveConfig)
			legacyregistry.MustRegister(LastKnownGoodConfig)
			legacyregistry.MustRegister(ConfigError)
		}
		for _, collector := range collectors {
			prometheus.MustRegister(collector)
			legacyregistry.CustomMustRegister(collector)
		}
	})
}
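The switch above from prometheus.MustRegister to legacyregistry registration is what ties these kubelet metrics into the shared component-base registry. A minimal sketch of how a caller might invoke it; the runtimeCache and someStableCollector names are illustrative placeholders, not identifiers from this diff:

	// Register is guarded by sync.Once, so calling it more than once is harmless.
	metrics.Register(runtimeCache)
	// Extra stable collectors can be passed variadically; they are forwarded to
	// legacyregistry.CustomMustRegister.
	// metrics.Register(runtimeCache, someStableCollector)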

// Gets the time since the specified start in microseconds.
// GetGather returns the gatherer. It is used by test cases outside the current package.
func GetGather() metrics.Gatherer {
	return legacyregistry.DefaultGatherer
}

// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}

@ -414,56 +560,6 @@ func SinceInSeconds(start time.Time) float64 {
	return time.Since(start).Seconds()
}

func newPodAndContainerCollector(containerCache kubecontainer.RuntimeCache) *podAndContainerCollector {
	return &podAndContainerCollector{
		containerCache: containerCache,
	}
}

// Custom collector for current pod and container counts.
type podAndContainerCollector struct {
	// Cache for accessing information about running containers.
	containerCache kubecontainer.RuntimeCache
}

// TODO(vmarmol): Split by source?
var (
	runningPodCountDesc = prometheus.NewDesc(
		prometheus.BuildFQName("", KubeletSubsystem, "running_pod_count"),
		"Number of pods currently running",
		nil, nil)
	runningContainerCountDesc = prometheus.NewDesc(
		prometheus.BuildFQName("", KubeletSubsystem, "running_container_count"),
		"Number of containers currently running",
		nil, nil)
)

func (pc *podAndContainerCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- runningPodCountDesc
	ch <- runningContainerCountDesc
}

func (pc *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) {
	runningPods, err := pc.containerCache.GetPods()
	if err != nil {
		klog.Warningf("Failed to get running container information while collecting metrics: %v", err)
		return
	}

	runningContainers := 0
	for _, p := range runningPods {
		runningContainers += len(p.Containers)
	}
	ch <- prometheus.MustNewConstMetric(
		runningPodCountDesc,
		prometheus.GaugeValue,
		float64(len(runningPods)))
	ch <- prometheus.MustNewConstMetric(
		runningContainerCountDesc,
		prometheus.GaugeValue,
		float64(runningContainers))
}

const configMapAPIPathFmt = "/api/v1/namespaces/%s/configmaps/%s"

func configLabels(source *corev1.NodeConfigSource) (map[string]string, error) {
@ -488,8 +584,10 @@ func configLabels(source *corev1.NodeConfigSource) (map[string]string, error) {
}

// track labels across metric updates, so we can delete old label sets and prevent leaks
var assignedConfigLabels map[string]string = map[string]string{}
var assignedConfigLabels map[string]string

// SetAssignedConfig tracks labels according to the assigned NodeConfig. It also tracks labels
// across metric updates so old labels can be safely deleted.
func SetAssignedConfig(source *corev1.NodeConfigSource) error {
	// compute the timeseries labels from the source
	labels, err := configLabels(source)
@ -497,7 +595,9 @@ func SetAssignedConfig(source *corev1.NodeConfigSource) error {
		return err
	}
	// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
	AssignedConfig.Delete(assignedConfigLabels)
	if !AssignedConfig.Delete(assignedConfigLabels) {
		klog.Warningf("Failed to delete metric for labels %v. This may result in ambiguity from multiple metrics concurrently indicating different assigned configs.", assignedConfigLabels)
	}
	// record the new timeseries
	assignedConfigLabels = labels
	// expose the new timeseries with a constant count of 1
@ -506,8 +606,10 @@ func SetAssignedConfig(source *corev1.NodeConfigSource) error {
}

// track labels across metric updates, so we can delete old label sets and prevent leaks
var activeConfigLabels map[string]string = map[string]string{}
var activeConfigLabels map[string]string

// SetActiveConfig tracks labels according to the NodeConfig that is currently used by the Kubelet.
// It also tracks labels across metric updates so old labels can be safely deleted.
func SetActiveConfig(source *corev1.NodeConfigSource) error {
	// compute the timeseries labels from the source
	labels, err := configLabels(source)
@ -515,7 +617,9 @@ func SetActiveConfig(source *corev1.NodeConfigSource) error {
		return err
	}
	// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
	ActiveConfig.Delete(activeConfigLabels)
	if !ActiveConfig.Delete(activeConfigLabels) {
		klog.Warningf("Failed to delete metric for labels %v. This may result in ambiguity from multiple metrics concurrently indicating different active configs.", activeConfigLabels)
	}
	// record the new timeseries
	activeConfigLabels = labels
	// expose the new timeseries with a constant count of 1
@ -524,8 +628,10 @@ func SetActiveConfig(source *corev1.NodeConfigSource) error {
}

// track labels across metric updates, so we can delete old label sets and prevent leaks
var lastKnownGoodConfigLabels map[string]string = map[string]string{}
var lastKnownGoodConfigLabels map[string]string

// SetLastKnownGoodConfig tracks labels according to the NodeConfig that was successfully applied last.
// It also tracks labels across metric updates so old labels can be safely deleted.
func SetLastKnownGoodConfig(source *corev1.NodeConfigSource) error {
	// compute the timeseries labels from the source
	labels, err := configLabels(source)
@ -533,7 +639,9 @@ func SetLastKnownGoodConfig(source *corev1.NodeConfigSource) error {
		return err
	}
	// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
	LastKnownGoodConfig.Delete(lastKnownGoodConfigLabels)
	if !LastKnownGoodConfig.Delete(lastKnownGoodConfigLabels) {
		klog.Warningf("Failed to delete metric for labels %v. This may result in ambiguity from multiple metrics concurrently indicating different last known good configs.", lastKnownGoodConfigLabels)
	}
	// record the new timeseries
	lastKnownGoodConfigLabels = labels
	// expose the new timeseries with a constant count of 1
@ -541,6 +649,7 @@ func SetLastKnownGoodConfig(source *corev1.NodeConfigSource) error {
	return nil
}

// SetConfigError sets the ConfigError metric to 1 in case any errors were encountered.
func SetConfigError(err bool) {
	if err {
		ConfigError.Set(1)
@ -549,6 +658,7 @@ func SetConfigError(err bool) {
	}
}

// SetNodeName sets the NodeName Gauge to 1.
func SetNodeName(name types.NodeName) {
	NodeName.WithLabelValues(string(name)).Set(1)
}
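SinceInMicroseconds and SinceInSeconds above are the helpers components use to feed these histograms and summaries. A hedged sketch of the usual pattern, with placeholder names for the timed call and its label value:

	// Illustrative only: time an operation and record both its duration and any
	// error against the operation_type label.
	start := time.Now()
	err := doRemoteRuntimeCall() // hypothetical helper
	RuntimeOperationsDuration.WithLabelValues("pull_image").Observe(SinceInSeconds(start))
	if err != nil {
		RuntimeOperationsErrors.WithLabelValues("pull_image").Inc()
	}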
113 vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go generated vendored
@ -1,113 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// MirrorClient knows how to create/delete a mirror pod in the API server.
type MirrorClient interface {
	// CreateMirrorPod creates a mirror pod in the API server for the given
	// pod or returns an error. The mirror pod will have the same annotations
	// as the given pod as well as an extra annotation containing the hash of
	// the static pod.
	CreateMirrorPod(pod *v1.Pod) error
	// DeleteMirrorPod deletes the mirror pod with the given full name from
	// the API server or returns an error.
	DeleteMirrorPod(podFullName string) error
}

// basicMirrorClient is a functional MirrorClient. Mirror pods are stored in
// the kubelet directly because they need to be in sync with the internal
// pods.
type basicMirrorClient struct {
	apiserverClient clientset.Interface
}

// NewBasicMirrorClient returns a new MirrorClient.
func NewBasicMirrorClient(apiserverClient clientset.Interface) MirrorClient {
	return &basicMirrorClient{apiserverClient: apiserverClient}
}

func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error {
	if mc.apiserverClient == nil {
		return nil
	}
	// Make a copy of the pod.
	copyPod := *pod
	copyPod.Annotations = make(map[string]string)

	for k, v := range pod.Annotations {
		copyPod.Annotations[k] = v
	}
	hash := getPodHash(pod)
	copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash
	apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(&copyPod)
	if err != nil && errors.IsAlreadyExists(err) {
		// Check if the existing pod is the same as the pod we want to create.
		if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash {
			return nil
		}
	}
	return err
}

func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
	if mc.apiserverClient == nil {
		return nil
	}
	name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
	if err != nil {
		klog.Errorf("Failed to parse a pod full name %q", podFullName)
		return err
	}
	klog.V(2).Infof("Deleting a mirror pod %q", podFullName)
	// TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager
	if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
		klog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
	}
	return nil
}

// IsStaticPod returns true if the pod is a static pod.
func IsStaticPod(pod *v1.Pod) bool {
	source, err := kubetypes.GetPodSource(pod)
	return err == nil && source != kubetypes.ApiserverSource
}

// IsMirrorPod returns true if the pod is a mirror pod.
func IsMirrorPod(pod *v1.Pod) bool {
	_, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey]
	return ok
}

func getHashFromMirrorPod(pod *v1.Pod) (string, bool) {
	hash, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey]
	return hash, ok
}

func getPodHash(pod *v1.Pod) string {
	// The annotation exists for all static pods.
	return pod.Annotations[kubetypes.ConfigHashAnnotationKey]
}
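mirror_client.go is removed by this commit, but the pattern it implemented is worth keeping in mind: static pods only become visible through the API server via their mirror pods, matched by pod full name. A hypothetical caller, sketched under the assumption that mirrorClient is a MirrorClient and pod came from a file or HTTP source:

	// Illustrative only: create a mirror pod for a static pod so the API server
	// can report its status.
	if IsStaticPod(pod) && !IsMirrorPod(pod) {
		if err := mirrorClient.CreateMirrorPod(pod); err != nil {
			klog.Errorf("Failed to create mirror pod for %q: %v", pod.Name, err)
		}
	}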
385 vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go generated vendored
@ -1,385 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pod
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpoint"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/configmap"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/secret"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
// Manager stores and manages access to pods, maintaining the mappings
|
||||
// between static pods and mirror pods.
|
||||
//
|
||||
// The kubelet discovers pod updates from 3 sources: file, http, and
|
||||
// apiserver. Pods from non-apiserver sources are called static pods, and API
|
||||
// server is not aware of the existence of static pods. In order to monitor
|
||||
// the status of such pods, the kubelet creates a mirror pod for each static
|
||||
// pod via the API server.
|
||||
//
|
||||
// A mirror pod has the same pod full name (name and namespace) as its static
|
||||
// counterpart (albeit different metadata such as UID, etc). By leveraging the
|
||||
// fact that the kubelet reports the pod status using the pod full name, the
|
||||
// status of the mirror pod always reflects the actual status of the static
|
||||
// pod. When a static pod gets deleted, the associated orphaned mirror pod
|
||||
// will also be removed.
|
||||
type Manager interface {
|
||||
// GetPods returns the regular pods bound to the kubelet and their spec.
|
||||
GetPods() []*v1.Pod
|
||||
// GetPodByFullName returns the (non-mirror) pod that matches full name, as well as
|
||||
// whether the pod was found.
|
||||
GetPodByFullName(podFullName string) (*v1.Pod, bool)
|
||||
// GetPodByName provides the (non-mirror) pod that matches namespace and
|
||||
// name, as well as whether the pod was found.
|
||||
GetPodByName(namespace, name string) (*v1.Pod, bool)
|
||||
// GetPodByUID provides the (non-mirror) pod that matches pod UID, as well as
|
||||
// whether the pod is found.
|
||||
GetPodByUID(types.UID) (*v1.Pod, bool)
|
||||
// GetPodByMirrorPod returns the static pod for the given mirror pod and
|
||||
// whether it was known to the pod manager.
|
||||
GetPodByMirrorPod(*v1.Pod) (*v1.Pod, bool)
|
||||
// GetMirrorPodByPod returns the mirror pod for the given static pod and
|
||||
// whether it was known to the pod manager.
|
||||
GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool)
|
||||
// GetPodsAndMirrorPods returns the both regular and mirror pods.
|
||||
GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod)
|
||||
// SetPods replaces the internal pods with the new pods.
|
||||
// It is currently only used for testing.
|
||||
SetPods(pods []*v1.Pod)
|
||||
// AddPod adds the given pod to the manager.
|
||||
AddPod(pod *v1.Pod)
|
||||
// UpdatePod updates the given pod in the manager.
|
||||
UpdatePod(pod *v1.Pod)
|
||||
// DeletePod deletes the given pod from the manager. For mirror pods,
|
||||
// this means deleting the mappings related to mirror pods. For non-
|
||||
// mirror pods, this means deleting from indexes for all non-mirror pods.
|
||||
DeletePod(pod *v1.Pod)
|
||||
// DeleteOrphanedMirrorPods deletes all mirror pods which do not have
|
||||
// associated static pods. This method sends deletion requests to the API
|
||||
// server, but does NOT modify the internal pod storage in basicManager.
|
||||
DeleteOrphanedMirrorPods()
|
||||
// TranslatePodUID returns the actual UID of a pod. If the UID belongs to
|
||||
// a mirror pod, returns the UID of its static pod. Otherwise, returns the
|
||||
// original UID.
|
||||
//
|
||||
// All public-facing functions should perform this translation for UIDs
|
||||
// because user may provide a mirror pod UID, which is not recognized by
|
||||
// internal Kubelet functions.
|
||||
TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID
|
||||
// GetUIDTranslations returns the mappings of static pod UIDs to mirror pod
|
||||
// UIDs and mirror pod UIDs to static pod UIDs.
|
||||
GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID)
|
||||
// IsMirrorPodOf returns true if mirrorPod is a correct representation of
|
||||
// pod; false otherwise.
|
||||
IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool
|
||||
|
||||
MirrorClient
|
||||
}
|
||||
|
||||
// basicManager is a functional Manager.
|
||||
//
|
||||
// All fields in basicManager are read-only and are updated by calling SetPods,
|
||||
// AddPod, UpdatePod, or DeletePod.
|
||||
type basicManager struct {
|
||||
// Protects all internal maps.
|
||||
lock sync.RWMutex
|
||||
|
||||
// Regular pods indexed by UID.
|
||||
podByUID map[kubetypes.ResolvedPodUID]*v1.Pod
|
||||
// Mirror pods indexed by UID.
|
||||
mirrorPodByUID map[kubetypes.MirrorPodUID]*v1.Pod
|
||||
|
||||
// Pods indexed by full name for easy access.
|
||||
podByFullName map[string]*v1.Pod
|
||||
mirrorPodByFullName map[string]*v1.Pod
|
||||
|
||||
// Mirror pod UID to pod UID map.
|
||||
translationByUID map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID
|
||||
|
||||
// basicManager is keeping secretManager and configMapManager up-to-date.
|
||||
secretManager secret.Manager
|
||||
configMapManager configmap.Manager
|
||||
checkpointManager checkpointmanager.CheckpointManager
|
||||
|
||||
// A mirror pod client to create/delete mirror pods.
|
||||
MirrorClient
|
||||
}
|
||||
|
||||
// NewBasicPodManager returns a functional Manager.
|
||||
func NewBasicPodManager(client MirrorClient, secretManager secret.Manager, configMapManager configmap.Manager, cpm checkpointmanager.CheckpointManager) Manager {
|
||||
pm := &basicManager{}
|
||||
pm.secretManager = secretManager
|
||||
pm.configMapManager = configMapManager
|
||||
pm.checkpointManager = cpm
|
||||
pm.MirrorClient = client
|
||||
pm.SetPods(nil)
|
||||
return pm
|
||||
}
|
||||
|
||||
// Set the internal pods based on the new pods.
|
||||
func (pm *basicManager) SetPods(newPods []*v1.Pod) {
|
||||
pm.lock.Lock()
|
||||
defer pm.lock.Unlock()
|
||||
|
||||
pm.podByUID = make(map[kubetypes.ResolvedPodUID]*v1.Pod)
|
||||
pm.podByFullName = make(map[string]*v1.Pod)
|
||||
pm.mirrorPodByUID = make(map[kubetypes.MirrorPodUID]*v1.Pod)
|
||||
pm.mirrorPodByFullName = make(map[string]*v1.Pod)
|
||||
pm.translationByUID = make(map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID)
|
||||
|
||||
pm.updatePodsInternal(newPods...)
|
||||
}
|
||||
|
||||
func (pm *basicManager) AddPod(pod *v1.Pod) {
|
||||
pm.UpdatePod(pod)
|
||||
}
|
||||
|
||||
func (pm *basicManager) UpdatePod(pod *v1.Pod) {
|
||||
pm.lock.Lock()
|
||||
defer pm.lock.Unlock()
|
||||
pm.updatePodsInternal(pod)
|
||||
if pm.checkpointManager != nil {
|
||||
if err := checkpoint.WritePod(pm.checkpointManager, pod); err != nil {
|
||||
klog.Errorf("Error writing checkpoint for pod: %v", pod.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isPodInTerminatedState(pod *v1.Pod) bool {
|
||||
return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
|
||||
}
|
||||
|
||||
// updatePodsInternal replaces the given pods in the current state of the
|
||||
// manager, updating the various indices. The caller is assumed to hold the
|
||||
// lock.
|
||||
func (pm *basicManager) updatePodsInternal(pods ...*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
if pm.secretManager != nil {
|
||||
if isPodInTerminatedState(pod) {
|
||||
// Pods that are in terminated state and no longer running can be
|
||||
// ignored as they no longer require access to secrets.
|
||||
// It is especially important in watch-based manager, to avoid
|
||||
// unnecessary watches for terminated pods waiting for GC.
|
||||
pm.secretManager.UnregisterPod(pod)
|
||||
} else {
|
||||
// TODO: Consider detecting only status update and in such case do
|
||||
// not register pod, as it doesn't really matter.
|
||||
pm.secretManager.RegisterPod(pod)
|
||||
}
|
||||
}
|
||||
if pm.configMapManager != nil {
|
||||
if isPodInTerminatedState(pod) {
|
||||
// Pods that are in terminated state and no longer running can be
|
||||
// ignored as they no longer require access to configmaps.
|
||||
// It is especially important in watch-based manager, to avoid
|
||||
// unnecessary watches for terminated pods waiting for GC.
|
||||
pm.configMapManager.UnregisterPod(pod)
|
||||
} else {
|
||||
// TODO: Consider detecting only status update and in such case do
|
||||
// not register pod, as it doesn't really matter.
|
||||
pm.configMapManager.RegisterPod(pod)
|
||||
}
|
||||
}
|
||||
podFullName := kubecontainer.GetPodFullName(pod)
|
||||
// This logic relies on a static pod and its mirror to have the same name.
|
||||
// It is safe to type convert here due to the IsMirrorPod guard.
|
||||
if IsMirrorPod(pod) {
|
||||
mirrorPodUID := kubetypes.MirrorPodUID(pod.UID)
|
||||
pm.mirrorPodByUID[mirrorPodUID] = pod
|
||||
pm.mirrorPodByFullName[podFullName] = pod
|
||||
if p, ok := pm.podByFullName[podFullName]; ok {
|
||||
pm.translationByUID[mirrorPodUID] = kubetypes.ResolvedPodUID(p.UID)
|
||||
}
|
||||
} else {
|
||||
resolvedPodUID := kubetypes.ResolvedPodUID(pod.UID)
|
||||
pm.podByUID[resolvedPodUID] = pod
|
||||
pm.podByFullName[podFullName] = pod
|
||||
if mirror, ok := pm.mirrorPodByFullName[podFullName]; ok {
|
||||
pm.translationByUID[kubetypes.MirrorPodUID(mirror.UID)] = resolvedPodUID
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *basicManager) DeletePod(pod *v1.Pod) {
|
||||
pm.lock.Lock()
|
||||
defer pm.lock.Unlock()
|
||||
if pm.secretManager != nil {
|
||||
pm.secretManager.UnregisterPod(pod)
|
||||
}
|
||||
if pm.configMapManager != nil {
|
||||
pm.configMapManager.UnregisterPod(pod)
|
||||
}
|
||||
podFullName := kubecontainer.GetPodFullName(pod)
|
||||
// It is safe to type convert here due to the IsMirrorPod guard.
|
||||
if IsMirrorPod(pod) {
|
||||
mirrorPodUID := kubetypes.MirrorPodUID(pod.UID)
|
||||
delete(pm.mirrorPodByUID, mirrorPodUID)
|
||||
delete(pm.mirrorPodByFullName, podFullName)
|
||||
delete(pm.translationByUID, mirrorPodUID)
|
||||
} else {
|
||||
delete(pm.podByUID, kubetypes.ResolvedPodUID(pod.UID))
|
||||
delete(pm.podByFullName, podFullName)
|
||||
}
|
||||
if pm.checkpointManager != nil {
|
||||
if err := checkpoint.DeletePod(pm.checkpointManager, pod); err != nil {
|
||||
klog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetPods() []*v1.Pod {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
return podsMapToPods(pm.podByUID)
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod) {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
pods := podsMapToPods(pm.podByUID)
|
||||
mirrorPods := mirrorPodsMapToMirrorPods(pm.mirrorPodByUID)
|
||||
return pods, mirrorPods
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
pod, ok := pm.podByUID[kubetypes.ResolvedPodUID(uid)] // Safe conversion, map only holds non-mirrors.
|
||||
return pod, ok
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetPodByName(namespace, name string) (*v1.Pod, bool) {
|
||||
podFullName := kubecontainer.BuildPodFullName(name, namespace)
|
||||
return pm.GetPodByFullName(podFullName)
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
pod, ok := pm.podByFullName[podFullName]
|
||||
return pod, ok
|
||||
}
|
||||
|
||||
func (pm *basicManager) TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID {
|
||||
// It is safe to type convert to a resolved UID because type conversion is idempotent.
|
||||
if uid == "" {
|
||||
return kubetypes.ResolvedPodUID(uid)
|
||||
}
|
||||
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
if translated, ok := pm.translationByUID[kubetypes.MirrorPodUID(uid)]; ok {
|
||||
return translated
|
||||
}
|
||||
return kubetypes.ResolvedPodUID(uid)
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID,
|
||||
mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID) {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
|
||||
podToMirror = make(map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, len(pm.translationByUID))
|
||||
mirrorToPod = make(map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID, len(pm.translationByUID))
|
||||
// Insert empty translation mapping for all static pods.
|
||||
for uid, pod := range pm.podByUID {
|
||||
if !IsStaticPod(pod) {
|
||||
continue
|
||||
}
|
||||
podToMirror[uid] = ""
|
||||
}
|
||||
// Fill in translations. Notice that if there is no mirror pod for a
|
||||
// static pod, its uid will be translated into empty string "". This
|
||||
// is WAI, from the caller side we can know that the static pod doesn't
|
||||
// have a corresponding mirror pod instead of using static pod uid directly.
|
||||
for k, v := range pm.translationByUID {
|
||||
mirrorToPod[k] = v
|
||||
podToMirror[v] = k
|
||||
}
|
||||
return podToMirror, mirrorToPod
|
||||
}
|
||||
|
||||
func (pm *basicManager) getOrphanedMirrorPodNames() []string {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
var podFullNames []string
|
||||
for podFullName := range pm.mirrorPodByFullName {
|
||||
if _, ok := pm.podByFullName[podFullName]; !ok {
|
||||
podFullNames = append(podFullNames, podFullName)
|
||||
}
|
||||
}
|
||||
return podFullNames
|
||||
}
|
||||
|
||||
func (pm *basicManager) DeleteOrphanedMirrorPods() {
|
||||
podFullNames := pm.getOrphanedMirrorPodNames()
|
||||
for _, podFullName := range podFullNames {
|
||||
pm.MirrorClient.DeleteMirrorPod(podFullName)
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool {
|
||||
// Check name and namespace first.
|
||||
if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace {
|
||||
return false
|
||||
}
|
||||
hash, ok := getHashFromMirrorPod(mirrorPod)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return hash == getPodHash(pod)
|
||||
}
|
||||
|
||||
func podsMapToPods(UIDMap map[kubetypes.ResolvedPodUID]*v1.Pod) []*v1.Pod {
|
||||
pods := make([]*v1.Pod, 0, len(UIDMap))
|
||||
for _, pod := range UIDMap {
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
return pods
|
||||
}
|
||||
|
||||
func mirrorPodsMapToMirrorPods(UIDMap map[kubetypes.MirrorPodUID]*v1.Pod) []*v1.Pod {
|
||||
pods := make([]*v1.Pod, 0, len(UIDMap))
|
||||
for _, pod := range UIDMap {
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
return pods
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetMirrorPodByPod(pod *v1.Pod) (*v1.Pod, bool) {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
mirrorPod, ok := pm.mirrorPodByFullName[kubecontainer.GetPodFullName(pod)]
|
||||
return mirrorPod, ok
|
||||
}
|
||||
|
||||
func (pm *basicManager) GetPodByMirrorPod(mirrorPod *v1.Pod) (*v1.Pod, bool) {
|
||||
pm.lock.RLock()
|
||||
defer pm.lock.RUnlock()
|
||||
pod, ok := pm.podByFullName[kubecontainer.GetPodFullName(mirrorPod)]
|
||||
return pod, ok
|
||||
}
|
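The UID translation described in the Manager interface above is the part most callers trip over: a mirror pod UID must be resolved to its static pod UID before any internal lookup. A sketch of that flow, with podManager and requestUID as hypothetical placeholders:

	// Illustrative only: normalize a possibly-mirror UID, then look up the pod.
	resolvedUID := podManager.TranslatePodUID(requestUID)
	pod, ok := podManager.GetPodByUID(types.UID(resolvedUID))
	if !ok {
		klog.V(4).Infof("Pod with UID %q not found", requestUID)
	}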
40 vendor/k8s.io/kubernetes/pkg/kubelet/secret/fake_manager.go generated vendored
@ -1,40 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package secret

import (
	"k8s.io/api/core/v1"
)

// fakeManager implements the Manager interface for testing purposes.
// It performs no operations against the apiserver.
type fakeManager struct {
}

func NewFakeManager() Manager {
	return &fakeManager{}
}

func (s *fakeManager) GetSecret(namespace, name string) (*v1.Secret, error) {
	return nil, nil
}

func (s *fakeManager) RegisterPod(pod *v1.Pod) {
}

func (s *fakeManager) UnregisterPod(pod *v1.Pod) {
}
149 vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go generated vendored
@ -1,149 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package secret
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/manager"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
type Manager interface {
|
||||
// Get secret by secret namespace and name.
|
||||
GetSecret(namespace, name string) (*v1.Secret, error)
|
||||
|
||||
// WARNING: Register/UnregisterPod functions should be efficient,
|
||||
// i.e. should not block on network operations.
|
||||
|
||||
// RegisterPod registers all secrets from a given pod.
|
||||
RegisterPod(pod *v1.Pod)
|
||||
|
||||
// UnregisterPod unregisters secrets from a given pod that are not
|
||||
// used by any other registered pod.
|
||||
UnregisterPod(pod *v1.Pod)
|
||||
}
|
||||
|
||||
// simpleSecretManager implements SecretManager interfaces with
|
||||
// simple operations to apiserver.
|
||||
type simpleSecretManager struct {
|
||||
kubeClient clientset.Interface
|
||||
}
|
||||
|
||||
func NewSimpleSecretManager(kubeClient clientset.Interface) Manager {
|
||||
return &simpleSecretManager{kubeClient: kubeClient}
|
||||
}
|
||||
|
||||
func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) {
|
||||
return s.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) {
|
||||
}
|
||||
|
||||
func (s *simpleSecretManager) UnregisterPod(pod *v1.Pod) {
|
||||
}
|
||||
|
||||
// secretManager keeps a store with secrets necessary
|
||||
// for registered pods. Different implementations of the store
|
||||
// may result in different semantics for freshness of secrets
|
||||
// (e.g. ttl-based implementation vs watch-based implementation).
|
||||
type secretManager struct {
|
||||
manager manager.Manager
|
||||
}
|
||||
|
||||
func (s *secretManager) GetSecret(namespace, name string) (*v1.Secret, error) {
|
||||
object, err := s.manager.GetObject(namespace, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret, ok := object.(*v1.Secret); ok {
|
||||
return secret, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected object type: %v", object)
|
||||
}
|
||||
|
||||
func (s *secretManager) RegisterPod(pod *v1.Pod) {
|
||||
s.manager.RegisterPod(pod)
|
||||
}
|
||||
|
||||
func (s *secretManager) UnregisterPod(pod *v1.Pod) {
|
||||
s.manager.UnregisterPod(pod)
|
||||
}
|
||||
|
||||
func getSecretNames(pod *v1.Pod) sets.String {
|
||||
result := sets.NewString()
|
||||
podutil.VisitPodSecretNames(pod, func(name string) bool {
|
||||
result.Insert(name)
|
||||
return true
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
const (
|
||||
defaultTTL = time.Minute
|
||||
)
|
||||
|
||||
// NewCachingSecretManager creates a manager that keeps a cache of all secrets
|
||||
// necessary for registered pods.
|
||||
// It implements the following logic:
|
||||
// - whenever a pod is created or updated, the cached versions of all secrets
|
||||
// are invalidated
|
||||
// - every GetObject() call tries to fetch the value from local cache; if it is
|
||||
// not there, invalidated or too old, we fetch it from apiserver and refresh the
|
||||
// value in cache; otherwise it is just fetched from cache
|
||||
func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager {
|
||||
getSecret := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
|
||||
return kubeClient.CoreV1().Secrets(namespace).Get(name, opts)
|
||||
}
|
||||
secretStore := manager.NewObjectStore(getSecret, clock.RealClock{}, getTTL, defaultTTL)
|
||||
return &secretManager{
|
||||
manager: manager.NewCacheBasedManager(secretStore, getSecretNames),
|
||||
}
|
||||
}
|
||||
|
||||
// NewWatchingSecretManager creates a manager that keeps a cache of all secrets
|
||||
// necessary for registered pods.
|
||||
// It implements the following logic:
|
||||
// - whenever a pod is created or updated, we start individual watches for all
|
||||
// referenced objects that aren't referenced from other registered pods
|
||||
// - every GetObject() returns a value from local cache propagated via watches
|
||||
func NewWatchingSecretManager(kubeClient clientset.Interface) Manager {
|
||||
listSecret := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.CoreV1().Secrets(namespace).List(opts)
|
||||
}
|
||||
watchSecret := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
|
||||
return kubeClient.CoreV1().Secrets(namespace).Watch(opts)
|
||||
}
|
||||
newSecret := func() runtime.Object {
|
||||
return &v1.Secret{}
|
||||
}
|
||||
gr := corev1.Resource("secret")
|
||||
return &secretManager{
|
||||
manager: manager.NewWatchBasedManager(listSecret, watchSecret, newSecret, gr, getSecretNames),
|
||||
}
|
||||
}
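
For orientation, the following is an illustrative sketch (not part of the vendored file) of how the caching secret manager defined above can be exercised. It assumes the fake clientset from k8s.io/client-go/kubernetes/fake and a fixed TTL function in place of the node-annotation-based one, and that these vendored packages are importable from the consuming module.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/kubelet/secret"
)

func main() {
	// Fake clientset pre-loaded with one secret (illustrative only).
	client := fake.NewSimpleClientset(&v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "db-creds"},
		Data:       map[string][]byte{"password": []byte("hunter2")},
	})

	// Fixed one-minute TTL instead of reading the TTL from a Node annotation.
	getTTL := func() (time.Duration, bool) { return time.Minute, true }
	mgr := secret.NewCachingSecretManager(client, getTTL)

	// Registering the pod sets up cache entries for all secrets it references.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "web"},
		Spec:       v1.PodSpec{ImagePullSecrets: []v1.LocalObjectReference{{Name: "db-creds"}}},
	}
	mgr.RegisterPod(pod)

	// Served from the local cache until the TTL expires.
	s, err := mgr.GetSecret("demo", "db-creds")
	fmt.Println(s, err)
}
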
|
68
vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
generated
vendored
@ -19,12 +19,9 @@ package types
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
kubeapi "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/scheduling"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -32,7 +29,6 @@ const (
|
||||
ConfigMirrorAnnotationKey = v1.MirrorPodAnnotationKey
|
||||
ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
|
||||
ConfigHashAnnotationKey = "kubernetes.io/config.hash"
|
||||
CriticalPodAnnotationKey = "scheduler.alpha.kubernetes.io/critical-pod"
|
||||
)
|
||||
|
||||
// PodOperation defines what changes will be made on a pod configuration.
|
||||
@ -92,9 +88,8 @@ func GetValidatedSources(sources []string) ([]string, error) {
|
||||
return []string{FileSource, HTTPSource, ApiserverSource}, nil
|
||||
case FileSource, HTTPSource, ApiserverSource:
|
||||
validated = append(validated, source)
|
||||
break
|
||||
case "":
|
||||
break
|
||||
// Skip
|
||||
default:
|
||||
return []string{}, fmt.Errorf("unknown pod source %q", source)
|
||||
}
|
||||
@ -142,19 +137,28 @@ func (sp SyncPodType) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
// IsCriticalPod returns true if the pod bears the critical pod annotation key or if pod's priority is greater than
|
||||
// or equal to SystemCriticalPriority. Both the default scheduler and the kubelet use this function
|
||||
// to make admission and scheduling decisions.
|
||||
// IsMirrorPod returns true if the passed Pod is a Mirror Pod.
|
||||
func IsMirrorPod(pod *v1.Pod) bool {
|
||||
_, ok := pod.Annotations[ConfigMirrorAnnotationKey]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsStaticPod returns true if the pod is a static pod.
|
||||
func IsStaticPod(pod *v1.Pod) bool {
|
||||
source, err := GetPodSource(pod)
|
||||
return err == nil && source != ApiserverSource
|
||||
}
|
||||
|
||||
// IsCriticalPod returns true if pod's priority is greater than or equal to SystemCriticalPriority.
|
||||
func IsCriticalPod(pod *v1.Pod) bool {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
|
||||
if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
|
||||
return true
|
||||
}
|
||||
if IsStaticPod(pod) {
|
||||
return true
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) {
|
||||
if IsCritical(pod.Namespace, pod.Annotations) {
|
||||
return true
|
||||
}
|
||||
if IsMirrorPod(pod) {
|
||||
return true
|
||||
}
|
||||
if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -165,35 +169,15 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool {
|
||||
if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) {
|
||||
return true
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
|
||||
if (preemptor != nil && preemptor.Spec.Priority != nil) &&
|
||||
(preemptee != nil && preemptee.Spec.Priority != nil) {
|
||||
return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
|
||||
}
|
||||
if (preemptor != nil && preemptor.Spec.Priority != nil) &&
|
||||
(preemptee != nil && preemptee.Spec.Priority != nil) {
|
||||
return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCritical returns true if parameters bear the critical pod annotation
|
||||
// key. The DaemonSetController uses this key directly to make scheduling decisions.
|
||||
// TODO: @ravig - Deprecated. Remove this when we move to resolving critical pods based on priorityClassName.
|
||||
func IsCritical(ns string, annotations map[string]string) bool {
|
||||
// Critical pods are restricted to "kube-system" namespace as of now.
|
||||
if ns != kubeapi.NamespaceSystem {
|
||||
return false
|
||||
}
|
||||
val, ok := annotations[CriticalPodAnnotationKey]
|
||||
if ok && val == "" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
|
||||
func IsCriticalPodBasedOnPriority(priority int32) bool {
|
||||
if priority >= scheduling.SystemCriticalPriority {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return priority >= scheduling.SystemCriticalPriority
|
||||
}
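
As a quick illustration of the criticality helpers touched by this hunk, here is a minimal sketch (not part of the vendored diff; it assumes the vendored packages are importable from the consuming module):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/scheduling"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	// A pod whose priority meets the system-critical threshold.
	prio := scheduling.SystemCriticalPriority
	critical := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "kube-apiserver"},
		Spec:       v1.PodSpec{Priority: &prio},
	}

	// A plain workload pod with no priority set.
	plain := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}

	fmt.Println(kubetypes.IsCriticalPod(critical))      // true: priority >= SystemCriticalPriority
	fmt.Println(kubetypes.IsCriticalPod(plain))         // false
	fmt.Println(kubetypes.Preemptable(critical, plain)) // true: critical may preempt non-critical
}
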
|
||||
|
44
vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go
generated
vendored
@ -1,44 +0,0 @@
|
||||
// +build darwin
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// GetBootTime returns the time at which the machine was started, truncated to the nearest second
|
||||
func GetBootTime() (time.Time, error) {
|
||||
output, err := unix.SysctlRaw("kern.boottime")
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
var timeval syscall.Timeval
|
||||
if len(output) != int(unsafe.Sizeof(timeval)) {
|
||||
return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.bootime. Expected len(output) to be %v, but got %v",
|
||||
int(unsafe.Sizeof(timeval)), len(output))
|
||||
}
|
||||
timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0]))
|
||||
sec, nsec := timeval.Unix()
|
||||
return time.Unix(sec, nsec).Truncate(time.Second), nil
|
||||
}
|
36
vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go
generated
vendored
@ -1,36 +0,0 @@
// +build freebsd linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
	currentTime := time.Now()
	var info unix.Sysinfo_t
	if err := unix.Sysinfo(&info); err != nil {
		return time.Time{}, fmt.Errorf("error getting system uptime: %s", err)
	}
	return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil
}
18
vendor/k8s.io/kubernetes/pkg/kubelet/util/doc.go
generated
vendored
@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package util holds utility functions.
package util // import "k8s.io/kubernetes/pkg/kubelet/util"
2
vendor/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go
generated
vendored
@ -68,5 +68,5 @@ func aggregatePods(pods []*v1.Pod, handler podHandler) string {
	for _, pod := range pods {
		podStrings = append(podStrings, handler(pod))
	}
	return fmt.Sprintf(strings.Join(podStrings, ", "))
	return strings.Join(podStrings, ", ")
}
272
vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/cache_based_manager.go
generated
vendored
@ -1,272 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storageetcd "k8s.io/apiserver/pkg/storage/etcd"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// GetObjectTTLFunc defines a function to get value of TTL.
|
||||
type GetObjectTTLFunc func() (time.Duration, bool)
|
||||
|
||||
// GetObjectFunc defines a function to get object with a given namespace and name.
|
||||
type GetObjectFunc func(string, string, metav1.GetOptions) (runtime.Object, error)
|
||||
|
||||
type objectKey struct {
|
||||
namespace string
|
||||
name string
|
||||
}
|
||||
|
||||
// objectStoreItems is a single item stored in objectStore.
|
||||
type objectStoreItem struct {
|
||||
refCount int
|
||||
data *objectData
|
||||
}
|
||||
|
||||
type objectData struct {
|
||||
sync.Mutex
|
||||
|
||||
object runtime.Object
|
||||
err error
|
||||
lastUpdateTime time.Time
|
||||
}
|
||||
|
||||
// objectStore is a local cache of objects.
|
||||
type objectStore struct {
|
||||
getObject GetObjectFunc
|
||||
clock clock.Clock
|
||||
|
||||
lock sync.Mutex
|
||||
items map[objectKey]*objectStoreItem
|
||||
|
||||
defaultTTL time.Duration
|
||||
getTTL GetObjectTTLFunc
|
||||
}
|
||||
|
||||
// NewObjectStore returns a new ttl-based instance of Store interface.
|
||||
func NewObjectStore(getObject GetObjectFunc, clock clock.Clock, getTTL GetObjectTTLFunc, ttl time.Duration) Store {
|
||||
return &objectStore{
|
||||
getObject: getObject,
|
||||
clock: clock,
|
||||
items: make(map[objectKey]*objectStoreItem),
|
||||
defaultTTL: ttl,
|
||||
getTTL: getTTL,
|
||||
}
|
||||
}
|
||||
|
||||
func isObjectOlder(newObject, oldObject runtime.Object) bool {
|
||||
if newObject == nil || oldObject == nil {
|
||||
return false
|
||||
}
|
||||
newVersion, _ := storageetcd.Versioner.ObjectResourceVersion(newObject)
|
||||
oldVersion, _ := storageetcd.Versioner.ObjectResourceVersion(oldObject)
|
||||
return newVersion < oldVersion
|
||||
}
|
||||
|
||||
func (s *objectStore) AddReference(namespace, name string) {
|
||||
key := objectKey{namespace: namespace, name: name}
|
||||
|
||||
// AddReference is called from RegisterPod, thus it needs to be efficient.
|
||||
// Thus Add() is only increasing refCount and generation of a given object.
|
||||
// Then Get() is responsible for fetching if needed.
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
item, exists := s.items[key]
|
||||
if !exists {
|
||||
item = &objectStoreItem{
|
||||
refCount: 0,
|
||||
data: &objectData{},
|
||||
}
|
||||
s.items[key] = item
|
||||
}
|
||||
|
||||
item.refCount++
|
||||
// This will trigger fetch on the next Get() operation.
|
||||
item.data = nil
|
||||
}
|
||||
|
||||
func (s *objectStore) DeleteReference(namespace, name string) {
|
||||
key := objectKey{namespace: namespace, name: name}
|
||||
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
if item, ok := s.items[key]; ok {
|
||||
item.refCount--
|
||||
if item.refCount == 0 {
|
||||
delete(s.items, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetObjectTTLFromNodeFunc returns a function that returns TTL value
|
||||
// from a given Node object.
|
||||
func GetObjectTTLFromNodeFunc(getNode func() (*v1.Node, error)) GetObjectTTLFunc {
|
||||
return func() (time.Duration, bool) {
|
||||
node, err := getNode()
|
||||
if err != nil {
|
||||
return time.Duration(0), false
|
||||
}
|
||||
if node != nil && node.Annotations != nil {
|
||||
if value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]; ok {
|
||||
if intValue, err := strconv.Atoi(value); err == nil {
|
||||
return time.Duration(intValue) * time.Second, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return time.Duration(0), false
|
||||
}
|
||||
}
|
||||
|
||||
func (s *objectStore) isObjectFresh(data *objectData) bool {
|
||||
objectTTL := s.defaultTTL
|
||||
if ttl, ok := s.getTTL(); ok {
|
||||
objectTTL = ttl
|
||||
}
|
||||
return s.clock.Now().Before(data.lastUpdateTime.Add(objectTTL))
|
||||
}
|
||||
|
||||
func (s *objectStore) Get(namespace, name string) (runtime.Object, error) {
|
||||
key := objectKey{namespace: namespace, name: name}
|
||||
|
||||
data := func() *objectData {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
item, exists := s.items[key]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
if item.data == nil {
|
||||
item.data = &objectData{}
|
||||
}
|
||||
return item.data
|
||||
}()
|
||||
if data == nil {
|
||||
return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
|
||||
}
|
||||
|
||||
// After updating data in objectStore, lock the data, fetch object if
|
||||
// needed and return data.
|
||||
data.Lock()
|
||||
defer data.Unlock()
|
||||
if data.err != nil || !s.isObjectFresh(data) {
|
||||
opts := metav1.GetOptions{}
|
||||
if data.object != nil && data.err == nil {
|
||||
// This is just a periodic refresh of an object we successfully fetched previously.
|
||||
// In this case, serve data from the apiserver cache to reduce the load on both
|
||||
// etcd and apiserver (the cache is eventually consistent).
|
||||
util.FromApiserverCache(&opts)
|
||||
}
|
||||
|
||||
object, err := s.getObject(namespace, name, opts)
|
||||
if err != nil && !apierrors.IsNotFound(err) && data.object == nil && data.err == nil {
|
||||
// Couldn't fetch the latest object, but there is no cached data to return.
|
||||
// Return the fetch result instead.
|
||||
return object, err
|
||||
}
|
||||
if (err == nil && !isObjectOlder(object, data.object)) || apierrors.IsNotFound(err) {
|
||||
// If the fetch succeeded with a newer version of the object, or if the
|
||||
// object could not be found in the apiserver, update the cached data to
|
||||
// reflect the current status.
|
||||
data.object = object
|
||||
data.err = err
|
||||
data.lastUpdateTime = s.clock.Now()
|
||||
}
|
||||
}
|
||||
return data.object, data.err
|
||||
}
|
||||
|
||||
// cacheBasedManager keeps a store with objects necessary
|
||||
// for registered pods. Different implementations of the store
|
||||
// may result in different semantics for freshness of objects
|
||||
// (e.g. ttl-based implementation vs watch-based implementation).
|
||||
type cacheBasedManager struct {
|
||||
objectStore Store
|
||||
getReferencedObjects func(*v1.Pod) sets.String
|
||||
|
||||
lock sync.Mutex
|
||||
registeredPods map[objectKey]*v1.Pod
|
||||
}
|
||||
|
||||
func (c *cacheBasedManager) GetObject(namespace, name string) (runtime.Object, error) {
|
||||
return c.objectStore.Get(namespace, name)
|
||||
}
|
||||
|
||||
func (c *cacheBasedManager) RegisterPod(pod *v1.Pod) {
|
||||
names := c.getReferencedObjects(pod)
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
for name := range names {
|
||||
c.objectStore.AddReference(pod.Namespace, name)
|
||||
}
|
||||
var prev *v1.Pod
|
||||
key := objectKey{namespace: pod.Namespace, name: pod.Name}
|
||||
prev = c.registeredPods[key]
|
||||
c.registeredPods[key] = pod
|
||||
if prev != nil {
|
||||
for name := range c.getReferencedObjects(prev) {
|
||||
// On an update, the .Add() call above will have re-incremented the
|
||||
// ref count of any existing object, so any objects that are in both
|
||||
// names and prev need to have their ref counts decremented. Any that
|
||||
// are only in prev need to be completely removed. This unconditional
|
||||
// call takes care of both cases.
|
||||
c.objectStore.DeleteReference(prev.Namespace, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cacheBasedManager) UnregisterPod(pod *v1.Pod) {
|
||||
var prev *v1.Pod
|
||||
key := objectKey{namespace: pod.Namespace, name: pod.Name}
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
prev = c.registeredPods[key]
|
||||
delete(c.registeredPods, key)
|
||||
if prev != nil {
|
||||
for name := range c.getReferencedObjects(prev) {
|
||||
c.objectStore.DeleteReference(prev.Namespace, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewCacheBasedManager creates a manager that keeps a cache of all objects
|
||||
// necessary for registered pods.
|
||||
// It implements the following logic:
|
||||
// - whenever a pod is created or updated, the cached versions of all objects
|
||||
// the pod is referencing are invalidated
|
||||
// - every GetObject() call tries to fetch the value from local cache; if it is
|
||||
// not there, invalidated or too old, we fetch it from apiserver and refresh the
|
||||
// value in cache; otherwise it is just fetched from cache
|
||||
func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.String) Manager {
|
||||
return &cacheBasedManager{
|
||||
objectStore: objectStore,
|
||||
getReferencedObjects: getReferencedObjects,
|
||||
registeredPods: make(map[objectKey]*v1.Pod),
|
||||
}
|
||||
}
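
To make the TTL semantics of the store above concrete, here is a small illustrative sketch (not vendored code) that drives NewObjectStore with a fake clock and an in-memory getObject function standing in for the apiserver:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/kubernetes/pkg/kubelet/util/manager"
)

func main() {
	fetches := 0
	// getObject stands in for a real apiserver GET; here it just counts calls.
	getObject := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
		fetches++
		return &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, nil
	}

	fakeClock := clock.NewFakeClock(time.Now())
	noTTLOverride := func() (time.Duration, bool) { return 0, false }
	objStore := manager.NewObjectStore(getObject, fakeClock, noTTLOverride, time.Minute)

	objStore.AddReference("demo", "app-config")

	objStore.Get("demo", "app-config") // cache miss: fetched from "apiserver"
	objStore.Get("demo", "app-config") // within the TTL: served from cache
	fakeClock.Step(2 * time.Minute)
	objStore.Get("demo", "app-config") // TTL expired: fetched again

	fmt.Println("apiserver fetches:", fetches) // 2
}
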
|
60
vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/manager.go
generated
vendored
@ -1,60 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package manager
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// Manager is the interface for registering and unregistering
|
||||
// objects referenced by pods in the underlying cache and
|
||||
// extracting those from that cache if needed.
|
||||
type Manager interface {
|
||||
// Get object by its namespace and name.
|
||||
GetObject(namespace, name string) (runtime.Object, error)
|
||||
|
||||
// WARNING: Register/UnregisterPod functions should be efficient,
|
||||
// i.e. should not block on network operations.
|
||||
|
||||
// RegisterPod registers all objects referenced from a given pod.
|
||||
//
|
||||
// NOTE: All implementations of RegisterPod should be idempotent.
|
||||
RegisterPod(pod *v1.Pod)
|
||||
|
||||
// UnregisterPod unregisters objects referenced from a given pod that are not
|
||||
// used by any other registered pod.
|
||||
//
|
||||
// NOTE: All implementations of UnregisterPod should be idempotent.
|
||||
UnregisterPod(pod *v1.Pod)
|
||||
}
|
||||
|
||||
// Store is the interface for a object cache that
|
||||
// can be used by cacheBasedManager.
|
||||
type Store interface {
|
||||
// AddReference adds a reference to the object to the store.
|
||||
// Note that multiple additions to the store have to be allowed
|
||||
// in the implementations and effectively treated as refcounted.
|
||||
AddReference(namespace, name string)
|
||||
// DeleteReference deletes reference to the object from the store.
|
||||
// Note that the object should be deleted only when there was a
// corresponding Delete call for each of the Add calls (effectively
// when the refcount was reduced to zero).
|
||||
DeleteReference(namespace, name string)
|
||||
// Get an object from a store.
|
||||
Get(namespace, name string) (runtime.Object, error)
|
||||
}
|
194
vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go
generated
vendored
@ -1,194 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// TODO: We did some scalability tests and using watchBasedManager
|
||||
// seems to help with apiserver performance at scale visibly.
|
||||
// We also observed no issues at the scale of ~200k watchers with a
|
||||
// single apiserver.
|
||||
// However, we need to perform more extensive testing before we
|
||||
// enable this in production setups.
|
||||
|
||||
package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error)
|
||||
type watchObjectFunc func(string, metav1.ListOptions) (watch.Interface, error)
|
||||
type newObjectFunc func() runtime.Object
|
||||
|
||||
// objectCacheItem is a single item stored in objectCache.
|
||||
type objectCacheItem struct {
|
||||
refCount int
|
||||
store cache.Store
|
||||
hasSynced func() (bool, error)
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
// objectCache is a local cache of objects propagated via
|
||||
// individual watches.
|
||||
type objectCache struct {
|
||||
listObject listObjectFunc
|
||||
watchObject watchObjectFunc
|
||||
newObject newObjectFunc
|
||||
groupResource schema.GroupResource
|
||||
|
||||
lock sync.Mutex
|
||||
items map[objectKey]*objectCacheItem
|
||||
}
|
||||
|
||||
// NewObjectCache returns a new watch-based instance of Store interface.
|
||||
func NewObjectCache(listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, groupResource schema.GroupResource) Store {
|
||||
return &objectCache{
|
||||
listObject: listObject,
|
||||
watchObject: watchObject,
|
||||
newObject: newObject,
|
||||
groupResource: groupResource,
|
||||
items: make(map[objectKey]*objectCacheItem),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *objectCache) newStore() cache.Store {
|
||||
// TODO: We may consider creating a dedicated store that keeps just a single
// item, instead of using a generic store implementation for this purpose.
// However, simple benchmarks show that the memory overhead in that case would
// only decrease from ~600B to ~300B per object, so we are not optimizing it
// until we see a good reason to.
|
||||
return cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
}
|
||||
|
||||
func (c *objectCache) newReflector(namespace, name string) *objectCacheItem {
|
||||
fieldSelector := fields.Set{"metadata.name": name}.AsSelector().String()
|
||||
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = fieldSelector
|
||||
return c.listObject(namespace, options)
|
||||
}
|
||||
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = fieldSelector
|
||||
return c.watchObject(namespace, options)
|
||||
}
|
||||
store := c.newStore()
|
||||
reflector := cache.NewNamedReflector(
|
||||
fmt.Sprintf("object-%q/%q", namespace, name),
|
||||
&cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc},
|
||||
c.newObject(),
|
||||
store,
|
||||
0,
|
||||
)
|
||||
stopCh := make(chan struct{})
|
||||
go reflector.Run(stopCh)
|
||||
return &objectCacheItem{
|
||||
refCount: 0,
|
||||
store: store,
|
||||
hasSynced: func() (bool, error) { return reflector.LastSyncResourceVersion() != "", nil },
|
||||
stopCh: stopCh,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *objectCache) AddReference(namespace, name string) {
|
||||
key := objectKey{namespace: namespace, name: name}
|
||||
|
||||
// AddReference is called from RegisterPod, thus it needs to be efficient.
// Thus, it only increases refCount and, on the first registration of a given
// object, starts the corresponding reflector.
// It is the responsibility of the first Get operation to wait until the
// reflector has propagated the store.
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
item, exists := c.items[key]
|
||||
if !exists {
|
||||
item = c.newReflector(namespace, name)
|
||||
c.items[key] = item
|
||||
}
|
||||
item.refCount++
|
||||
}
|
||||
|
||||
func (c *objectCache) DeleteReference(namespace, name string) {
|
||||
key := objectKey{namespace: namespace, name: name}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if item, ok := c.items[key]; ok {
|
||||
item.refCount--
|
||||
if item.refCount == 0 {
|
||||
// Stop the underlying reflector.
|
||||
close(item.stopCh)
|
||||
delete(c.items, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// key returns key of an object with a given name and namespace.
|
||||
// This has to be in-sync with cache.MetaNamespaceKeyFunc.
|
||||
func (c *objectCache) key(namespace, name string) string {
|
||||
if len(namespace) > 0 {
|
||||
return namespace + "/" + name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (c *objectCache) Get(namespace, name string) (runtime.Object, error) {
|
||||
key := objectKey{namespace: namespace, name: name}
|
||||
|
||||
c.lock.Lock()
|
||||
item, exists := c.items[key]
|
||||
c.lock.Unlock()
|
||||
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
|
||||
}
|
||||
if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil {
|
||||
return nil, fmt.Errorf("couldn't propagate object cache: %v", err)
|
||||
}
|
||||
|
||||
obj, exists, err := item.store.GetByKey(c.key(namespace, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, apierrors.NewNotFound(c.groupResource, name)
|
||||
}
|
||||
if object, ok := obj.(runtime.Object); ok {
|
||||
return object, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected object type: %v", obj)
|
||||
}
|
||||
|
||||
// NewWatchBasedManager creates a manager that keeps a cache of all objects
|
||||
// necessary for registered pods.
|
||||
// It implements the following logic:
|
||||
// - whenever a pod is created or updated, we start individual watches for all
|
||||
// referenced objects that aren't referenced from other registered pods
|
||||
// - every GetObject() returns a value from local cache propagated via watches
|
||||
func NewWatchBasedManager(listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, groupResource schema.GroupResource, getReferencedObjects func(*v1.Pod) sets.String) Manager {
|
||||
objectStore := NewObjectCache(listObject, watchObject, newObject, groupResource)
|
||||
return NewCacheBasedManager(objectStore, getReferencedObjects)
|
||||
}
|
18
vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go
generated
vendored
@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package store hosts a Store interface and its implementations.
package store // import "k8s.io/kubernetes/pkg/kubelet/util/store"
167
vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go
generated
vendored
@ -1,167 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
|
||||
)
|
||||
|
||||
const (
|
||||
// Name prefix for the temporary files.
|
||||
tmpPrefix = "."
|
||||
)
|
||||
|
||||
// FileStore is an implementation of the Store interface which stores data in files.
|
||||
type FileStore struct {
|
||||
// Absolute path to the base directory for storing data files.
|
||||
directoryPath string
|
||||
|
||||
// filesystem to use.
|
||||
filesystem utilfs.Filesystem
|
||||
}
|
||||
|
||||
// NewFileStore returns an instance of FileStore.
|
||||
func NewFileStore(path string, fs utilfs.Filesystem) (Store, error) {
|
||||
if err := ensureDirectory(fs, path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FileStore{directoryPath: path, filesystem: fs}, nil
|
||||
}
|
||||
|
||||
// Write writes the given data to a file named key.
|
||||
func (f *FileStore) Write(key string, data []byte) error {
|
||||
if err := ValidateKey(key); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ensureDirectory(f.filesystem, f.directoryPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeFile(f.filesystem, f.getPathByKey(key), data)
|
||||
}
|
||||
|
||||
// Read reads the data from the file named key.
|
||||
func (f *FileStore) Read(key string) ([]byte, error) {
|
||||
if err := ValidateKey(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bytes, err := f.filesystem.ReadFile(f.getPathByKey(key))
|
||||
if os.IsNotExist(err) {
|
||||
return bytes, ErrKeyNotFound
|
||||
}
|
||||
return bytes, err
|
||||
}
|
||||
|
||||
// Delete deletes the key file.
|
||||
func (f *FileStore) Delete(key string) error {
|
||||
if err := ValidateKey(key); err != nil {
|
||||
return err
|
||||
}
|
||||
return removePath(f.filesystem, f.getPathByKey(key))
|
||||
}
|
||||
|
||||
// List returns all keys in the store.
|
||||
func (f *FileStore) List() ([]string, error) {
|
||||
keys := make([]string, 0)
|
||||
files, err := f.filesystem.ReadDir(f.directoryPath)
|
||||
if err != nil {
|
||||
return keys, err
|
||||
}
|
||||
for _, f := range files {
|
||||
if !strings.HasPrefix(f.Name(), tmpPrefix) {
|
||||
keys = append(keys, f.Name())
|
||||
}
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// getPathByKey returns the full path of the file for the key.
|
||||
func (f *FileStore) getPathByKey(key string) string {
|
||||
return filepath.Join(f.directoryPath, key)
|
||||
}
|
||||
|
||||
// ensureDirectory creates the directory if it does not exist.
|
||||
func ensureDirectory(fs utilfs.Filesystem, path string) error {
|
||||
if _, err := fs.Stat(path); err != nil {
|
||||
// MkdirAll returns nil if directory already exists.
|
||||
return fs.MkdirAll(path, 0755)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeFile writes data to path in a single transaction.
|
||||
func writeFile(fs utilfs.Filesystem, path string, data []byte) (retErr error) {
|
||||
// Create a temporary file in the base directory of `path` with a prefix.
|
||||
tmpFile, err := fs.TempFile(filepath.Dir(path), tmpPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmpPath := tmpFile.Name()
|
||||
shouldClose := true
|
||||
|
||||
defer func() {
|
||||
// Close the file.
|
||||
if shouldClose {
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
if retErr == nil {
|
||||
retErr = fmt.Errorf("close error: %v", err)
|
||||
} else {
|
||||
retErr = fmt.Errorf("failed to close temp file after error %v; close error: %v", retErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up the temp file on error.
|
||||
if retErr != nil && tmpPath != "" {
|
||||
if err := removePath(fs, tmpPath); err != nil {
|
||||
retErr = fmt.Errorf("failed to remove the temporary file (%q) after error %v; remove error: %v", tmpPath, retErr, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Write data.
|
||||
if _, err := tmpFile.Write(data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sync file.
|
||||
if err := tmpFile.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Closing the file before renaming.
|
||||
err = tmpFile.Close()
|
||||
shouldClose = false
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fs.Rename(tmpPath, path)
|
||||
}
|
||||
|
||||
func removePath(fs utilfs.Filesystem, path string) error {
|
||||
if err := fs.Remove(path); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
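
A minimal usage sketch of the file-backed store above (not part of the vendored file; it assumes the DefaultFs implementation from k8s.io/kubernetes/pkg/util/filesystem and a throwaway directory):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/util/store"
	utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)

func main() {
	// DefaultFs delegates to the real OS filesystem; the directory is created if missing.
	s, err := store.NewFileStore("/tmp/kubelet-filestore-demo", utilfs.DefaultFs{})
	if err != nil {
		panic(err)
	}

	// Keys must match the format enforced by ValidateKey (alphanumerics, '-', '_', '.').
	if err := s.Write("checkpoint-1", []byte(`{"state":"ok"}`)); err != nil {
		panic(err)
	}

	data, _ := s.Read("checkpoint-1")
	keys, _ := s.List()
	fmt.Println(string(data), keys)
}
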
|
64
vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go
generated
vendored
@ -1,64 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const (
|
||||
keyMaxLength = 250
|
||||
|
||||
keyCharFmt string = "[A-Za-z0-9]"
|
||||
keyExtCharFmt string = "[-A-Za-z0-9_.]"
|
||||
qualifiedKeyFmt string = "(" + keyCharFmt + keyExtCharFmt + "*)?" + keyCharFmt
|
||||
)
|
||||
|
||||
var (
|
||||
// Key must consist of alphanumeric characters, '-', '_' or '.', and must start
|
||||
// and end with an alphanumeric character.
|
||||
keyRegex = regexp.MustCompile("^" + qualifiedKeyFmt + "$")
|
||||
|
||||
// ErrKeyNotFound is the error returned if key is not found in Store.
|
||||
ErrKeyNotFound = fmt.Errorf("key is not found")
|
||||
)
|
||||
|
||||
// Store provides the interface for storing keyed data.
|
||||
// Store must be thread-safe
|
||||
type Store interface {
|
||||
// key must contain one or more characters in [A-Za-z0-9]
|
||||
// Write writes data with key.
|
||||
Write(key string, data []byte) error
|
||||
// Read retrieves data with key
|
||||
// Read must return ErrKeyNotFound if key is not found.
|
||||
Read(key string) ([]byte, error)
|
||||
// Delete deletes data by key
|
||||
// Delete must not return error if key does not exist
|
||||
Delete(key string) error
|
||||
// List lists all existing keys.
|
||||
List() ([]string, error)
|
||||
}
|
||||
|
||||
// ValidateKey returns an error if the given key does not meet the requirement
|
||||
// of the key format and length.
|
||||
func ValidateKey(key string) error {
|
||||
if len(key) <= keyMaxLength && keyRegex.MatchString(key) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("invalid key: %q", key)
|
||||
}
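
And a quick check of the key format rules enforced above (illustrative only, not vendored code):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/util/store"
)

func main() {
	fmt.Println(store.ValidateKey("pod_abc-123.json")) // <nil>: allowed characters, starts and ends alphanumeric
	fmt.Println(store.ValidateKey(".hidden"))          // error: must start with an alphanumeric character
	fmt.Println(store.ValidateKey("a/b"))              // error: '/' is not in the allowed set
}
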
|
27
vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go
generated
vendored
@ -1,27 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// FromApiserverCache modifies <opts> so that the GET request will
// be served from apiserver cache instead of from etcd.
func FromApiserverCache(opts *metav1.GetOptions) {
	opts.ResourceVersion = "0"
}
137
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go
generated
vendored
@ -1,137 +0,0 @@
|
||||
// +build freebsd linux darwin
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
// unixProtocol is the network protocol of unix socket.
|
||||
unixProtocol = "unix"
|
||||
)
|
||||
|
||||
// CreateListener creates a listener on the specified endpoint.
|
||||
func CreateListener(endpoint string) (net.Listener, error) {
|
||||
protocol, addr, err := parseEndpointWithFallbackProtocol(endpoint, unixProtocol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if protocol != unixProtocol {
|
||||
return nil, fmt.Errorf("only support unix socket endpoint")
|
||||
}
|
||||
|
||||
// Unlink to cleanup the previous socket file.
|
||||
err = unix.Unlink(addr)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("failed to unlink socket file %q: %v", addr, err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(addr), 0750); err != nil {
|
||||
return nil, fmt.Errorf("error creating socket directory %q: %v", filepath.Dir(addr), err)
|
||||
}
|
||||
|
||||
// Create the socket on a tempfile and move it to the destination socket to handle improper cleanup
|
||||
file, err := ioutil.TempFile(filepath.Dir(addr), "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temporary file: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Remove(file.Name()); err != nil {
|
||||
return nil, fmt.Errorf("failed to remove temporary file: %v", err)
|
||||
}
|
||||
|
||||
l, err := net.Listen(protocol, file.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = os.Rename(file.Name(), addr); err != nil {
|
||||
return nil, fmt.Errorf("failed to move temporary file to addr %q: %v", addr, err)
|
||||
}
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// GetAddressAndDialer returns the address parsed from the given endpoint and a dialer.
|
||||
func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
|
||||
protocol, addr, err := parseEndpointWithFallbackProtocol(endpoint, unixProtocol)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if protocol != unixProtocol {
|
||||
return "", nil, fmt.Errorf("only support unix socket endpoint")
|
||||
}
|
||||
|
||||
return addr, dial, nil
|
||||
}
|
||||
|
||||
func dial(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.DialTimeout(unixProtocol, addr, timeout)
|
||||
}
|
||||
|
||||
func parseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string) (protocol string, addr string, err error) {
|
||||
if protocol, addr, err = parseEndpoint(endpoint); err != nil && protocol == "" {
|
||||
fallbackEndpoint := fallbackProtocol + "://" + endpoint
|
||||
protocol, addr, err = parseEndpoint(fallbackEndpoint)
|
||||
if err == nil {
|
||||
klog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseEndpoint(endpoint string) (string, string, error) {
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "tcp":
|
||||
return "tcp", u.Host, nil
|
||||
|
||||
case "unix":
|
||||
return "unix", u.Path, nil
|
||||
|
||||
case "":
|
||||
return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
|
||||
|
||||
default:
|
||||
return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
// LocalEndpoint returns the full path to a unix socket at the given endpoint
|
||||
func LocalEndpoint(path, file string) (string, error) {
|
||||
u := url.URL{
|
||||
Scheme: unixProtocol,
|
||||
Path: path,
|
||||
}
|
||||
return filepath.Join(u.String(), file+".sock"), nil
|
||||
}
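
The endpoint parsing and dialing helpers above are easiest to see in a short sketch (not part of the vendored file; the socket path is hypothetical and nothing needs to be listening for the parse step to succeed):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/kubelet/util"
)

func main() {
	// A CRI-style unix endpoint; bare paths without a scheme fall back to unix:// with a warning.
	endpoint := "unix:///var/run/demo/demo.sock"

	addr, dialer, err := util.GetAddressAndDialer(endpoint)
	if err != nil {
		panic(err)
	}
	fmt.Println("dialing", addr)

	// This fails unless something is actually listening on the socket.
	conn, err := dialer(addr, 2*time.Second)
	if err != nil {
		fmt.Println("dial error:", err)
		return
	}
	defer conn.Close()
}
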
|
54
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go
generated
vendored
@ -1,54 +0,0 @@
|
||||
// +build !freebsd,!linux,!windows,!darwin
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CreateListener creates a listener on the specified endpoint.
|
||||
func CreateListener(endpoint string) (net.Listener, error) {
|
||||
return nil, fmt.Errorf("CreateListener is unsupported in this build")
|
||||
}
|
||||
|
||||
// GetAddressAndDialer returns the address parsed from the given endpoint and a dialer.
|
||||
func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
|
||||
return "", nil, fmt.Errorf("GetAddressAndDialer is unsupported in this build")
|
||||
}
|
||||
|
||||
// LockAndCheckSubPath empty implementation
|
||||
func LockAndCheckSubPath(volumePath, subPath string) ([]uintptr, error) {
|
||||
return []uintptr{}, nil
|
||||
}
|
||||
|
||||
// UnlockPath empty implementation
|
||||
func UnlockPath(fileHandles []uintptr) {
|
||||
}
|
||||
|
||||
// LocalEndpoint empty implementation
|
||||
func LocalEndpoint(path, file string) (string, error) {
|
||||
return "", fmt.Errorf("LocalEndpoints are unsupported in this build")
|
||||
}
|
||||
|
||||
// GetBootTime empty implementation
|
||||
func GetBootTime() (time.Time, error) {
|
||||
return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build")
|
||||
}
|
125
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go
generated
vendored
@ -1,125 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
const (
|
||||
tcpProtocol = "tcp"
|
||||
npipeProtocol = "npipe"
|
||||
)
|
||||
|
||||
// CreateListener creates a listener on the specified endpoint.
|
||||
func CreateListener(endpoint string) (net.Listener, error) {
|
||||
protocol, addr, err := parseEndpoint(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch protocol {
|
||||
case tcpProtocol:
|
||||
return net.Listen(tcpProtocol, addr)
|
||||
|
||||
case npipeProtocol:
|
||||
return winio.ListenPipe(addr, nil)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("only support tcp and npipe endpoint")
|
||||
}
|
||||
}
|
||||
|
||||
// GetAddressAndDialer returns the address parsed from the given endpoint and a dialer.
|
||||
func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
|
||||
protocol, addr, err := parseEndpoint(endpoint)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if protocol == tcpProtocol {
|
||||
return addr, tcpDial, nil
|
||||
}
|
||||
|
||||
if protocol == npipeProtocol {
|
||||
return addr, npipeDial, nil
|
||||
}
|
||||
|
||||
return "", nil, fmt.Errorf("only support tcp and npipe endpoint")
|
||||
}
|
||||
|
||||
func tcpDial(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.DialTimeout(tcpProtocol, addr, timeout)
|
||||
}
|
||||
|
||||
func npipeDial(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
return winio.DialPipe(addr, &timeout)
|
||||
}
|
||||
|
||||
func parseEndpoint(endpoint string) (string, string, error) {
|
||||
// url.Parse doesn't recognize \, so replace with / first.
|
||||
endpoint = strings.Replace(endpoint, "\\", "/", -1)
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if u.Scheme == "tcp" {
|
||||
return "tcp", u.Host, nil
|
||||
} else if u.Scheme == "npipe" {
|
||||
if strings.HasPrefix(u.Path, "//./pipe") {
|
||||
return "npipe", u.Path, nil
|
||||
}
|
||||
|
||||
// fallback host if not provided.
|
||||
host := u.Host
|
||||
if host == "" {
|
||||
host = "."
|
||||
}
|
||||
return "npipe", fmt.Sprintf("//%s%s", host, u.Path), nil
|
||||
} else if u.Scheme == "" {
|
||||
return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
|
||||
} else {
|
||||
return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
// LocalEndpoint empty implementation
|
||||
func LocalEndpoint(path, file string) (string, error) {
|
||||
return "", fmt.Errorf("LocalEndpoints are unsupported in this build")
|
||||
}
|
||||
|
||||
var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64")
|
||||
|
||||
// GetBootTime returns the time at which the machine was started, truncated to the nearest second
|
||||
func GetBootTime() (time.Time, error) {
|
||||
currentTime := time.Now()
|
||||
output, _, err := tickCount.Call()
|
||||
if errno, ok := err.(syscall.Errno); !ok || errno != 0 {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil
|
||||
}
|